drivers/gpu/drm/msm/sde/sde_crtc.c +116 −71

@@ -81,8 +81,8 @@ static int sde_crtc_reserve_hw_resources(struct drm_crtc *crtc,
 				&sde_crtc->mixer[sde_crtc->num_ctls];
 			mixer->hw_ctl = sde_rm_get_ctl_path(sde_kms, i);
 			if (IS_ERR_OR_NULL(mixer->hw_ctl)) {
-				DBG("[%s], Invalid ctl_path", __func__);
-				return -EACCES;
+				DRM_ERROR("Invalid ctl_path\n");
+				return PTR_ERR(mixer->hw_ctl);
 			}
 			sde_crtc->num_ctls++;
 		}

@@ -146,26 +146,12 @@ static void sde_crtc_destroy(struct drm_crtc *crtc)
 	kfree(sde_crtc);
 }
 
-static void update_crtc_vsync_count(struct sde_crtc *sde_crtc)
-{
-	struct vsync_info vsync;
-
-	/* request vsync info, cache the current frame count */
-	sde_encoder_get_vblank_status(sde_crtc->encoder, &vsync);
-	sde_crtc->vsync_count = vsync.frame_count;
-}
-
 static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
 		const struct drm_display_mode *mode,
 		struct drm_display_mode *adjusted_mode)
 {
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-
 	DBG("");
 
-	/* Update vsync counter incase wait for vsync needed before mode_set */
-	update_crtc_vsync_count(sde_crtc);
-
 	if (msm_is_mode_seamless(adjusted_mode)) {
 		DBG("Seamless mode set requested");
 		if (!crtc->enabled || crtc->state->active_changed) {

@@ -377,8 +363,11 @@ static void sde_crtc_vblank_cb(void *data)
 	pending = atomic_xchg(&sde_crtc->pending, 0);
 
-	if (pending & PENDING_FLIP)
+	if (pending & PENDING_FLIP) {
 		complete_flip(crtc, NULL);
+		/* free ref count paired with the atomic_flush */
+		drm_crtc_vblank_put(crtc);
+	}
 
 	if (sde_crtc->drm_requested_vblank) {
 		drm_handle_vblank(dev, sde_crtc->id);

@@ -387,73 +376,98 @@ static void sde_crtc_vblank_cb(void *data)
 	}
 }
 
-static bool frame_flushed(struct sde_crtc *sde_crtc)
-{
-	struct vsync_info vsync;
-
-	/*
-	 * encoder get vsync_info
-	 * if frame_count does not match
-	 * frame is flushed
-	 */
-	sde_encoder_get_vblank_status(sde_crtc->encoder, &vsync);
-
-	return (vsync.frame_count != sde_crtc->vsync_count) ? true : false;
-}
-
-void sde_crtc_wait_for_commit_done(struct drm_crtc *crtc)
+static int _sde_crtc_update_ctl_flush_mask(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct drm_device *dev = crtc->dev;
-	int i, ret, wait_ret_val;
-
-	if (!sde_crtc->num_ctls)
-		return;
+	struct sde_hw_ctl *ctl;
+	struct sde_crtc_mixer *mixer;
+	int i;
 
-	/* ref count the vblank event */
-	ret = drm_crtc_vblank_get(crtc);
-	if (ret)
-		return;
+	if (!crtc) {
+		DRM_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
 
-	/* wait */
-	wait_ret_val = wait_event_timeout(
-			dev->vblank[drm_crtc_index(crtc)].queue,
-			frame_flushed(sde_crtc),
-			msecs_to_jiffies(50));
-	if (wait_ret_val <= 1)
-		dev_warn(dev->dev, "vblank time out, crtc=%d, ret %u\n",
-				sde_crtc->id, ret);
+	MSM_EVT(crtc->dev, sde_crtc->id, 0);
 
-	DBG("");
+	for (i = 0; i < sde_crtc->num_ctls; i++)
+		sde_crtc->mixer[i].flush_mask = 0;
 
-	/* release */
-	drm_crtc_vblank_put(crtc);
+	for (i = 0; i < sde_crtc->num_ctls; i++) {
+		mixer = &sde_crtc->mixer[i];
+		ctl = mixer->hw_ctl;
+		ctl->ops.get_bitmask_intf(ctl, &mixer->flush_mask,
+				mixer->intf_idx);
+		ctl->ops.update_pending_flush(ctl, mixer->flush_mask);
+		DBG("added CTL_ID %d mask 0x%x to pending flush",
+				ctl->idx, mixer->flush_mask);
+	}
+
+	return 0;
 }
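[Reviewer note] The hunk above splits flushing into a software accumulation step (ops.update_pending_flush) and a single hardware commit later at kickoff (ops.trigger_flush, next hunk). A minimal sketch of that staged-flush model; the struct below is illustrative only, not the real sde_hw_ctl layout:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative stand-in for the hw_ctl object */
struct example_ctl {
	u32 pending_flush_mask;
};

/* atomic_flush path: accumulate bits in software, no register access */
static void example_update_pending_flush(struct example_ctl *ctl, u32 bits)
{
	ctl->pending_flush_mask |= bits;
}

/* kickoff path: the one place the accumulated mask reaches hardware */
static void example_trigger_flush(struct example_ctl *ctl,
		void __iomem *flush_reg)
{
	writel_relaxed(ctl->pending_flush_mask, flush_reg);
}

Keeping the mask purely in software until kickoff is what lets the encoder delay the flush to an IRQ event without re-walking the CRTC state.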
 
 /**
- * Flush the CTL PATH
+ * _sde_crtc_trigger_kickoff - Iterate through the control paths and trigger
+ *	the hw_ctl object to flush any pending flush mask, and trigger
+ *	control start if the interface types require it.
+ *
+ * This is currently designed to be called only once per crtc, per flush.
+ * It should be called from the encoder, through the
+ * sde_encoder_schedule_kickoff callflow, after all the encoders are ready
+ * to have CTL_START triggered.
+ *
+ * It is called from the commit thread context.
+ * @data: crtc pointer
  */
-u32 crtc_flush_all(struct drm_crtc *crtc)
+static void _sde_crtc_trigger_kickoff(void *data)
 {
+	struct drm_crtc *crtc = (struct drm_crtc *)data;
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 	struct sde_hw_ctl *ctl;
-	int i;
+	u32 i;
 
-	DBG("");
+	if (!data) {
+		DRM_ERROR("invalid argument\n");
+		return;
+	}
+
+	MSM_EVT(crtc->dev, sde_crtc->id, 0);
 
+	/* Commit all pending flush masks to hardware */
 	for (i = 0; i < sde_crtc->num_ctls; i++) {
 		ctl = sde_crtc->mixer[i].hw_ctl;
-		ctl->ops.get_bitmask_intf(ctl,
-				&(sde_crtc->mixer[i].flush_mask),
-				sde_crtc->mixer[i].intf_idx);
-		DBG("Flushing CTL_ID %d, flush_mask %x", ctl->idx,
-				sde_crtc->mixer[i].flush_mask);
-		ctl->ops.setup_flush(ctl, sde_crtc->mixer[i].flush_mask);
 		ctl->ops.trigger_flush(ctl);
 	}
-	return 0;
+
+	/* Signal start to any interface types that require it */
+	for (i = 0; i < sde_crtc->num_ctls; i++) {
+		ctl = sde_crtc->mixer[i].hw_ctl;
+		if (sde_crtc->mixer[i].mode != INTF_MODE_VIDEO) {
+			ctl->ops.trigger_start(ctl);
+			DBG("trigger start on ctl %d", ctl->idx);
+		}
+	}
+}
+
+void sde_crtc_wait_for_commit_done(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	int ret;
+
+	/* ref count the vblank event and interrupts while we wait for it */
+	if (drm_crtc_vblank_get(crtc))
+		return;
+
+	/*
+	 * Wait post-flush if necessary to delay before plane_cleanup
+	 * For example, wait for vsync in case of video mode panels
+	 * This should be a no-op for command mode panels
+	 */
+	MSM_EVT(crtc->dev, sde_crtc->id, 0);
+	ret = sde_encoder_wait_for_commit_done(sde_crtc->encoder);
+	if (ret)
+		DBG("sde_encoder_wait_for_commit_done returned %d", ret);
+
+	/* release vblank event ref count */
+	drm_crtc_vblank_put(crtc);
 }

@@ -521,6 +535,7 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
 	struct sde_crtc *sde_crtc;
 	struct drm_device *dev;
 	unsigned long flags;
+	u32 i;
 
 	DBG("");

@@ -540,6 +555,14 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 
+	/* Reset flush mask from previous commit */
+	for (i = 0; i < sde_crtc->num_ctls; i++) {
+		struct sde_hw_ctl *ctl = sde_crtc->mixer[i].hw_ctl;
+
+		sde_crtc->mixer[i].flush_mask = 0;
+		ctl->ops.clear_pending_flush(ctl);
+	}
+
 	/*
 	 * If no CTL has been allocated in sde_crtc_atomic_check(),
 	 * it means we are trying to flush a CRTC whose state is disabled:

@@ -563,8 +586,11 @@ static void request_pending(struct drm_crtc *crtc, u32 pending)
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 
-	update_crtc_vsync_count(sde_crtc);
 	atomic_or(pending, &sde_crtc->pending);
+
+	/* ref count the vblank event and interrupts over the atomic commit */
+	if (drm_crtc_vblank_get(crtc))
+		return;
 }
 
 static void sde_crtc_atomic_flush(struct drm_crtc *crtc,

@@ -589,7 +615,6 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
 	if (sde_crtc->event) {
 		DBG("already received sde_crtc->event");
 	} else {
-		DBG("%s: event: %pK", sde_crtc->name, crtc->state->event);
 		spin_lock_irqsave(&dev->event_lock, flags);
 		sde_crtc->event = crtc->state->event;
 		spin_unlock_irqrestore(&dev->event_lock, flags);

@@ -614,9 +639,13 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
 	drm_atomic_crtc_for_each_plane(plane, crtc)
 		sde_plane_flush(plane);
 
-	crtc_flush_all(crtc);
+	/* Add pending blocks to the flush mask */
+	if (_sde_crtc_update_ctl_flush_mask(crtc))
+		return;
 
 	request_pending(crtc, PENDING_FLIP);
+
+	/* Kickoff will be scheduled by outer layer */
 }
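[Reviewer note] The hunks above replace the old single crtc_flush_all() call with a three-step flow. A minimal sketch of the ordering the new hooks assume; example_commit_display() is a stand-in for the actual msm atomic-commit caller, which is not part of this patch:

#include <drm/drm_crtc_helper.h>

static void example_commit_display(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	const struct drm_crtc_helper_funcs *funcs = crtc->helper_private;

	/* 1) stage: accumulate the flush mask, take the vblank ref */
	funcs->atomic_flush(crtc, old_state);

	/* 2) kick off: encoder flushes + CTL_START once all phys are ready */
	sde_crtc_commit_kickoff(crtc);

	/* 3) delay cleanup: e.g. wait for vsync on video-mode panels */
	sde_crtc_wait_for_commit_done(crtc);
}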
@@ -647,6 +676,23 @@ static void sde_crtc_destroy_state(struct drm_crtc *crtc,
 			cstate->property_values, cstate->property_blobs);
 }
 
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+	if (!crtc) {
+		DRM_ERROR("invalid argument\n");
+		return;
+	}
+
+	/*
+	 * Encoder will flush/start now, unless it has a tx pending
+	 * in which case it may delay and flush at an irq event (e.g. ppdone)
+	 */
+	sde_encoder_schedule_kickoff(sde_crtc->encoder,
+			_sde_crtc_trigger_kickoff, crtc);
+}
+
 /**
  * sde_crtc_duplicate_state - state duplicate hook
  * @crtc: Pointer to drm crtc structure

@@ -883,6 +929,8 @@ int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
 	DBG("%d", en);
 
+	MSM_EVT(crtc->dev, en, 0);
+
 	/*
 	 * Mark that framework requested vblank,
 	 * as opposed to enabling vblank only for our internal purposes

@@ -1091,9 +1139,6 @@ static void _sde_crtc_init_debugfs(struct sde_crtc *sde_crtc,
 			sde_debugfs_get_root(sde_kms));
 	if (sde_crtc->debugfs_root) {
 		/* don't error check these */
-		debugfs_create_u32("vsync_count", 0444,
-				sde_crtc->debugfs_root,
-				&sde_crtc->vsync_count);
 		debugfs_create_file("mixers", 0444,
 				sde_crtc->debugfs_root,
 				sde_crtc, &debugfs_mixer_fops);
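[Reviewer note] Worth tracing across the sde_crtc.c hunks: every drm_crtc_vblank_get() introduced by this patch has a matching put. A condensed map of the pairings, using the function names from the patch:

/*
 * vblank reference pairings introduced by this patch:
 *
 *   request_pending()                 drm_crtc_vblank_get()
 *     -> sde_crtc_vblank_cb()           drm_crtc_vblank_put()  [PENDING_FLIP]
 *
 *   sde_crtc_wait_for_commit_done()   drm_crtc_vblank_get()
 *     -> same function, after wait      drm_crtc_vblank_put()
 */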
drivers/gpu/drm/msm/sde/sde_encoder.c +199 −60

@@ -23,10 +23,18 @@
 #include "sde_encoder_phys.h"
 #include "display_manager.h"
 
+/*
+ * Two to anticipate panels that can do cmd/vid dynamic switching;
+ * the plan is to create all possible physical encoder types and switch
+ * between them at runtime
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define WAIT_TIMEOUT_MSEC 100
 
 /**
  * struct sde_encoder_virt - virtual encoder. Container of one or more physical
  * encoders. Virtual encoder manages one "logical" display. Physical

@@ -40,21 +48,37 @@
  * @phys_encs:		Container of physical encoders managed.
  * @cur_master:		Pointer to the current master in this mode. Optimization
  *			Only valid after enable. Cleared as disable.
- * @kms_vblank_callback:	Callback into the upper layer / CRTC for
+ * @crtc_vblank_cb:	Callback into the upper layer / CRTC for
  *			notification of the VBLANK
- * @kms_vblank_callback_data:	Data from upper layer for VBLANK notification
- * @pending_kickoff_mask:	Bitmask used to track which physical encoders
- *			still have pending transmissions before we can
- *			trigger the next kickoff. Bitmask tracks the
- *			index of the phys_enc table. Protect since
- *			shared between irq and commit thread
+ * @crtc_vblank_cb_data:	Data from upper layer for VBLANK notification
+ * @crtc_kickoff_cb:	Callback into CRTC that will flush & start
+ *			all CTL paths
+ * @crtc_kickoff_cb_data:	Opaque user data given to crtc_kickoff_cb
+ * @pending_kickoff_mask:	Bitmask tracking which phys_encs we are still
+ *			waiting on before we can trigger the next
+ *			kickoff. Bit0 = phys_encs[0] etc.
+ * @pending_kickoff_wq:	Wait queue on which the commit thread waits for
+ *			phys_encs to become ready for kickoff, signaled
+ *			from IRQ context
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
 	spinlock_t spin_lock;
 	uint32_t bus_scaling_client;
 
-	int num_phys_encs;
+	unsigned int num_phys_encs;
 	struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
 	struct sde_encoder_phys *cur_master;
 
-	void (*kms_vblank_callback)(void *);
-	void *kms_vblank_callback_data;
+	void (*crtc_vblank_cb)(void *);
+	void *crtc_vblank_cb_data;
+
+	unsigned int pending_kickoff_mask;
+	wait_queue_head_t pending_kickoff_wq;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)

@@ -255,12 +279,6 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 		if (phys && phys->ops.mode_set)
 			phys->ops.mode_set(phys, mode, adj_mode);
 	}
-
-	if (msm_is_mode_dynamic_fps(adj_mode)) {
-		if (sde_enc->cur_master->ops.flush_intf)
-			sde_enc->cur_master->ops.flush_intf(
-					sde_enc->cur_master);
-	}
 }
 
 static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)

@@ -369,44 +387,168 @@ static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc)
 	sde_enc = to_sde_encoder_virt(drm_enc);
 
 	spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
-	if (sde_enc->kms_vblank_callback)
-		sde_enc->kms_vblank_callback(sde_enc->kms_vblank_callback_data);
+	if (sde_enc->crtc_vblank_cb)
+		sde_enc->crtc_vblank_cb(sde_enc->crtc_vblank_cb_data);
 	spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
 }
 
-static int sde_encoder_virt_add_phys_vid_enc(
+void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+		void (*vbl_cb)(void *), void *vbl_data)
+{
+	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+	unsigned long lock_flags;
+	bool enable;
+	int i;
+
+	enable = vbl_cb ? true : false;
+
+	MSM_EVT(drm_enc->dev, enable, 0);
+
+	spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
+	sde_enc->crtc_vblank_cb = vbl_cb;
+	sde_enc->crtc_vblank_cb_data = vbl_data;
+	spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->ops.control_vblank_irq)
+			phys->ops.control_vblank_irq(phys, enable);
+	}
+}
+
+static void sde_encoder_handle_phys_enc_ready_for_kickoff(
+		struct drm_encoder *drm_enc,
+		struct sde_encoder_phys *ready_phys)
+{
+	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+	unsigned long lock_flags;
+	unsigned int i, mask;
+
+	/* One of the physical encoders has become ready for kickoff */
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		if (sde_enc->phys_encs[i] == ready_phys) {
+			spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
+			sde_enc->pending_kickoff_mask &= ~(1 << i);
+			mask = sde_enc->pending_kickoff_mask;
+			spin_unlock_irqrestore(&sde_enc->spin_lock,
+					lock_flags);
+			MSM_EVT(drm_enc->dev, i, mask);
+		}
+	}
+
+	/* Wake the commit thread to check if they are all ready for kickoff */
+	wake_up_all(&sde_enc->pending_kickoff_wq);
+}
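[Reviewer note] For reference, the producer side of handle_ready_for_kickoff: a phys encoder's tx-complete interrupt reports readiness through parent_ops, which clears that encoder's bit and wakes the commit thread above. A sketch; the handler name and its trigger (e.g. a pp_done interrupt for a future command-mode encoder) are illustrative:

static void example_tx_complete_irq(struct sde_encoder_phys *phys_enc)
{
	/* previous frame's tx is done: allow the next kickoff */
	if (phys_enc->parent_ops.handle_ready_for_kickoff)
		phys_enc->parent_ops.handle_ready_for_kickoff(
				phys_enc->parent, phys_enc);
}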
+
+void sde_encoder_schedule_kickoff(struct drm_encoder *drm_enc,
+		void (*kickoff_cb)(void *), void *kickoff_data)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	unsigned long lock_flags;
+	bool need_to_wait;
+	unsigned int i;
+	int ret;
+
+	if (!drm_enc) {
+		DRM_ERROR("invalid arguments");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	MSM_EVT(drm_enc->dev, 0, 0);
+
+	spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
+	sde_enc->pending_kickoff_mask = 0;
+	spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		need_to_wait = false;
+		phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->ops.prepare_for_kickoff)
+			phys->ops.prepare_for_kickoff(phys, &need_to_wait);
+
+		if (need_to_wait) {
+			spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
+			sde_enc->pending_kickoff_mask |= 1 << i;
+			spin_unlock_irqrestore(&sde_enc->spin_lock,
					lock_flags);
+		}
+	}
+
+	spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
+	MSM_EVT(drm_enc->dev, sde_enc->pending_kickoff_mask, 0);
+	spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
+
+	/* Wait for the busy phys encs to be ready */
+	ret = -ERESTARTSYS;
+	while (ret == -ERESTARTSYS) {
+		spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
+		ret = wait_event_interruptible_lock_irq_timeout(
+				sde_enc->pending_kickoff_wq,
+				sde_enc->pending_kickoff_mask == 0,
+				sde_enc->spin_lock,
+				msecs_to_jiffies(WAIT_TIMEOUT_MSEC));
+		spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
+		if (!ret)
+			DBG("wait %u msec timed out", WAIT_TIMEOUT_MSEC);
+	}
+
+	/* All phys encs are ready to go, trigger the kickoff */
+	if (kickoff_cb)
+		kickoff_cb(kickoff_data);
+
+	/* Allow phys encs to handle any post-kickoff business */
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->ops.handle_post_kickoff)
+			phys->ops.handle_post_kickoff(phys);
+	}
+}
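[Reviewer note] The wait loop above relies on wait_event_interruptible_lock_irq_timeout(), which must be entered with the spinlock held; the macro drops the lock while sleeping and re-takes it before re-checking the condition, so the IRQ path can safely clear bits under the same lock. A self-contained sketch of the idiom (names other than WAIT_TIMEOUT_MSEC are illustrative):

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Returns 0 on success, -ETIMEDOUT on timeout, -ERESTARTSYS on a signal */
static int example_wait_until_idle(wait_queue_head_t *wq,
		unsigned int *busy_mask, spinlock_t *lock)
{
	unsigned long flags;
	long ret;

	spin_lock_irqsave(lock, flags);
	ret = wait_event_interruptible_lock_irq_timeout(*wq,
			*busy_mask == 0, *lock,
			msecs_to_jiffies(WAIT_TIMEOUT_MSEC));
	spin_unlock_irqrestore(lock, flags);

	if (ret == 0)
		return -ETIMEDOUT;	/* timed out, mask still non-zero */
	return ret < 0 ? ret : 0;	/* signal, or mask reached zero */
}

Note the patch deliberately loops on -ERESTARTSYS instead of returning it, since the kickoff must still happen even if the commit thread takes a signal.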
+
+static int sde_encoder_virt_add_phys_encs(
+		enum display_interface_mode intf_mode,
 		struct sde_encoder_virt *sde_enc,
 		struct sde_kms *sde_kms,
 		enum sde_intf intf_idx,
 		enum sde_ctl ctl_idx,
 		enum sde_enc_split_role split_role)
 {
-	int ret = 0;
+	struct sde_encoder_phys *enc = NULL;
+	struct sde_encoder_virt_ops parent_ops = {
+		sde_encoder_vblank_callback,
+		sde_encoder_handle_phys_enc_ready_for_kickoff
+	};
 
 	DBG("");
 
-	if (sde_enc->num_phys_encs >= ARRAY_SIZE(sde_enc->phys_encs)) {
-		DRM_ERROR("Too many video encoders %d, unable to add\n",
+	/*
+	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
+	 * in this function, check up-front.
+	 */
+	if (sde_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+			ARRAY_SIZE(sde_enc->phys_encs)) {
+		DRM_ERROR("Too many physical encoders %d, unable to add\n",
 				sde_enc->num_phys_encs);
-		ret = -EINVAL;
-	} else {
-		struct sde_encoder_virt_ops parent_ops = {
-			sde_encoder_vblank_callback
-		};
-		struct sde_encoder_phys *enc =
-			sde_encoder_phys_vid_init(sde_kms, intf_idx, ctl_idx,
+		return -EINVAL;
+	}
+
+	if (intf_mode & DISPLAY_INTF_MODE_VID) {
+		enc = sde_encoder_phys_vid_init(sde_kms, intf_idx, ctl_idx,
 				split_role, &sde_enc->base, parent_ops);
 
 		if (IS_ERR_OR_NULL(enc)) {
-			DRM_ERROR("Failed to initialize phys enc: %ld\n",
+			DRM_ERROR("Failed to initialize phys vid enc: %ld\n",
 					PTR_ERR(enc));
-			ret = enc == 0 ? -EINVAL : PTR_ERR(enc);
-		} else {
-			sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
-			++sde_enc->num_phys_encs;
-		}
+			return enc == 0 ? -EINVAL : PTR_ERR(enc);
+		}
+
+		sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+		++sde_enc->num_phys_encs;
 	}
 
-	return ret;
+	return 0;
 }
 
 static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,

@@ -469,11 +611,15 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
 			ret = -EINVAL;
 		} else {
 			ctl_idx = hw_res_map->ctl;
-			ret = sde_encoder_virt_add_phys_vid_enc(
-					sde_enc, sde_kms,
-					intf_idx, ctl_idx, split_role);
+		}
+
+		if (!ret) {
+			ret = sde_encoder_virt_add_phys_encs(
+					disp_info->intf_mode, sde_enc, sde_kms,
+					intf_idx, ctl_idx, split_role);
 			if (ret)
-				DRM_ERROR("Failed to add phys enc\n");
+				DRM_ERROR("Failed to add phys encs\n");
 		}
 	}

@@ -510,6 +656,8 @@ static struct drm_encoder *sde_encoder_virt_init(
 	drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode);
 	drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
 	bs_init(sde_enc);
+	sde_enc->pending_kickoff_mask = 0;
+	init_waitqueue_head(&sde_enc->pending_kickoff_wq);
 
 	DBG("Created encoder");

@@ -523,39 +671,30 @@ static struct drm_encoder *sde_encoder_virt_init(
 	return ERR_PTR(ret);
 }
 
-void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
-		void (*cb)(void *), void *data)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-	unsigned long lock_flags;
-
-	DBG("");
-
-	spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
-	sde_enc->kms_vblank_callback = cb;
-	sde_enc->kms_vblank_callback_data = data;
-	spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
-}
-
-void sde_encoder_get_vblank_status(struct drm_encoder *drm_enc,
-		struct vsync_info *vsync)
+int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
-	struct sde_encoder_phys *master = NULL;
+	int i, ret = 0;
 
 	DBG("");
 
-	if (!vsync || !drm_enc) {
+	if (!drm_enc) {
 		DRM_ERROR("Invalid pointer");
-		return;
+		return -EINVAL;
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
-	memset(vsync, 0, sizeof(*vsync));
 
-	master = sde_enc->cur_master;
-	if (master && master->ops.get_vblank_status)
-		master->ops.get_vblank_status(master, vsync);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->ops.wait_for_commit_done) {
+			ret = phys->ops.wait_for_commit_done(phys);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return ret;
 }
 
 /* encoders init,
drivers/gpu/drm/msm/sde/sde_encoder_phys.h +42 −13

@@ -18,6 +18,7 @@
 #include "sde_kms.h"
 #include "sde_hw_intf.h"
 #include "sde_hw_ctl.h"
+#include "sde_hw_top.h"
 
 /**
  * enum sde_enc_split_role - Role this physical encoder will play in a

@@ -40,9 +41,13 @@ struct sde_encoder_phys;
  *	provides for the physical encoders to use to callback.
  * @handle_vblank_virt:	Notify virtual encoder of vblank IRQ reception
  *			Note: This is called from IRQ handler context.
+ * @handle_ready_for_kickoff:	Notify virtual encoder that this phys encoder
+ *			is now ready for the next kickoff.
  */
 struct sde_encoder_virt_ops {
 	void (*handle_vblank_virt)(struct drm_encoder *);
+	void (*handle_ready_for_kickoff)(struct drm_encoder *,
+			struct sde_encoder_phys *phys);
 };

@@ -60,10 +65,16 @@ struct sde_encoder_virt_ops {
  * @get_hw_resources:	Populate the structure with the hardware
  *			resources that this phys_enc is using.
  *			Expect no overlap between phys_encs.
- * @get_vblank_status:	Query hardware for the vblank info
- *			appropriate for this phys_enc (vsync/pprdptr).
- *			Only appropriate for master phys_enc.
+ * @control_vblank_irq:	Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done:	Wait for hardware to have flushed the
+ *			current pending frames to hardware
+ * @prepare_for_kickoff:	Do any work necessary prior to a kickoff
+ *			and report whether a wait is needed before
+ *			triggering the next kickoff
+ *			(i.e. for a previous tx to complete)
+ * @handle_post_kickoff:	Do any work necessary post-kickoff
  */

@@ -77,9 +88,24 @@ struct sde_encoder_phys_ops {
 	void (*destroy)(struct sde_encoder_phys *encoder);
 	void (*get_hw_resources)(struct sde_encoder_phys *encoder,
 			struct sde_encoder_hw_resources *hw_res);
-	void (*get_vblank_status)(struct sde_encoder_phys *enc,
-			struct vsync_info *vsync);
-	void (*flush_intf)(struct sde_encoder_phys *phys_enc);
+	int (*control_vblank_irq)(struct sde_encoder_phys *enc, bool enable);
+	int (*wait_for_commit_done)(struct sde_encoder_phys *phys_enc);
+	void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc,
+			bool *wait_until_ready);
+	void (*handle_post_kickoff)(struct sde_encoder_phys *phys_enc);
 };
 
+/**
+ * enum sde_enc_enable_state - current enabled state of the physical encoder
+ * @SDE_ENC_DISABLED:	Encoder is disabled
+ * @SDE_ENC_ENABLING:	Encoder transitioning to enabled;
+ *			the events bounding the transition are encoder
+ *			type specific
+ * @SDE_ENC_ENABLED:	Encoder is enabled
+ */
+enum sde_enc_enable_state {
+	SDE_ENC_DISABLED,
+	SDE_ENC_ENABLING,
+	SDE_ENC_ENABLED
+};
+
 /**

@@ -90,26 +116,27 @@ struct sde_encoder_phys_ops {
  * @ops:		Operations exposed to the virtual encoder
  * @parent_ops:		Callbacks exposed by the parent to the phys_enc
  * @hw_mdptop:		Hardware interface to the top registers
- * @hw_intf:		Hardware interface to the intf registers
  * @hw_ctl:		Hardware interface to the ctl registers
  * @sde_kms:		Pointer to the sde_kms top level
  * @cached_mode:	DRM mode cached at mode_set time, acted on in enable
- * @enabled:		Whether the encoder has enabled and running a mode
  * @split_role:		Role to play in a split-panel configuration
  * @spin_lock:		Lock for IRQ purposes
  * @mode_3d:		3D mux configuration
+ * @enable_state:	Enable state tracking
  */
 struct sde_encoder_phys {
 	struct drm_encoder *parent;
 	struct sde_encoder_phys_ops ops;
 	struct sde_encoder_virt_ops parent_ops;
 	struct sde_hw_mdp *hw_mdptop;
-	struct sde_hw_intf *hw_intf;
 	struct sde_hw_ctl *hw_ctl;
 	struct sde_kms *sde_kms;
 	struct drm_display_mode cached_mode;
-	bool enabled;
 	enum sde_enc_split_role split_role;
 	spinlock_t spin_lock;
 	enum sde_3d_blend_mode mode_3d;
+	enum sde_enc_enable_state enable_state;
 };

@@ -117,12 +144,14 @@ struct sde_encoder_phys {
  *	mode specific operations
  * @base:		Baseclass physical encoder structure
  * @irq_idx:		IRQ interface lookup index
- * @vblank_complete:	for vblank irq synchronization
+ * @hw_intf:		Hardware interface to the intf registers
+ * @vblank_completion:	Completion event signaled on reception of the vsync irq
  */
 struct sde_encoder_phys_vid {
 	struct sde_encoder_phys base;
 	int irq_idx;
-	struct completion vblank_complete;
+	struct sde_hw_intf *hw_intf;
+	struct completion vblank_completion;
 };
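[Reviewer note] To make the new ops concrete: a sketch of how a video-mode phys encoder implementation might wire them up. Video mode needs no tx handshake, so prepare_for_kickoff can decline the wait; wait_for_commit_done would block on vblank_completion. The function bodies and names below are illustrative, not taken from this patch:

static void example_vid_prepare_for_kickoff(
		struct sde_encoder_phys *phys_enc, bool *wait_until_ready)
{
	/* video mode: no pending tx to drain, kick off immediately */
	*wait_until_ready = false;
}

static void example_vid_init_ops(struct sde_encoder_phys_ops *ops)
{
	ops->prepare_for_kickoff = example_vid_prepare_for_kickoff;
	/* control_vblank_irq, wait_for_commit_done and handle_post_kickoff
	 * would be wired the same way */
}

A command-mode encoder, by contrast, would set *wait_until_ready when a tx is outstanding and later report readiness via parent_ops.handle_ready_for_kickoff from its irq handler.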