diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 3c9fc99648b7c912a4b9fa686798d8fe8d23b651..134bee53b3ab76d5e74108e002271e3d0bfd8c53 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -98,6 +98,14 @@ struct detailed_mode_closure { #define LEVEL_GTF2 2 #define LEVEL_CVT 3 +/*Enum storing luminance types for HDR blocks in EDID*/ +enum luminance_value { + NO_LUMINANCE_DATA = 3, + MAXIMUM_LUMINANCE = 4, + FRAME_AVERAGE_LUMINANCE = 5, + MINIMUM_LUMINANCE = 6 +}; + static const struct edid_quirk { char vendor[4]; int product_id; @@ -213,7 +221,8 @@ static const struct drm_display_mode drm_dmt_modes[] = { /* 0x05 - 640x480@72Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, 704, 832, 0, 480, 489, 492, 520, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 72, }, /* 0x06 - 640x480@75Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, 720, 840, 0, 480, 481, 484, 500, 0, @@ -570,7 +579,8 @@ static const struct drm_display_mode edid_est_modes[] = { DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, 704, 832, 0, 480, 489, 492, 520, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + .vrefresh = 72, }, /* 640x480@72Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704, 768, 864, 0, 480, 483, 486, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */ @@ -2823,11 +2833,12 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, return closure.modes; } - +#define VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK 0x0 #define AUDIO_BLOCK 0x01 #define VIDEO_BLOCK 0x02 #define VENDOR_BLOCK 0x03 #define SPEAKER_BLOCK 0x04 +#define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06 #define USE_EXTENDED_TAG 0x07 #define EXT_VIDEO_CAPABILITY_BLOCK 0x00 #define EXT_VIDEO_DATA_BLOCK_420 0x0E @@ -3827,6 +3838,156 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db) connector->audio_latency[1]); } +/* + * drm_extract_vcdb_info - Parse the HDMI Video Capability Data Block + * @connector: connector corresponding to the HDMI sink + * @db: start of the CEA vendor specific block + * + * Parses the HDMI VCDB to extract sink info for @connector. + */ +static void +drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db) +{ + /* + * Check if the sink specifies underscan + * support for: + * BIT 5: preferred video format + * BIT 3: IT video format + * BIT 1: CE video format + */ + + connector->pt_scan_info = + (db[2] & (BIT(4) | BIT(5))) >> 4; + connector->it_scan_info = + (db[2] & (BIT(3) | BIT(2))) >> 2; + connector->ce_scan_info = + db[2] & (BIT(1) | BIT(0)); + + DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)", + (int) connector->pt_scan_info, + (int) connector->it_scan_info, + (int) connector->ce_scan_info); +} + +static bool drm_edid_is_luminance_value_present( +u32 block_length, enum luminance_value value) +{ + return block_length > NO_LUMINANCE_DATA && value <= block_length; +} + +/* + * drm_extract_hdr_db - Parse the HDMI HDR extended block + * @connector: connector corresponding to the HDMI sink + * @db: start of the HDMI HDR extended block + * + * Parses the HDMI HDR extended block to extract sink info for @connector. 
+ */
+static void
+drm_extract_hdr_db(struct drm_connector *connector, const u8 *db)
+{
+	u8 len = 0;
+
+	if (!db)
+		return;
+
+	len = db[0] & 0x1f;
+	/* Byte 3: Electro-Optical Transfer Functions */
+	connector->hdr_eotf = db[2] & 0x3F;
+
+	/* Byte 4: Static Metadata Descriptor Type 1 */
+	connector->hdr_metadata_type_one = (db[3] & BIT(0));
+
+	/* Byte 5: Desired Content Maximum Luminance */
+	if (drm_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE))
+		connector->hdr_max_luminance =
+			db[MAXIMUM_LUMINANCE];
+
+	/* Byte 6: Desired Content Max Frame-average Luminance */
+	if (drm_edid_is_luminance_value_present(len, FRAME_AVERAGE_LUMINANCE))
+		connector->hdr_avg_luminance =
+			db[FRAME_AVERAGE_LUMINANCE];
+
+	/* Byte 7: Desired Content Min Luminance */
+	if (drm_edid_is_luminance_value_present(len, MINIMUM_LUMINANCE))
+		connector->hdr_min_luminance =
+			db[MINIMUM_LUMINANCE];
+
+	connector->hdr_supported = true;
+
+	DRM_DEBUG_KMS("HDR electro-optical %d\n", connector->hdr_eotf);
+	DRM_DEBUG_KMS("metadata desc 1 %d\n", connector->hdr_metadata_type_one);
+	DRM_DEBUG_KMS("max luminance %d\n", connector->hdr_max_luminance);
+	DRM_DEBUG_KMS("avg luminance %d\n", connector->hdr_avg_luminance);
+	DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance);
+}
+
+/*
+ * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ *
+ * Parses all the extended tag blocks to extract sink info for @connector.
+ */
+static void
+drm_hdmi_extract_extended_blk_info(struct drm_connector *connector,
+	const struct edid *edid)
+{
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db = NULL;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+
+			if (cea_db_tag(db) == USE_EXTENDED_TAG) {
+				DRM_DEBUG_KMS("found extended tag block = %d\n",
+						db[1]);
+				switch (db[1]) {
+				case VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK:
+					drm_extract_vcdb_info(connector, db);
+					break;
+				case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK:
+					drm_extract_hdr_db(connector, db);
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	}
+}
+
+static void
+parse_hdmi_hf_vsdb(struct drm_connector *connector, const u8 *db)
+{
+	u8 len = cea_db_payload_len(db);
+
+	if (len < 7)
+		return;
+
+	if (db[4] != 1)
+		return; /* invalid version */
+
+	connector->max_tmds_char = db[5] * 5;
+	connector->scdc_present = db[6] & (1 << 7);
+	connector->rr_capable = db[6] & (1 << 6);
+	connector->flags_3d = db[6] & 0x7;
+	connector->supports_scramble = connector->scdc_present &&
+			(db[6] & (1 << 3));
+
+	DRM_DEBUG_KMS(
+		"HDMI v2: max TMDS char %d, scdc %s, rr %s, 3D flags 0x%x, scramble %s\n",
+		connector->max_tmds_char,
+		connector->scdc_present ? "available" : "not available",
+		connector->rr_capable ? "capable" : "not capable",
+		connector->flags_3d,
+		connector->supports_scramble ?
"supported" : "not supported"); +} + static void monitor_name(struct detailed_timing *t, void *data) { @@ -3959,6 +4120,9 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) /* HDMI Vendor-Specific Data Block */ if (cea_db_is_hdmi_vsdb(db)) drm_parse_hdmi_vsdb_audio(connector, db); + /* HDMI Forum Vendor-Specific Data Block */ + else if (cea_db_is_hdmi_forum_vsdb(db)) + parse_hdmi_hf_vsdb(connector, db); break; default: break; @@ -4471,6 +4635,38 @@ drm_reset_display_info(struct drm_connector *connector) info->non_desktop = 0; } +static void +drm_hdmi_extract_vsdbs_info(struct drm_connector *connector, + const struct edid *edid) +{ + const u8 *cea = drm_find_cea_extension(edid); + const u8 *db = NULL; + + if (cea && cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) + return; + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + + if (cea_db_tag(db) == VENDOR_BLOCK) { + /* HDMI Vendor-Specific Data Block */ + if (cea_db_is_hdmi_vsdb(db)) { + drm_parse_hdmi_vsdb_video( + connector, db); + drm_parse_hdmi_vsdb_audio( + connector, db); + } + /* HDMI Forum Vendor-Specific Data Block */ + else if (cea_db_is_hdmi_forum_vsdb(db)) + parse_hdmi_hf_vsdb(connector, db); + } + } + } +} + u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid) { struct drm_display_info *info = &connector->display_info; @@ -4508,6 +4704,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi connector->name, info->bpc); } + /* Extract audio and video latency fields for the sink */ + drm_hdmi_extract_vsdbs_info(connector, edid); + /* Extract info from extended tag blocks */ + drm_hdmi_extract_extended_blk_info(connector, edid); + /* Only defined for 1.4 with digital displays */ if (edid->revision < 4) return quirks; diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index ffa8dc35515ffaddf0f87c56cc2108e0be4151f6..d4147cd0f3f75764d0bc59601044a9a8d804eee6 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -46,6 +46,8 @@ /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); +#define MAX_DRM_OPEN_COUNT 20 + /** * DOC: file operations * @@ -310,6 +312,11 @@ int drm_open(struct inode *inode, struct file *filp) if (!dev->open_count++) need_setup = 1; + if (dev->open_count >= MAX_DRM_OPEN_COUNT) { + retcode = -EPERM; + goto err_undo; + } + /* share address_space across all char-devs of a single device */ filp->f_mapping = dev->anon_inode->i_mapping; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 781af1d42d766bf63db12801ace4703132db84fa..77b180002d7298d4f57d418d7d6d82337ea27698 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -278,7 +278,8 @@ drm_internal_framebuffer_create(struct drm_device *dev, struct drm_framebuffer *fb; int ret; - if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) { + if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS | + DRM_MODE_FB_SECURE)) { DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 80b75501f5c6a203d7967fffdb94378b89039dab..05d7db2471512b0dfd097165eb8f24a74a3c72b5 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -456,7 +456,7 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, return -EINVAL; memset(packet, 0, sizeof(*packet)); - packet->header[0] = 
((msg->channel & 0x3) << 6) | (msg->type & 0x3f); + packet->header[2] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f); /* TODO: compute ECC if hardware support is not available */ @@ -468,16 +468,16 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, * and 2. */ if (mipi_dsi_packet_format_is_long(msg->type)) { - packet->header[1] = (msg->tx_len >> 0) & 0xff; - packet->header[2] = (msg->tx_len >> 8) & 0xff; + packet->header[0] = (msg->tx_len >> 0) & 0xff; + packet->header[1] = (msg->tx_len >> 8) & 0xff; packet->payload_length = msg->tx_len; packet->payload = msg->tx_buf; } else { const u8 *tx = msg->tx_buf; - packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0; - packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0; + packet->header[0] = (msg->tx_len > 0) ? tx[0] : 0; + packet->header[1] = (msg->tx_len > 1) ? tx[1] : 0; } packet->size = sizeof(packet->header) + packet->payload_length; diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index cdb10f885a4febea85fc5272e22f1378d770da8b..450a51b5744c97ac54f0c8c1b385d7f7f55bf81b 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -26,6 +26,9 @@ #include "drm_crtc_internal.h" +#define MAX_BLOB_PROP_SIZE (PAGE_SIZE * 30) +#define MAX_BLOB_PROP_COUNT 250 + /** * DOC: overview * @@ -556,7 +559,8 @@ drm_property_create_blob(struct drm_device *dev, size_t length, struct drm_property_blob *blob; int ret; - if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) + if (!length || length > MAX_BLOB_PROP_SIZE - + sizeof(struct drm_property_blob)) return ERR_PTR(-EINVAL); blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); @@ -782,12 +786,19 @@ int drm_mode_createblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_create_blob *out_resp = data; - struct drm_property_blob *blob; + struct drm_property_blob *blob, *bt; int ret = 0; + u32 count = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; + list_for_each_entry(bt, &file_priv->blobs, head_file) + count++; + + if (count >= MAX_BLOB_PROP_COUNT) + return -EINVAL; + blob = drm_property_create_blob(dev, out_resp->length, NULL); if (IS_ERR(blob)) return PTR_ERR(blob); diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 736890fcb73ff27db4affc85b2394e87e3bd2152..ea44cb9ef497571e8f8971dddff5002df1b3753a 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -122,6 +122,16 @@ config DRM_MSM_DSI_PLL Choose this option to enable DSI PLL driver which provides DSI source clocks under common clock framework. +config DRM_SDE_WB + bool "Enable Writeback support in SDE DRM" + depends on DRM_MSM && DRM_MSM_SDE + default y + help + Choose this option for writeback connector support. + This option enables a virtual writeback connector where + the output image is written back to memory in the format + selected by the connector's mode and property settings. 
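For orientation, here is a minimal userspace-side sketch of exercising such a writeback connector through the atomic API. This is an assumption-laden illustration, not the SDE-specific interface: it presumes a libdrm environment, the WRITEBACK_FB_ID / WRITEBACK_OUT_FENCE_PTR property names follow the upstream writeback convention, and all IDs are placeholders looked up elsewhere.

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* Queue one frame into a writeback connector; returns the out-fence fd
	 * on success or a negative value on failure. The property IDs are
	 * assumed to have been resolved beforehand with
	 * drmModeObjectGetProperties().
	 */
	static int queue_writeback(int fd, uint32_t wb_conn_id,
				   uint32_t crtc_prop_id, uint32_t crtc_id,
				   uint32_t fb_prop_id, uint32_t fb_id,
				   uint32_t fence_prop_id)
	{
		drmModeAtomicReq *req = drmModeAtomicAlloc();
		int out_fence = -1;
		int ret;

		if (!req)
			return -ENOMEM;

		/* Bind the writeback connector to a CRTC, point it at the
		 * destination framebuffer, and ask for a completion fence. */
		drmModeAtomicAddProperty(req, wb_conn_id, crtc_prop_id, crtc_id);
		drmModeAtomicAddProperty(req, wb_conn_id, fb_prop_id, fb_id);
		drmModeAtomicAddProperty(req, wb_conn_id, fence_prop_id,
					 (uint64_t)(uintptr_t)&out_fence);

		ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
		drmModeAtomicFree(req);
		return ret ? ret : out_fence;
	}

The returned fence fd signals once the write of the output image to memory has completed, which is the natural point to consume the destination buffer.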
+ config DRM_MSM_DSI_28NM_PHY bool "Enable DSI 28nm PHY driver in MSM DRM" depends on DRM_MSM_DSI diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 911a1bf42e340aae41949aa0bec0abc6cd053ad8..26acd086edebeb998c3b696e809325e9986e5be3 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -1,9 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-$(CONFIG_DRM_MSM_DP):= -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging -Idrivers/gpu/drm/msm/dp +ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging -Idrivers/gpu/drm/msm/dp ccflags-y += -Idrivers/gpu/drm/msm/display-manager ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi -ccflags-$(CONFIG_DRM_MSM_SDE) += -Idrivers/gpu/drm/msm/sde +ccflags-y += -Idrivers/gpu/drm/msm/sde ccflags-y += -Idrivers/media/platform/msm/sde/rotator ccflags-y += -Idrivers/gpu/drm/msm/hdmi @@ -12,12 +12,15 @@ msm_drm-$(CONFIG_DRM_MSM_DP) += dp/dp_usbpd.o \ dp/dp_power.o \ dp/dp_catalog.o \ dp/dp_catalog_v420.o \ + dp/dp_catalog_v200.o \ dp/dp_aux.o \ dp/dp_panel.o \ dp/dp_link.o \ dp/dp_ctrl.o \ dp/dp_audio.o \ dp/dp_debug.o \ + dp/dp_hpd.o \ + dp/dp_gpio_hpd.o \ dp/dp_display.o \ dp/dp_drm.o \ dp/dp_hdcp2p2.o \ @@ -201,7 +204,9 @@ msm_drm-$(CONFIG_DRM_MSM) += \ msm_gem_vma.o \ msm_gpu.o \ msm_iommu.o \ + msm_smmu.o \ msm_perf.o \ + msm_prop.o \ msm_rd.o \ msm_ringbuffer.o \ msm_submitqueue.o diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c index 3754692c5fbc3e9e3b4a10bd94933e1efefb8fe7..d76b9da698561fae8a5fd52fc03eef287b7b92e0 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.c +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -607,10 +607,11 @@ static int dp_audio_register_ext_disp(struct dp_audio_private *audio) rc = -ENODEV; goto end; } - +#if defined(CONFIG_MSM_EXT_DISPLAY) rc = msm_ext_disp_register_intf(audio->ext_pdev, ext); if (rc) pr_err("failed to register disp\n"); +#endif end: if (pd) of_node_put(pd); @@ -647,9 +648,11 @@ static int dp_audio_deregister_ext_disp(struct dp_audio_private *audio) goto end; } +#if defined(CONFIG_MSM_EXT_DISPLAY) rc = msm_ext_disp_deregister_intf(audio->ext_pdev, ext); if (rc) pr_err("failed to deregister disp\n"); +#endif end: return rc; diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 593680ebf515ab5860b9a9cc40a56bce1f235167..2ec8d0886364c6042b585ef5c5f172093532102b 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -849,7 +849,7 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev) /* wait for idle state */ cancel_delayed_work(&dp->connect_work); - cancel_work(&dp->attention_work); + cancel_work_sync(&dp->attention_work); flush_workqueue(dp->wq); if (!dp->debug->sim_mode && !dp->parser->no_aux_switch) @@ -990,7 +990,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev) /* wait for idle state */ cancel_delayed_work(&dp->connect_work); - cancel_work(&dp->attention_work); + cancel_work_sync(&dp->attention_work); flush_workqueue(dp->wq); dp_display_handle_disconnect(dp); diff --git a/drivers/gpu/drm/msm/dp/dp_mst_drm.c b/drivers/gpu/drm/msm/dp/dp_mst_drm.c index cca0034b3ec51577d6ef009ab48fde4b8995ce20..34a9f966bc3a589d92789cbdb364ff3831c2c054 100644 --- a/drivers/gpu/drm/msm/dp/dp_mst_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_mst_drm.c @@ -1267,7 +1267,7 @@ dp_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr, 
	connector->funcs->reset(connector);
	for (i = 1; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		drm_mode_connector_attach_encoder(connector,
+		drm_connector_attach_encoder(connector,
			dp_mst->mst_bridge[i].encoder);
	}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0635c3da7f48ad1bdc15c2305fe4d0b1cb48ff1..376f92e65a181e883617c2c7d1e89a69b73fc4b9 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -15,11 +16,148 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/msm_drm_notify.h>
+#include <linux/notifier.h>
+
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_kms.h"
+#include "msm_fence.h"
+#include "sde_trace.h"
+
+#define MULTIPLE_CONN_DETECTED(x) (x > 1)
+
+struct msm_commit {
+	struct drm_device *dev;
+	struct drm_atomic_state *state;
+	uint32_t crtc_mask;
+	bool nonblock;
+	struct kthread_work commit_work;
+};
+
+static BLOCKING_NOTIFIER_HEAD(msm_drm_notifier_list);
+
+/**
+ * msm_drm_register_client - register a client notifier
+ * @nb: notifier block to callback on events
+ *
+ * This function registers a notifier callback function
+ * to msm_drm_notifier_list, which will be called on an
+ * unblank/power down event.
+ */
+int msm_drm_register_client(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&msm_drm_notifier_list,
+						nb);
+}
+EXPORT_SYMBOL(msm_drm_register_client);
+
+/**
+ * msm_drm_unregister_client - unregister a client notifier
+ * @nb: notifier block to callback on events
+ *
+ * This function unregisters the callback function from
+ * msm_drm_notifier_list.
+ */
+int msm_drm_unregister_client(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&msm_drm_notifier_list,
+						  nb);
+}
+EXPORT_SYMBOL(msm_drm_unregister_client);
+
+/**
+ * msm_drm_notifier_call_chain - notify clients of drm events
+ * @val: event MSM_DRM_EARLY_EVENT_BLANK or MSM_DRM_EVENT_BLANK
+ * @v: notifier data, including the display id and the display blank
+ * event (unblank or power down).
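+ *
+ * A client observes these events through a standard notifier_block. As
+ * a hypothetical sketch (callback and block names are assumed, not part
+ * of this patch):
+ *
+ *	static int my_blank_cb(struct notifier_block *nb,
+ *			       unsigned long event, void *data)
+ *	{
+ *		struct msm_drm_notifier *ev = data;
+ *		int blank = (ev && ev->data) ? *(int *)ev->data : -1;
+ *
+ *		if (event == MSM_DRM_EVENT_BLANK &&
+ *		    blank == MSM_DRM_BLANK_UNBLANK)
+ *			pr_debug("display %d unblanked\n", ev->id);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ * registered with msm_drm_register_client(&my_nb) and torn down with
+ * msm_drm_unregister_client(&my_nb).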
+ */ +static int msm_drm_notifier_call_chain(unsigned long val, void *v) +{ + return blocking_notifier_call_chain(&msm_drm_notifier_list, val, + v); +} + +static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state, + struct drm_crtc_state *crtc_state, bool enable) +{ + struct drm_connector *connector = NULL; + struct drm_connector_state *conn_state = NULL; + int i = 0; + int conn_cnt = 0; + + if (msm_is_mode_seamless(&crtc_state->mode) || + msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode)) + return true; + + if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable) + return true; + + if (!crtc_state->mode_changed && crtc_state->connectors_changed) { + for_each_old_connector_in_state(state, connector, + conn_state, i) { + if ((conn_state->crtc == crtc_state->crtc) || + (connector->state->crtc == + crtc_state->crtc)) + conn_cnt++; + + if (MULTIPLE_CONN_DETECTED(conn_cnt)) + return true; + } + } + + return false; +} + +static inline bool _msm_seamless_for_conn(struct drm_connector *connector, + struct drm_connector_state *old_conn_state, bool enable) +{ + if (!old_conn_state || !old_conn_state->crtc) + return false; + + if (!old_conn_state->crtc->state->mode_changed && + !old_conn_state->crtc->state->active_changed && + old_conn_state->crtc->state->connectors_changed) { + if (old_conn_state->crtc == connector->state->crtc) + return true; + } -static void msm_atomic_wait_for_commit_done(struct drm_device *dev, + if (enable) + return false; + + if (msm_is_mode_seamless(&connector->encoder->crtc->state->mode)) + return true; + + if (msm_is_mode_seamless_vrr( + &connector->encoder->crtc->state->adjusted_mode)) + return true; + + if (msm_is_mode_seamless_dms( + &connector->encoder->crtc->state->adjusted_mode)) + return true; + + return false; +} + +/* clear specified crtcs (no longer pending update) */ +static void commit_destroy(struct msm_commit *c) +{ + struct msm_drm_private *priv = c->dev->dev_private; + uint32_t crtc_mask = c->crtc_mask; + + /* End_atomic */ + spin_lock(&priv->pending_crtcs_event.lock); + DBG("end: %08x", crtc_mask); + priv->pending_crtcs &= ~crtc_mask; + wake_up_all_locked(&priv->pending_crtcs_event); + spin_unlock(&priv->pending_crtcs_event.lock); + + if (c->nonblock) + kfree(c); +} + +static void msm_atomic_wait_for_commit_done( + struct drm_device *dev, struct drm_atomic_state *old_state) { struct drm_crtc *crtc; @@ -36,6 +174,362 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev, } } +static void +msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) +{ + struct drm_connector *connector; + struct drm_connector_state *old_conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + struct msm_drm_notifier notifier_data; + int i, blank; + + SDE_ATRACE_BEGIN("msm_disable"); + for_each_old_connector_in_state(old_state, connector, + old_conn_state, i) { + const struct drm_encoder_helper_funcs *funcs; + struct drm_encoder *encoder; + struct drm_crtc_state *old_crtc_state; + unsigned int crtc_idx; + + /* + * Shut down everything that's in the changeset and currently + * still on. So need to check the old, saved state. 
+ */
+		if (!old_conn_state->crtc)
+			continue;
+
+		crtc_idx = drm_crtc_index(old_conn_state->crtc);
+		old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
+							old_conn_state->crtc);
+
+		if (!old_crtc_state->active ||
+		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+			continue;
+
+		encoder = old_conn_state->best_encoder;
+
+		/* We shouldn't get this far if we didn't previously have
+		 * an encoder.. but WARN_ON() rather than explode.
+		 */
+		if (WARN_ON(!encoder))
+			continue;
+
+		if (_msm_seamless_for_conn(connector, old_conn_state, false))
+			continue;
+
+		funcs = encoder->helper_private;
+
+		DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
+				 encoder->base.id, encoder->name);
+
+		if (connector->state->crtc &&
+		    connector->state->crtc->state->active_changed) {
+			blank = MSM_DRM_BLANK_POWERDOWN;
+			notifier_data.data = &blank;
+			notifier_data.id = crtc_idx;
+			msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
+						    &notifier_data);
+		}
+		/*
+		 * Each encoder has at most one connector (since we always steal
+		 * it away), so we won't call disable hooks twice.
+		 */
+		drm_bridge_disable(encoder->bridge);
+
+		/* Right function depends upon target state. */
+		if (connector->state->crtc && funcs->prepare)
+			funcs->prepare(encoder);
+		else if (funcs->disable)
+			funcs->disable(encoder);
+		else
+			funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+		drm_bridge_post_disable(encoder->bridge);
+		if (connector->state->crtc &&
+		    connector->state->crtc->state->active_changed) {
+			DRM_DEBUG_ATOMIC("Notify blank\n");
+			msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
+						    &notifier_data);
+		}
+	}
+
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
+
+		/* Shut down everything that needs a full modeset. */
+		if (!drm_atomic_crtc_needs_modeset(crtc->state))
+			continue;
+
+		if (!old_crtc_state->active)
+			continue;
+
+		if (_msm_seamless_for_crtc(old_state, crtc->state, false))
+			continue;
+
+		funcs = crtc->helper_private;
+
+		DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
+				 crtc->base.id);
+
+		/* Right function depends upon target state.
*/ + if (crtc->state->enable && funcs->prepare) + funcs->prepare(crtc); + else if (funcs->disable) + funcs->disable(crtc); + else + funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + } + SDE_ATRACE_END("msm_disable"); +} + +static void +msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + struct drm_connector *connector; + struct drm_connector_state *old_conn_state; + int i; + + for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { + const struct drm_crtc_helper_funcs *funcs; + + if (!crtc->state->mode_changed) + continue; + + funcs = crtc->helper_private; + + if (crtc->state->enable && funcs->mode_set_nofb) { + DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n", + crtc->base.id); + + funcs->mode_set_nofb(crtc); + } + } + + for_each_old_connector_in_state(old_state, connector, + old_conn_state, i) { + const struct drm_encoder_helper_funcs *funcs; + struct drm_crtc_state *new_crtc_state; + struct drm_encoder *encoder; + struct drm_display_mode *mode, *adjusted_mode; + + if (!connector->state->best_encoder) + continue; + + encoder = connector->state->best_encoder; + funcs = encoder->helper_private; + new_crtc_state = connector->state->crtc->state; + mode = &new_crtc_state->mode; + adjusted_mode = &new_crtc_state->adjusted_mode; + + if (!new_crtc_state->mode_changed && + new_crtc_state->connectors_changed) { + if (_msm_seamless_for_conn(connector, + old_conn_state, false)) + continue; + } else if (!new_crtc_state->mode_changed) { + continue; + } + + DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); + + /* + * Each encoder has at most one connector (since we always steal + * it away), so we won't call mode_set hooks twice. + */ + if (funcs->mode_set) + funcs->mode_set(encoder, mode, adjusted_mode); + + drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode); + } +} + +/** + * msm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs + * @dev: DRM device + * @old_state: atomic state object with old state structures + * + * This function shuts down all the outputs that need to be shut down and + * prepares them (if required) with the new mode. + * + * For compatibility with legacy crtc helpers this should be called before + * drm_atomic_helper_commit_planes(), which is what the default commit function + * does. But drivers with different needs can group the modeset commits together + * and do the plane commits at the end. This is useful for drivers doing runtime + * PM since planes updates then only happen when the CRTC is actually enabled. + */ +void msm_atomic_helper_commit_modeset_disables(struct drm_device *dev, + struct drm_atomic_state *old_state) +{ + msm_disable_outputs(dev, old_state); + + drm_atomic_helper_update_legacy_modeset_state(dev, old_state); + + msm_crtc_set_mode(dev, old_state); +} + +/** + * msm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs + * @dev: DRM device + * @old_state: atomic state object with old state structures + * + * This function enables all the outputs with the new configuration which had to + * be turned off for the update. + * + * For compatibility with legacy crtc helpers this should be called after + * drm_atomic_helper_commit_planes(), which is what the default commit function + * does. But drivers with different needs can group the modeset commits together + * and do the plane commits at the end. 
This is useful for drivers doing runtime
+ * PM since planes updates then only happen when the CRTC is actually enabled.
+ */
+static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+		struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc_state *new_crtc_state;
+	struct drm_connector *connector;
+	struct drm_connector_state *new_conn_state;
+	struct msm_drm_notifier notifier_data;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	int bridge_enable_count = 0;
+	int i, blank;
+	bool splash = false;
+
+	SDE_ATRACE_BEGIN("msm_enable");
+	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state,
+			new_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
+
+		/* Need to filter out CRTCs where only planes change. */
+		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+			continue;
+
+		if (!new_crtc_state->active)
+			continue;
+
+		if (_msm_seamless_for_crtc(old_state, crtc->state, true))
+			continue;
+
+		funcs = crtc->helper_private;
+
+		if (crtc->state->enable) {
+			DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
+					 crtc->base.id);
+
+			if (funcs->atomic_enable)
+				funcs->atomic_enable(crtc, old_crtc_state);
+			else
+				funcs->commit(crtc);
+		}
+
+		if (msm_needs_vblank_pre_modeset(
+					&new_crtc_state->adjusted_mode))
+			drm_crtc_wait_one_vblank(crtc);
+	}
+
+	for_each_new_connector_in_state(old_state, connector,
+			new_conn_state, i) {
+		const struct drm_encoder_helper_funcs *funcs;
+		struct drm_encoder *encoder;
+		struct drm_connector_state *old_conn_state;
+
+		if (!new_conn_state->best_encoder)
+			continue;
+
+		if (!new_conn_state->crtc->state->active ||
+		    !drm_atomic_crtc_needs_modeset(
+				new_conn_state->crtc->state))
+			continue;
+
+		old_conn_state = drm_atomic_get_old_connector_state(
+				old_state, connector);
+		if (_msm_seamless_for_conn(connector, old_conn_state, true))
+			continue;
+
+		encoder = connector->state->best_encoder;
+		funcs = encoder->helper_private;
+
+		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
+				 encoder->base.id, encoder->name);
+
+		if (kms && kms->funcs && kms->funcs->check_for_splash)
+			splash = kms->funcs->check_for_splash(kms);
+
+		if (splash || (connector->state->crtc &&
+			connector->state->crtc->state->active_changed)) {
+			blank = MSM_DRM_BLANK_UNBLANK;
+			notifier_data.data = &blank;
+			notifier_data.id =
+				connector->state->crtc->index;
+			DRM_DEBUG_ATOMIC("Notify early unblank\n");
+			msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
+						    &notifier_data);
+		}
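+		/*
+		 * The early MSM_DRM_EARLY_EVENT_BLANK sent above is paired
+		 * with a final MSM_DRM_EVENT_BLANK emitted after
+		 * drm_bridge_enable() in the second connector pass below,
+		 * mirroring the powerdown pair sent from
+		 * msm_disable_outputs().
+		 */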
+		/*
+		 * Each encoder has at most one connector (since we always steal
+		 * it away), so we won't call enable hooks twice.
+		 */
+		drm_bridge_pre_enable(encoder->bridge);
+		++bridge_enable_count;
+
+		if (funcs->enable)
+			funcs->enable(encoder);
+		else
+			funcs->commit(encoder);
+	}
+
+	if (kms && kms->funcs && kms->funcs->commit) {
+		DRM_DEBUG_ATOMIC("triggering commit\n");
+		kms->funcs->commit(kms, old_state);
+	}
+
+	/* If no bridges were pre_enabled, skip iterating over them again */
+	if (bridge_enable_count == 0) {
+		SDE_ATRACE_END("msm_enable");
+		return;
+	}
+
+	for_each_new_connector_in_state(old_state, connector,
+			new_conn_state, i) {
+		struct drm_encoder *encoder;
+		struct drm_connector_state *old_conn_state;
+
+		if (!new_conn_state->best_encoder)
+			continue;
+
+		if (!new_conn_state->crtc->state->active ||
+		    !drm_atomic_crtc_needs_modeset(
+				new_conn_state->crtc->state))
+			continue;
+
+		old_conn_state = drm_atomic_get_old_connector_state(
+				old_state, connector);
+		if (_msm_seamless_for_conn(connector, old_conn_state, true))
+			continue;
+
+		encoder = connector->state->best_encoder;
+
+		DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
+				 encoder->base.id, encoder->name);
+
+		drm_bridge_enable(encoder->bridge);
+
+		if (splash || (connector->state->crtc &&
+			connector->state->crtc->state->active_changed)) {
+			DRM_DEBUG_ATOMIC("Notify unblank\n");
+			msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
+						    &notifier_data);
+		}
+	}
+	SDE_ATRACE_END("msm_enable");
+}
+
 int msm_atomic_prepare_fb(struct drm_plane *plane,
 		struct drm_plane_state *new_state)
 {
@@ -57,6 +551,295 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
 	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
 }
 
+/* The (potentially) asynchronous part of the commit.  At this point
+ * nothing can fail short of armageddon.
+ */
+static void complete_commit(struct msm_commit *c)
+{
+	struct drm_atomic_state *state = c->state;
+	struct drm_device *dev = state->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+
+	drm_atomic_helper_wait_for_fences(dev, state, false);
+
+	kms->funcs->prepare_commit(kms, state);
+
+	msm_atomic_helper_commit_modeset_disables(dev, state);
+
+	drm_atomic_helper_commit_planes(dev, state, 0);
+
+	msm_atomic_helper_commit_modeset_enables(dev, state);
+
+	/* NOTE: _wait_for_vblanks() only waits for vblank on
+	 * enabled CRTCs.  So we end up faulting when disabling
+	 * due to (potentially) unref'ing the outgoing fb's
+	 * before the vblank when the disable has latched.
+	 *
+	 * But if it did wait on disabled (or newly disabled)
+	 * CRTCs, that would be racy (ie. we could have missed
+	 * the irq.  We need some way to poll for pipe shut
+	 * down.
Or just live with occasionally hitting the + * timeout in the CRTC disable path (which really should + * not be critical path) + */ + + msm_atomic_wait_for_commit_done(dev, state); + + drm_atomic_helper_cleanup_planes(dev, state); + + kms->funcs->complete_commit(kms, state); + + drm_atomic_state_put(state); + + commit_destroy(c); +} + +static void _msm_drm_commit_work_cb(struct kthread_work *work) +{ + struct msm_commit *commit = NULL; + + if (!work) { + DRM_ERROR("%s: Invalid commit work data!\n", __func__); + return; + } + + commit = container_of(work, struct msm_commit, commit_work); + + SDE_ATRACE_BEGIN("complete_commit"); + complete_commit(commit); + SDE_ATRACE_END("complete_commit"); +} + +static struct msm_commit *commit_init(struct drm_atomic_state *state, + bool nonblock) +{ + struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL); + + if (!c) + return NULL; + + c->dev = state->dev; + c->state = state; + c->nonblock = nonblock; + + kthread_init_work(&c->commit_work, _msm_drm_commit_work_cb); + + return c; +} + +/* Start display thread function */ +static void msm_atomic_commit_dispatch(struct drm_device *dev, + struct drm_atomic_state *state, struct msm_commit *commit) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_crtc *crtc = NULL; + struct drm_crtc_state *crtc_state = NULL; + int ret = -EINVAL, i = 0, j = 0; + bool nonblock; + + /* cache since work will kfree commit in non-blocking case */ + nonblock = commit->nonblock; + + for_each_old_crtc_in_state(state, crtc, crtc_state, i) { + for (j = 0; j < priv->num_crtcs; j++) { + if (priv->disp_thread[j].crtc_id == + crtc->base.id) { + if (priv->disp_thread[j].thread) { + kthread_queue_work( + &priv->disp_thread[j].worker, + &commit->commit_work); + /* only return zero if work is + * queued successfully. + */ + ret = 0; + } else { + DRM_ERROR(" Error for crtc_id: %d\n", + priv->disp_thread[j].crtc_id); + } + break; + } + } + /* + * TODO: handle cases where there will be more than + * one crtc per commit cycle. Remove this check then. + * Current assumption is there will be only one crtc + * per commit cycle. + */ + if (j < priv->num_crtcs) + break; + } + + if (ret) { + /** + * this is not expected to happen, but at this point the state + * has been swapped, but we couldn't dispatch to a crtc thread. + * fallback now to a synchronous complete_commit to try and + * ensure that SW and HW state don't get out of sync. + */ + DRM_ERROR("failed to dispatch commit to any CRTC\n"); + complete_commit(commit); + } else if (!nonblock) { + kthread_flush_work(&commit->commit_work); + } + + /* free nonblocking commits in this context, after processing */ + if (!nonblock) + kfree(commit); +} + +/** + * drm_atomic_helper_commit - commit validated state object + * @dev: DRM device + * @state: the driver state object + * @nonblock: nonblocking commit + * + * This function commits a with drm_atomic_helper_check() pre-validated state + * object. This can still fail when e.g. the framebuffer reservation fails. + * + * RETURNS + * Zero for success or -errno. 
+ */
+int msm_atomic_commit(struct drm_device *dev,
+		struct drm_atomic_state *state, bool nonblock)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_commit *c;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane *plane;
+	struct drm_plane_state *old_plane_state, *new_plane_state;
+	int i, ret;
+
+	if (!priv || priv->shutdown_in_progress) {
+		DRM_ERROR("priv is null or shutdown is in-progress\n");
+		return -EINVAL;
+	}
+
+	SDE_ATRACE_BEGIN("atomic_commit");
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+	if (ret) {
+		SDE_ATRACE_END("atomic_commit");
+		return ret;
+	}
+
+	c = commit_init(state, nonblock);
+	if (!c) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	/*
+	 * Figure out what crtcs we have:
+	 */
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
+		c->crtc_mask |= drm_crtc_mask(crtc);
+
+	/*
+	 * Figure out what fence to wait for:
+	 */
+	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
+			new_plane_state, i) {
+		if ((new_plane_state->fb != old_plane_state->fb)
+				&& new_plane_state->fb) {
+			struct drm_gem_object *obj =
+				msm_framebuffer_bo(new_plane_state->fb, 0);
+			struct msm_gem_object *msm_obj = to_msm_bo(obj);
+			struct dma_fence *fence =
+				reservation_object_get_excl_rcu(msm_obj->resv);
+
+			drm_atomic_set_fence_for_plane(new_plane_state, fence);
+		}
+	}
+
+	/*
+	 * Wait for pending updates on any of the same crtc's and then
+	 * mark our set of crtc's as busy:
+	 */
+
+	/* Start Atomic */
+	spin_lock(&priv->pending_crtcs_event.lock);
+	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
+			!(priv->pending_crtcs & c->crtc_mask));
+	if (ret == 0) {
+		DBG("start: %08x", c->crtc_mask);
+		priv->pending_crtcs |= c->crtc_mask;
+	}
+	spin_unlock(&priv->pending_crtcs_event.lock);
+
+	if (ret)
+		goto err_free;
+
+	WARN_ON(drm_atomic_helper_swap_state(state, false) < 0);
+
+	/*
+	 * Provide the driver a chance to prepare for output fences. This is
+	 * done after the point of no return, but before asynchronous commits
+	 * are dispatched to work queues, so that the fence preparation is
+	 * finished before the .atomic_commit returns.
+	 */
+	if (priv && priv->kms && priv->kms->funcs &&
+	    priv->kms->funcs->prepare_fence)
+		priv->kms->funcs->prepare_fence(priv->kms, state);
+
+	/*
+	 * Everything below can be run asynchronously without the need to grab
+	 * any modeset locks at all under one condition: It must be guaranteed
+	 * that the asynchronous work has either been cancelled (if the driver
+	 * supports it, which at least requires that the framebuffers get
+	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+	 * before the new state gets committed on the software side with
+	 * drm_atomic_helper_swap_state().
+	 *
+	 * This scheme allows new atomic state updates to be prepared and
+	 * checked in parallel to the asynchronous completion of the previous
+	 * update.
Which is important since compositors need to figure out the + * composition of the next frame right after having submitted the + * current layout + */ + + drm_atomic_state_get(state); + msm_atomic_commit_dispatch(dev, state, c); + + SDE_ATRACE_END("atomic_commit"); + + return 0; +err_free: + kfree(c); +error: + drm_atomic_helper_cleanup_planes(dev, state); + SDE_ATRACE_END("atomic_commit"); + return ret; +} + +struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev) +{ + struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL); + + if (!state || drm_atomic_state_init(dev, &state->base) < 0) { + kfree(state); + return NULL; + } + + return &state->base; +} + +void msm_atomic_state_clear(struct drm_atomic_state *s) +{ + struct msm_kms_state *state = to_kms_state(s); + + drm_atomic_state_default_clear(&state->base); + kfree(state->state); + state->state = NULL; +} + +void msm_atomic_state_free(struct drm_atomic_state *state) +{ + kfree(to_kms_state(state)->state); + drm_atomic_state_default_release(state); + kfree(state); +} + void msm_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index be0ff3566b98e2ddfd66f9a1883b546afd59b7b0..5b0b9fff235e53d36cb09e3ca2bb9f62140607a0 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark * @@ -14,7 +15,31 @@ * You should have received a copy of the GNU General Public License along with * this program. If not, see . */ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ +#include +#include +#include #include #include "msm_drv.h" @@ -22,7 +47,8 @@ #include "msm_fence.h" #include "msm_gpu.h" #include "msm_kms.h" - +#include "msm_mmu.h" +#include "sde_wb.h" /* * MSM driver version: @@ -37,11 +63,58 @@ #define MSM_VERSION_MINOR 3 #define MSM_VERSION_PATCHLEVEL 0 +static void msm_fb_output_poll_changed(struct drm_device *dev) +{ + struct msm_drm_private *priv = NULL; + + if (!dev) { + DRM_ERROR("output_poll_changed failed, invalid input\n"); + return; + } + + priv = dev->dev_private; + + if (priv->fbdev) + drm_fb_helper_hotplug_event(priv->fbdev); +} + +/** + * msm_atomic_helper_check - validate state object + * @dev: DRM device + * @state: the driver state object + * + * This is a wrapper for the drm_atomic_helper_check to check the modeset + * and state checking for planes. Additionally it checks if any secure + * transition(moving CRTC and planes between secure and non-secure states and + * vice versa) is allowed or not. When going to secure state, planes + * with fb_mode as dir translated only can be staged on the CRTC, and only one + * CRTC should be active. + * Also mixing of secure and non-secure is not allowed. + * + * RETURNS + * Zero for success or -errorno. + */ +int msm_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct msm_drm_private *priv; + + priv = dev->dev_private; + if (priv && priv->kms && priv->kms->funcs && + priv->kms->funcs->atomic_check) + return priv->kms->funcs->atomic_check(priv->kms, state); + + return drm_atomic_helper_check(dev, state); +} + static const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = msm_framebuffer_create, - .output_poll_changed = drm_fb_helper_output_poll_changed, - .atomic_check = drm_atomic_helper_check, - .atomic_commit = drm_atomic_helper_commit, + .output_poll_changed = msm_fb_output_poll_changed, + .atomic_check = msm_atomic_check, + .atomic_commit = msm_atomic_commit, + .atomic_state_alloc = msm_atomic_state_alloc, + .atomic_state_clear = msm_atomic_state_clear, + .atomic_state_free = msm_atomic_state_free, }; static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = { @@ -180,23 +253,48 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, } if (reglog) - printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size); + dev_dbg(&pdev->dev, "IO:region %s %pK %08lx\n", + dbgname, ptr, size); return ptr; } +unsigned long msm_iomap_size(struct platform_device *pdev, const char *name) +{ + struct resource *res; + + if (name) + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + else + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!res) { + dev_err(&pdev->dev, "failed to get memory resource: %s\n", + name); + return 0; + } + + return resource_size(res); +} + +void msm_iounmap(struct platform_device *pdev, void __iomem *addr) +{ + devm_iounmap(&pdev->dev, addr); +} + void msm_writel(u32 data, void __iomem *addr) { if (reglog) - printk(KERN_DEBUG "IO:W %p %08x\n", addr, data); + pr_debug("IO:W %pK %08x\n", addr, data); writel(data, addr); } u32 msm_readl(const void __iomem *addr) { u32 val = readl(addr); + if (reglog) - pr_err("IO:R %p %08x\n", addr, val); + pr_err("IO:R %pK %08x\n", addr, val); return val; } @@ -206,7 +304,7 @@ struct vblank_event { bool enable; }; -static void vblank_ctrl_worker(struct work_struct *work) +static void vblank_ctrl_worker(struct kthread_work *work) { struct msm_vblank_ctrl *vbl_ctrl = container_of(work, struct msm_vblank_ctrl, work); @@ -215,12 +313,16 
@@ static void vblank_ctrl_worker(struct work_struct *work) struct msm_kms *kms = priv->kms; struct vblank_event *vbl_ev, *tmp; unsigned long flags; + LIST_HEAD(tmp_head); spin_lock_irqsave(&vbl_ctrl->lock, flags); list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { list_del(&vbl_ev->node); - spin_unlock_irqrestore(&vbl_ctrl->lock, flags); + list_add_tail(&vbl_ev->node, &tmp_head); + } + spin_unlock_irqrestore(&vbl_ctrl->lock, flags); + list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) { if (vbl_ev->enable) kms->funcs->enable_vblank(kms, priv->crtcs[vbl_ev->crtc_id]); @@ -229,11 +331,7 @@ static void vblank_ctrl_worker(struct work_struct *work) priv->crtcs[vbl_ev->crtc_id]); kfree(vbl_ev); - - spin_lock_irqsave(&vbl_ctrl->lock, flags); } - - spin_unlock_irqrestore(&vbl_ctrl->lock, flags); } static int vblank_ctrl_queue_work(struct msm_drm_private *priv, @@ -254,7 +352,8 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv, list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list); spin_unlock_irqrestore(&vbl_ctrl->lock, flags); - queue_work(priv->wq, &vbl_ctrl->work); + kthread_queue_work(&priv->disp_thread[crtc_id].worker, + &vbl_ctrl->work); return 0; } @@ -265,24 +364,46 @@ static int msm_drm_uninit(struct device *dev) struct drm_device *ddev = platform_get_drvdata(pdev); struct msm_drm_private *priv = ddev->dev_private; struct msm_kms *kms = priv->kms; + struct msm_gpu *gpu = priv->gpu; struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; struct vblank_event *vbl_ev, *tmp; + int i; /* We must cancel and cleanup any pending vblank enable/disable * work before drm_irq_uninstall() to avoid work re-enabling an * irq after uninstall has disabled it. */ - cancel_work_sync(&vbl_ctrl->work); + kthread_flush_work(&vbl_ctrl->work); list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { list_del(&vbl_ev->node); kfree(vbl_ev); } + /* clean up display commit/event worker threads */ + for (i = 0; i < priv->num_crtcs; i++) { + if (priv->disp_thread[i].thread) { + kthread_flush_worker(&priv->disp_thread[i].worker); + kthread_stop(priv->disp_thread[i].thread); + priv->disp_thread[i].thread = NULL; + } + + if (priv->event_thread[i].thread) { + kthread_flush_worker(&priv->event_thread[i].worker); + kthread_stop(priv->event_thread[i].thread); + priv->event_thread[i].thread = NULL; + } + } + msm_gem_shrinker_cleanup(ddev); drm_kms_helper_poll_fini(ddev); - drm_dev_unregister(ddev); + drm_mode_config_cleanup(ddev); + + if (priv->registered) { + drm_dev_unregister(ddev); + priv->registered = false; + } msm_perf_debugfs_cleanup(priv); msm_rd_debugfs_cleanup(priv); @@ -300,12 +421,18 @@ static int msm_drm_uninit(struct device *dev) flush_workqueue(priv->wq); destroy_workqueue(priv->wq); - flush_workqueue(priv->atomic_wq); - destroy_workqueue(priv->atomic_wq); - if (kms && kms->funcs) kms->funcs->destroy(kms); + if (gpu) { + mutex_lock(&ddev->struct_mutex); + // XXX what do we do here? 
+ //pm_runtime_enable(&pdev->dev); + gpu->funcs->pm_suspend(gpu); + mutex_unlock(&ddev->struct_mutex); + gpu->funcs->destroy(gpu); + } + if (priv->vram.paddr) { unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING; drm_mm_takedown(&priv->vram.mm); @@ -315,28 +442,49 @@ static int msm_drm_uninit(struct device *dev) component_unbind_all(dev, ddev); + sde_dbg_destroy(); + debugfs_remove_recursive(priv->debug_root); + + sde_power_client_destroy(&priv->phandle, priv->pclient); + sde_power_resource_deinit(pdev, &priv->phandle); + msm_mdss_destroy(ddev); ddev->dev_private = NULL; - drm_dev_unref(ddev); - kfree(priv); + drm_dev_put(ddev); + return 0; } #define KMS_MDP4 4 #define KMS_MDP5 5 +#define KMS_SDE 3 static int get_mdp_ver(struct platform_device *pdev) { +#ifdef CONFIG_OF + static const struct of_device_id match_types[] = { { + .compatible = "qcom,mdss_mdp", + .data = (void *)KMS_MDP5, + }, + { + .compatible = "qcom,sde-kms", + .data = (void *)KMS_SDE, + }, + {}, + }; struct device *dev = &pdev->dev; + const struct of_device_id *match; - return (int) (unsigned long) of_device_get_match_data(dev); + match = of_match_node(match_types, dev->of_node); + if (match) + return (int)(unsigned long)match->data; +#endif + return KMS_MDP4; } -#include - static int msm_init_vram(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; @@ -411,99 +559,264 @@ static int msm_init_vram(struct drm_device *dev) return ret; } +#ifdef CONFIG_OF +static int msm_component_bind_all(struct device *dev, + struct drm_device *drm_dev) +{ + int ret; + + ret = component_bind_all(dev, drm_dev); + if (ret) + DRM_ERROR("component_bind_all failed: %d\n", ret); + + return ret; +} +#else +static int msm_component_bind_all(struct device *dev, + struct drm_device *drm_dev) +{ + return 0; +} +#endif + +static int msm_power_enable_wrapper(void *handle, void *client, bool enable) +{ + return sde_power_resource_enable(handle, client, enable); +} + +static int _msm_drm_init_helper(struct msm_drm_private *priv, + struct sched_param param, struct drm_device *ddev, + struct device *dev, struct platform_device *pdev, + struct msm_kms *kms) +{ + int i, ret = 0; + + switch (get_mdp_ver(pdev)) { + case KMS_MDP4: + kms = mdp4_kms_init(ddev); + break; + case KMS_MDP5: + kms = mdp5_kms_init(ddev); + break; + case KMS_SDE: + kms = sde_kms_init(ddev); + break; + default: + kms = ERR_PTR(-ENODEV); + break; + } + + if (IS_ERR(kms)) { + /* + * NOTE: once we have GPU support, having no kms should not + * be considered fatal.. ideally we would still support gpu + * and (for example) use dmabuf/prime to share buffers with + * imx drm driver on iMX5 + */ + priv->kms = NULL; + dev_err(dev, "failed to load kms\n"); + return PTR_ERR(kms); + ; + } + priv->kms = kms; + pm_runtime_enable(dev); + + /** + * Since kms->funcs->hw_init(kms) might call + * drm_object_property_set_value to initialize some custom + * properties we need to make sure mode_config.funcs are populated + * beforehand to avoid dereferencing an unset value during the + * drm_drv_uses_atomic_modeset check. 
+ */ + ddev->mode_config.funcs = &mode_config_funcs; + + if (kms) { + ret = kms->funcs->hw_init(kms); + if (ret) { + dev_err(dev, "kms hw init failed: %d\n", ret); + return ret; + } + } + + /** + * this priority was found during empiric testing to have appropriate + * realtime scheduling to process display updates and interact with + * other real time and normal priority task + */ + param.sched_priority = 16; + for (i = 0; i < priv->num_crtcs; i++) { + + /* initialize display thread */ + priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id; + kthread_init_worker(&priv->disp_thread[i].worker); + priv->disp_thread[i].dev = ddev; + priv->disp_thread[i].thread = + kthread_run(kthread_worker_fn, + &priv->disp_thread[i].worker, + "crtc_commit:%d", priv->disp_thread[i].crtc_id); + ret = sched_setscheduler(priv->disp_thread[i].thread, + SCHED_FIFO, ¶m); + if (ret) + pr_warn("display thread priority update failed: %d\n", + ret); + + if (IS_ERR(priv->disp_thread[i].thread)) { + dev_err(dev, "failed to create crtc_commit kthread\n"); + priv->disp_thread[i].thread = NULL; + } + + /* initialize event thread */ + priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id; + kthread_init_worker(&priv->event_thread[i].worker); + priv->event_thread[i].dev = ddev; + priv->event_thread[i].thread = + kthread_run(kthread_worker_fn, + &priv->event_thread[i].worker, + "crtc_event:%d", priv->event_thread[i].crtc_id); + /** + * event thread should also run at same priority as disp_thread + * because it is handling frame_done events. A lower priority + * event thread and higher priority disp_thread can causes + * frame_pending counters beyond 2. This can lead to commit + * failure at crtc commit level. + */ + ret = sched_setscheduler(priv->event_thread[i].thread, + SCHED_FIFO, ¶m); + if (ret) + pr_warn("display event thread priority update failed: %d\n", + ret); + + if (IS_ERR(priv->event_thread[i].thread)) { + dev_err(dev, "failed to create crtc_event kthread\n"); + priv->event_thread[i].thread = NULL; + } + + if ((!priv->disp_thread[i].thread) || + !priv->event_thread[i].thread) { + /* clean up previously created threads if any */ + for ( ; i >= 0; i--) { + if (priv->disp_thread[i].thread) { + kthread_stop( + priv->disp_thread[i].thread); + priv->disp_thread[i].thread = NULL; + } + + if (priv->event_thread[i].thread) { + kthread_stop( + priv->event_thread[i].thread); + priv->event_thread[i].thread = NULL; + } + } + return -EINVAL; + } + } + + /** + * Since pp interrupt is heavy weight, try to queue the work + * into a dedicated worker thread, so that they dont interrupt + * other important events. 
+ */ + kthread_init_worker(&priv->pp_event_worker); + priv->pp_event_thread = kthread_run(kthread_worker_fn, + &priv->pp_event_worker, "pp_event"); + + ret = sched_setscheduler(priv->pp_event_thread, + SCHED_FIFO, ¶m); + if (ret) + pr_warn("pp_event thread priority update failed: %d\n", + ret); + + if (IS_ERR(priv->pp_event_thread)) { + dev_err(dev, "failed to create pp_event kthread\n"); + ret = PTR_ERR(priv->pp_event_thread); + priv->pp_event_thread = NULL; + return ret; + } + + return 0; +} + static int msm_drm_init(struct device *dev, struct drm_driver *drv) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *ddev; struct msm_drm_private *priv; struct msm_kms *kms; + struct sde_dbg_power_ctrl dbg_power_ctrl = { 0 }; int ret; + struct sched_param param; ddev = drm_dev_alloc(drv, dev); - if (IS_ERR(ddev)) { + if (!ddev) { dev_err(dev, "failed to allocate drm_device\n"); - return PTR_ERR(ddev); + return -ENOMEM; } + drm_mode_config_init(ddev); platform_set_drvdata(pdev, ddev); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { - drm_dev_unref(ddev); - return -ENOMEM; + ret = -ENOMEM; + goto priv_alloc_fail; } ddev->dev_private = priv; priv->dev = ddev; ret = msm_mdss_init(ddev); - if (ret) { - kfree(priv); - drm_dev_unref(ddev); - return ret; - } + if (ret) + goto mdss_init_fail; - priv->wq = alloc_ordered_workqueue("msm", 0); - priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0); + priv->wq = alloc_ordered_workqueue("msm_drm", 0); + init_waitqueue_head(&priv->pending_crtcs_event); + INIT_LIST_HEAD(&priv->client_event_list); INIT_LIST_HEAD(&priv->inactive_list); INIT_LIST_HEAD(&priv->vblank_ctrl.event_list); - INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker); + kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker); spin_lock_init(&priv->vblank_ctrl.lock); - drm_mode_config_init(ddev); + ret = sde_power_resource_init(pdev, &priv->phandle); + if (ret) { + pr_err("sde power resource init failed\n"); + goto power_init_fail; + } - /* Bind all our sub-components: */ - ret = component_bind_all(dev, ddev); + priv->pclient = sde_power_client_create(&priv->phandle, "sde"); + if (IS_ERR_OR_NULL(priv->pclient)) { + pr_err("sde power client create failed\n"); + ret = -EINVAL; + goto power_client_fail; + } + + dbg_power_ctrl.handle = &priv->phandle; + dbg_power_ctrl.client = priv->pclient; + dbg_power_ctrl.enable_fn = msm_power_enable_wrapper; + ret = sde_dbg_init(&pdev->dev, &dbg_power_ctrl); if (ret) { - msm_mdss_destroy(ddev); - kfree(priv); - drm_dev_unref(ddev); - return ret; + dev_err(dev, "failed to init sde dbg: %d\n", ret); + goto dbg_init_fail; } + /* Bind all our sub-components: */ + ret = msm_component_bind_all(dev, ddev); + if (ret) + goto bind_fail; + ret = msm_init_vram(ddev); if (ret) goto fail; - msm_gem_shrinker_init(ddev); - - switch (get_mdp_ver(pdev)) { - case KMS_MDP4: - kms = mdp4_kms_init(ddev); - priv->kms = kms; - break; - case KMS_MDP5: - kms = mdp5_kms_init(ddev); - break; - default: - kms = ERR_PTR(-ENODEV); - break; - } + ddev->mode_config.funcs = &mode_config_funcs; + ddev->mode_config.helper_private = &mode_config_helper_funcs; - if (IS_ERR(kms)) { - /* - * NOTE: once we have GPU support, having no kms should not - * be considered fatal.. 
ideally we would still support gpu - * and (for example) use dmabuf/prime to share buffers with - * imx drm driver on iMX5 - */ - dev_err(dev, "failed to load kms\n"); - ret = PTR_ERR(kms); + ret = _msm_drm_init_helper(priv, param, ddev, dev, pdev, kms); + if (ret) { + dev_err(dev, "msm_drm_init_helper failed\n"); goto fail; } - if (kms) { - ret = kms->funcs->hw_init(kms); - if (ret) { - dev_err(dev, "kms hw init failed: %d\n", ret); - goto fail; - } - } - - ddev->mode_config.funcs = &mode_config_funcs; - ddev->mode_config.helper_private = &mode_config_helper_funcs; - ret = drm_vblank_init(ddev, priv->num_crtcs); if (ret < 0) { dev_err(dev, "failed to initialize vblank\n"); @@ -512,7 +825,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (kms) { pm_runtime_get_sync(dev); - ret = drm_irq_install(ddev, kms->irq); + ret = drm_irq_install(ddev, platform_get_irq(pdev, 0)); pm_runtime_put_sync(dev); if (ret < 0) { dev_err(dev, "failed to install IRQ handler\n"); @@ -523,9 +836,18 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) ret = drm_dev_register(ddev, 0); if (ret) goto fail; + priv->registered = true; drm_mode_config_reset(ddev); + if (kms && kms->funcs && kms->funcs->cont_splash_config) { + ret = kms->funcs->cont_splash_config(kms); + if (ret) { + dev_err(dev, "kms cont_splash config failed.\n"); + goto fail; + } + } + #ifdef CONFIG_DRM_FBDEV_EMULATION if (fbdev) priv->fbdev = msm_fbdev_init(ddev); @@ -535,6 +857,30 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (ret) goto fail; + priv->debug_root = debugfs_create_dir("debug", + ddev->primary->debugfs_root); + if (IS_ERR_OR_NULL(priv->debug_root)) { + pr_err("debugfs_root create_dir fail, error %ld\n", + PTR_ERR(priv->debug_root)); + priv->debug_root = NULL; + goto fail; + } + + ret = sde_dbg_debugfs_register(priv->debug_root); + if (ret) { + dev_err(dev, "failed to reg sde dbg debugfs: %d\n", ret); + goto fail; + } + + /* perform subdriver post initialization */ + if (kms && kms->funcs && kms->funcs->postinit) { + ret = kms->funcs->postinit(kms); + if (ret) { + pr_err("kms post init failed: %d\n", ret); + goto fail; + } + } + drm_kms_helper_poll_init(ddev); return 0; @@ -542,12 +888,30 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) fail: msm_drm_uninit(dev); return ret; +bind_fail: + sde_dbg_destroy(); +dbg_init_fail: + sde_power_client_destroy(&priv->phandle, priv->pclient); +power_client_fail: + sde_power_resource_deinit(pdev, &priv->phandle); +power_init_fail: + msm_mdss_destroy(ddev); +mdss_init_fail: + kfree(priv); +priv_alloc_fail: + drm_dev_put(ddev); + return ret; } /* * DRM operations: */ +#ifdef CONFIG_QCOM_KGSL +static void load_gpu(struct drm_device *dev) +{ +} +#else static void load_gpu(struct drm_device *dev) { static DEFINE_MUTEX(init_lock); @@ -560,6 +924,7 @@ static void load_gpu(struct drm_device *dev) mutex_unlock(&init_lock); } +#endif static int context_init(struct drm_device *dev, struct drm_file *file) { @@ -573,6 +938,15 @@ static int context_init(struct drm_device *dev, struct drm_file *file) file->driver_priv = ctx; + if (dev && dev->dev_private) { + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms; + + kms = priv->kms; + if (kms && kms->funcs && kms->funcs->postopen) + kms->funcs->postopen(kms, file); + } + return 0; } @@ -592,10 +966,23 @@ static void context_close(struct msm_file_private *ctx) kfree(ctx); } +static void msm_preclose(struct drm_device *dev, struct drm_file *file) +{ + struct 
msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+
+	if (kms && kms->funcs && kms->funcs->preclose)
+		kms->funcs->preclose(kms, file);
+}
+
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_kms *kms = priv->kms;
+
+	if (kms && kms->funcs && kms->funcs->postclose)
+		kms->funcs->postclose(kms, file);
 
 	mutex_lock(&dev->struct_mutex);
 	if (ctx == priv->lastctx)
@@ -605,6 +992,154 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 	context_close(ctx);
 }
 
+static int msm_disable_all_modes_commit(
+		struct drm_device *dev,
+		struct drm_atomic_state *state)
+{
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	unsigned int plane_mask;
+	int ret;
+
+	plane_mask = 0;
+	drm_for_each_plane(plane, dev) {
+		struct drm_plane_state *plane_state;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			ret = PTR_ERR(plane_state);
+			goto fail;
+		}
+
+		plane_state->rotation = 0;
+
+		plane->old_fb = plane->fb;
+		plane_mask |= 1 << drm_plane_index(plane);
+
+		/* disable non-primary: */
+		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+			continue;
+
+		DRM_DEBUG("disabling plane %d\n", plane->base.id);
+
+		ret = __drm_atomic_helper_disable_plane(plane, plane_state);
+		if (ret != 0)
+			DRM_ERROR("error %d disabling plane %d\n", ret,
+					plane->base.id);
+	}
+
+	drm_for_each_crtc(crtc, dev) {
+		struct drm_mode_set mode_set;
+
+		memset(&mode_set, 0, sizeof(struct drm_mode_set));
+		mode_set.crtc = crtc;
+
+		DRM_DEBUG("disabling crtc %d\n", crtc->base.id);
+
+		ret = __drm_atomic_helper_set_config(&mode_set, state);
+		if (ret != 0)
+			DRM_ERROR("error %d disabling crtc %d\n", ret,
+					crtc->base.id);
+	}
+
+	DRM_DEBUG("committing disables\n");
+	ret = drm_atomic_commit(state);
+
+fail:
+	DRM_DEBUG("disables result %d\n", ret);
+	return ret;
+}
+
+/**
+ * msm_disable_all_modes - disables all planes and crtcs via an atomic commit
+ *	based on restore_fbdev_mode_atomic in drm_fb_helper.c
+ * @dev: device pointer
+ * Return: 0 on success, otherwise -error
+ */
+static int msm_disable_all_modes(
+		struct drm_device *dev,
+		struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	int ret, i;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+
+	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
+		ret = msm_disable_all_modes_commit(dev, state);
+		if (ret != -EDEADLK && ret != -ERESTARTSYS)
+			break;
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(ctx);
+	}
+
+	drm_atomic_state_put(state);
+
+	return ret;
+}
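+
+/*
+ * Editor's note -- illustrative sketch, not part of this patch: the retry
+ * loop above follows the standard DRM deadlock-avoidance contract: a
+ * contended modeset lock returns -EDEADLK, the caller backs off (dropping
+ * its locks and waiting for the contender) and then retries. Stripped down
+ * to core DRM APIs only:
+ *
+ *	struct drm_modeset_acquire_ctx ctx;
+ *	int ret;
+ *
+ *	drm_modeset_acquire_init(&ctx, 0);
+ * retry:
+ *	ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ *	if (ret == -EDEADLK) {
+ *		drm_modeset_backoff(&ctx);	(drop locks, wait, retry)
+ *		goto retry;
+ *	}
+ *	(... mutate atomic state under the acquired locks ...)
+ *	drm_modeset_drop_locks(&ctx);
+ *	drm_modeset_acquire_fini(&ctx);
+ */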
+
+static void msm_lastclose(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	struct drm_modeset_acquire_ctx ctx;
+	int i, rc;
+
+	/*
+	 * Check the splash status before triggering cleanup: if we end up
+	 * here with the splash status still ON (i.e. before the first
+	 * commit), ignore this last-close call.
+	 */
+	if (kms && kms->funcs && kms->funcs->check_for_splash
+		&& kms->funcs->check_for_splash(kms))
+		return;
+
+	/*
+	 * clean up vblank disable immediately as this is the last close.
+	 */
+	for (i = 0; i < dev->num_crtcs; i++) {
+		struct drm_vblank_crtc *vblank = &dev->vblank[i];
+		struct timer_list *disable_timer = &vblank->disable_timer;
+
+		if (del_timer_sync(disable_timer))
+			disable_timer->function(disable_timer);
+	}
+
+	/* wait for pending vblank requests to be executed by worker thread */
+	flush_workqueue(priv->wq);
+
+	if (priv->fbdev) {
+		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+		return;
+	}
+
+	drm_modeset_acquire_init(&ctx, 0);
+retry:
+	rc = drm_modeset_lock_all_ctx(dev, &ctx);
+	if (rc)
+		goto fail;
+
+	rc = msm_disable_all_modes(dev, &ctx);
+	if (rc)
+		goto fail;
+
+	if (kms && kms->funcs && kms->funcs->lastclose)
+		kms->funcs->lastclose(kms, &ctx);
+
+fail:
+	if (rc == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	} else if (rc) {
+		pr_err("last close failed: %d\n", rc);
+	}
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+}
+
 static irqreturn_t msm_irq(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
@@ -851,6 +1386,328 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
 	return ret;
 }
 
+static int msm_drm_object_supports_event(struct drm_device *dev,
+		struct drm_msm_event_req *req)
+{
+	int ret = -EINVAL;
+	struct drm_mode_object *arg_obj;
+
+	arg_obj = drm_mode_object_find(dev, NULL, req->object_id,
+			req->object_type);
+	if (!arg_obj)
+		return -ENOENT;
+
+	switch (arg_obj->type) {
+	case DRM_MODE_OBJECT_CRTC:
+	case DRM_MODE_OBJECT_CONNECTOR:
+		ret = 0;
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	drm_mode_object_put(arg_obj);
+
+	return ret;
+}
+
+static int msm_register_event(struct drm_device *dev,
+		struct drm_msm_event_req *req, struct drm_file *file, bool en)
+{
+	int ret = -EINVAL;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	struct drm_mode_object *arg_obj;
+
+	arg_obj = drm_mode_object_find(dev, file, req->object_id,
+			req->object_type);
+	if (!arg_obj)
+		return -ENOENT;
+
+	ret = kms->funcs->register_events(kms, arg_obj, req->event, en);
+
+	drm_mode_object_put(arg_obj);
+
+	return ret;
+}
+
+static int msm_event_client_count(struct drm_device *dev,
+		struct drm_msm_event_req *req_event, bool locked)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	unsigned long flag = 0;
+	struct msm_drm_event *node;
+	int count = 0;
+
+	if (!locked)
+		spin_lock_irqsave(&dev->event_lock, flag);
+	list_for_each_entry(node, &priv->client_event_list, base.link) {
+		if (node->event.type == req_event->event &&
+			node->info.object_id == req_event->object_id)
+			count++;
+	}
+	if (!locked)
+		spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	return count;
+}
+
+static int msm_ioctl_register_event(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_msm_event_req *req_event = data;
+	struct msm_drm_event *client, *node;
+	unsigned long flag = 0;
+	bool dup_request = false;
+	int ret = 0, count = 0;
+
+	ret = msm_drm_object_supports_event(dev, req_event);
+	if (ret) {
+		DRM_ERROR("unsupported event %x object %x object id %d\n",
+			req_event->event, req_event->object_type,
+			req_event->object_id);
+		return ret;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flag);
+	list_for_each_entry(node, &priv->client_event_list, base.link) {
+		if (node->base.file_priv != file)
+			continue;
+		if (node->event.type == req_event->event &&
+			node->info.object_id == req_event->object_id) {
+			DRM_DEBUG("duplicate request for event %x obj id %d\n",
+				node->event.type,
node->info.object_id); + dup_request = true; + break; + } + } + spin_unlock_irqrestore(&dev->event_lock, flag); + + if (dup_request) + return -EALREADY; + + client = kzalloc(sizeof(*client), GFP_KERNEL); + if (!client) + return -ENOMEM; + + client->base.file_priv = file; + client->base.event = &client->event; + client->event.type = req_event->event; + memcpy(&client->info, req_event, sizeof(client->info)); + + /* Get the count of clients that have registered for event. + * Event should be enabled for first client, for subsequent enable + * calls add to client list and return. + */ + count = msm_event_client_count(dev, req_event, false); + /* Add current client to list */ + spin_lock_irqsave(&dev->event_lock, flag); + list_add_tail(&client->base.link, &priv->client_event_list); + spin_unlock_irqrestore(&dev->event_lock, flag); + + if (count) + return 0; + + ret = msm_register_event(dev, req_event, file, true); + if (ret) { + DRM_ERROR("failed to enable event %x object %x object id %d\n", + req_event->event, req_event->object_type, + req_event->object_id); + spin_lock_irqsave(&dev->event_lock, flag); + list_del(&client->base.link); + spin_unlock_irqrestore(&dev->event_lock, flag); + kfree(client); + } + return ret; +} + +static int msm_ioctl_deregister_event(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_msm_event_req *req_event = data; + struct msm_drm_event *client = NULL, *node, *temp; + unsigned long flag = 0; + int count = 0; + bool found = false; + int ret = 0; + + ret = msm_drm_object_supports_event(dev, req_event); + if (ret) { + DRM_ERROR("unsupported event %x object %x object id %d\n", + req_event->event, req_event->object_type, + req_event->object_id); + return ret; + } + + spin_lock_irqsave(&dev->event_lock, flag); + list_for_each_entry_safe(node, temp, &priv->client_event_list, + base.link) { + if (node->event.type == req_event->event && + node->info.object_id == req_event->object_id && + node->base.file_priv == file) { + client = node; + list_del(&client->base.link); + found = true; + kfree(client); + break; + } + } + spin_unlock_irqrestore(&dev->event_lock, flag); + + if (!found) + return -ENOENT; + + count = msm_event_client_count(dev, req_event, false); + if (!count) + ret = msm_register_event(dev, req_event, file, false); + + return ret; +} + +void msm_mode_object_event_notify(struct drm_mode_object *obj, + struct drm_device *dev, struct drm_event *event, u8 *payload) +{ + struct msm_drm_private *priv = NULL; + unsigned long flags; + struct msm_drm_event *notify, *node; + int len = 0, ret; + + if (!obj || !event || !event->length || !payload) { + DRM_ERROR("err param obj %pK event %pK len %d payload %pK\n", + obj, event, ((event) ? (event->length) : -1), + payload); + return; + } + priv = (dev) ? 
dev->dev_private : NULL; + if (!dev || !priv) { + DRM_ERROR("invalid dev %pK priv %pK\n", dev, priv); + return; + } + + spin_lock_irqsave(&dev->event_lock, flags); + list_for_each_entry(node, &priv->client_event_list, base.link) { + if (node->event.type != event->type || + obj->id != node->info.object_id) + continue; + len = event->length + sizeof(struct msm_drm_event); + if (node->base.file_priv->event_space < len) { + DRM_ERROR("Insufficient space %d for event %x len %d\n", + node->base.file_priv->event_space, event->type, + len); + continue; + } + notify = kzalloc(len, GFP_ATOMIC); + if (!notify) + continue; + notify->base.file_priv = node->base.file_priv; + notify->base.event = ¬ify->event; + notify->event.type = node->event.type; + notify->event.length = event->length + + sizeof(struct drm_msm_event_resp); + memcpy(¬ify->info, &node->info, sizeof(notify->info)); + memcpy(notify->data, payload, event->length); + ret = drm_event_reserve_init_locked(dev, node->base.file_priv, + ¬ify->base, ¬ify->event); + if (ret) { + kfree(notify); + continue; + } + drm_send_event_locked(dev, ¬ify->base); + } + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +static int msm_release(struct inode *inode, struct file *filp) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_minor *minor = file_priv->minor; + struct drm_device *dev = minor->dev; + struct msm_drm_private *priv = dev->dev_private; + struct msm_drm_event *node, *temp, *tmp_node; + u32 count; + unsigned long flags; + LIST_HEAD(tmp_head); + + spin_lock_irqsave(&dev->event_lock, flags); + list_for_each_entry_safe(node, temp, &priv->client_event_list, + base.link) { + if (node->base.file_priv != file_priv) + continue; + list_del(&node->base.link); + list_add_tail(&node->base.link, &tmp_head); + } + spin_unlock_irqrestore(&dev->event_lock, flags); + + list_for_each_entry_safe(node, temp, &tmp_head, + base.link) { + list_del(&node->base.link); + count = msm_event_client_count(dev, &node->info, false); + + list_for_each_entry(tmp_node, &tmp_head, base.link) { + if (tmp_node->event.type == node->info.event && + tmp_node->info.object_id == + node->info.object_id) + count++; + } + if (!count) + msm_register_event(dev, &node->info, file_priv, false); + kfree(node); + } + + return drm_release(inode, filp); +} + +/** + * msm_ioctl_rmfb2 - remove an FB from the configuration + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Remove the FB specified by the user. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. 
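+ *
+ * Unlike the core RMFB path, this variant only drops the calling file's
+ * references and does not disable a CRTC or plane that is still scanning
+ * out the buffer, so a client can release its handle without blanking the
+ * display.
+ *
+ * Editor's illustration -- the ioctl wrapper macro name below is an
+ * assumption, not confirmed by this patch:
+ *
+ *	uint32_t fb_id = ...;
+ *	drmIoctl(drm_fd, DRM_IOCTL_MSM_RMFB2, &fb_id);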
+ */ +int msm_ioctl_rmfb2(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_framebuffer *fb = NULL; + struct drm_framebuffer *fbl = NULL; + uint32_t *id = data; + int found = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + fb = drm_framebuffer_lookup(dev, file_priv, *id); + if (!fb) + return -ENOENT; + + /* drop extra ref from traversing drm_framebuffer_lookup */ + drm_framebuffer_put(fb); + + mutex_lock(&file_priv->fbs_lock); + list_for_each_entry(fbl, &file_priv->fbs, filp_head) + if (fb == fbl) + found = 1; + if (!found) { + mutex_unlock(&file_priv->fbs_lock); + return -ENOENT; + } + + list_del_init(&fb->filp_head); + mutex_unlock(&file_priv->fbs_lock); + + drm_framebuffer_put(fb); + + return 0; +} +EXPORT_SYMBOL(msm_ioctl_rmfb2); static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data, struct drm_file *file) @@ -884,6 +1741,12 @@ static const struct drm_ioctl_desc msm_ioctls[] = { DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(SDE_WB_CONFIG, sde_wb_config, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(MSM_REGISTER_EVENT, msm_ioctl_register_event, + DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT, msm_ioctl_deregister_event, + DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(MSM_RMFB2, msm_ioctl_rmfb2, DRM_UNLOCKED), }; static const struct vm_operations_struct vm_ops = { @@ -895,7 +1758,7 @@ static const struct vm_operations_struct vm_ops = { static const struct file_operations fops = { .owner = THIS_MODULE, .open = drm_open, - .release = drm_release, + .release = msm_release, .unlocked_ioctl = drm_ioctl, .compat_ioctl = drm_compat_ioctl, .poll = drm_poll, @@ -912,6 +1775,7 @@ static struct drm_driver msm_driver = { DRIVER_ATOMIC | DRIVER_MODESET, .open = msm_open, + .preclose = msm_preclose, .postclose = msm_postclose, .lastclose = drm_fb_helper_lastclose, .irq_handler = msm_irq, @@ -927,7 +1791,7 @@ static struct drm_driver msm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, - .gem_prime_import = drm_gem_prime_import, + .gem_prime_import = msm_gem_prime_import, .gem_prime_res_obj = msm_gem_prime_res_obj, .gem_prime_pin = msm_gem_prime_pin, .gem_prime_unpin = msm_gem_prime_unpin, @@ -942,7 +1806,7 @@ static struct drm_driver msm_driver = { .ioctls = msm_ioctls, .num_ioctls = ARRAY_SIZE(msm_ioctls), .fops = &fops, - .name = "msm", + .name = "msm_drm", .desc = "MSM Snapdragon DRM", .date = "20130625", .major = MSM_VERSION_MAJOR, @@ -953,26 +1817,49 @@ static struct drm_driver msm_driver = { #ifdef CONFIG_PM_SLEEP static int msm_pm_suspend(struct device *dev) { - struct drm_device *ddev = dev_get_drvdata(dev); - struct msm_drm_private *priv = ddev->dev_private; + struct drm_device *ddev; + struct msm_drm_private *priv; + struct msm_kms *kms; - drm_kms_helper_poll_disable(ddev); + if (!dev) + return -EINVAL; - priv->pm_state = drm_atomic_helper_suspend(ddev); - if (IS_ERR(priv->pm_state)) { - drm_kms_helper_poll_enable(ddev); - return PTR_ERR(priv->pm_state); - } + ddev = dev_get_drvdata(dev); + if (!ddev || !ddev->dev_private) + return -EINVAL; + + priv = ddev->dev_private; + kms = priv->kms; + + if (kms && kms->funcs && kms->funcs->pm_suspend) + return 
kms->funcs->pm_suspend(dev); + + /* disable hot-plug polling */ + drm_kms_helper_poll_disable(ddev); return 0; } static int msm_pm_resume(struct device *dev) { - struct drm_device *ddev = dev_get_drvdata(dev); - struct msm_drm_private *priv = ddev->dev_private; + struct drm_device *ddev; + struct msm_drm_private *priv; + struct msm_kms *kms; + + if (!dev) + return -EINVAL; + + ddev = dev_get_drvdata(dev); + if (!ddev || !ddev->dev_private) + return -EINVAL; + + priv = ddev->dev_private; + kms = priv->kms; + + if (kms && kms->funcs && kms->funcs->pm_resume) + return kms->funcs->pm_resume(dev); - drm_atomic_helper_resume(ddev, priv->pm_state); + /* enable hot-plug polling */ drm_kms_helper_poll_enable(ddev); return 0; @@ -1090,15 +1977,31 @@ static int add_components_mdp(struct device *mdp_dev, static int compare_name_mdp(struct device *dev, void *data) { - return (strstr(dev_name(dev), "mdp") != NULL); + return (strnstr(dev_name(dev), "mdp", strlen("mdp")) != NULL); } static int add_display_components(struct device *dev, struct component_match **matchptr) { - struct device *mdp_dev; + struct device *mdp_dev = NULL; + struct device_node *node; int ret; + if (of_device_is_compatible(dev->of_node, "qcom,sde-kms")) { + struct device_node *np = dev->of_node; + unsigned int i; + + for (i = 0; ; i++) { + node = of_parse_phandle(np, "connectors", i); + if (!node) + break; + + component_match_add(dev, matchptr, compare_of, node); + } + + return 0; + } + /* * MDP5 based devices don't have a flat hierarchy. There is a top level * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the @@ -1122,8 +2025,8 @@ static int add_display_components(struct device *dev, put_device(mdp_dev); /* add the MDP component itself */ - drm_of_component_match_add(dev, matchptr, compare_of, - mdp_dev->of_node); + component_match_add(dev, matchptr, compare_of, + mdp_dev->of_node); } else { /* MDP4 */ mdp_dev = dev; @@ -1136,6 +2039,30 @@ static int add_display_components(struct device *dev, return ret; } +struct msm_gem_address_space * +msm_gem_smmu_address_space_get(struct drm_device *dev, + unsigned int domain) +{ + struct msm_drm_private *priv = NULL; + struct msm_kms *kms; + const struct msm_kms_funcs *funcs; + + if ((!dev) || (!dev->dev_private)) + return NULL; + + priv = dev->dev_private; + kms = priv->kms; + if (!kms) + return NULL; + + funcs = kms->funcs; + + if ((!funcs) || (!funcs->get_address_space)) + return NULL; + + return funcs->get_address_space(priv->kms, domain); +} + /* * We don't know what's the best binding to link the gpu with the drm device. 
* Fow now, we just hunt for all the possible gpus that we support, and add them @@ -1148,6 +2075,13 @@ static const struct of_device_id msm_gpu_match[] = { { }, }; +#ifdef CONFIG_QCOM_KGSL +static int add_gpu_components(struct device *dev, + struct component_match **matchptr) +{ + return 0; +} +#else static int add_gpu_components(struct device *dev, struct component_match **matchptr) { @@ -1163,6 +2097,7 @@ static int add_gpu_components(struct device *dev, return 0; } +#endif static int msm_drm_bind(struct device *dev) { @@ -1185,8 +2120,8 @@ static const struct component_master_ops msm_drm_ops = { static int msm_pdev_probe(struct platform_device *pdev) { - struct component_match *match = NULL; int ret; + struct component_match *match = NULL; ret = add_display_components(&pdev->dev, &match); if (ret) @@ -1196,13 +2131,7 @@ static int msm_pdev_probe(struct platform_device *pdev) if (ret) return ret; - /* on all devices that I am aware of, iommu's which can map - * any address the cpu can see are used: - */ - ret = dma_set_mask_and_coherent(&pdev->dev, ~0); - if (ret) - return ret; - + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); } @@ -1211,33 +2140,70 @@ static int msm_pdev_remove(struct platform_device *pdev) component_master_del(&pdev->dev, &msm_drm_ops); of_platform_depopulate(&pdev->dev); + msm_drm_unbind(&pdev->dev); + component_master_del(&pdev->dev, &msm_drm_ops); return 0; } +static void msm_pdev_shutdown(struct platform_device *pdev) +{ + struct drm_device *ddev = platform_get_drvdata(pdev); + struct msm_drm_private *priv = NULL; + + if (!ddev) { + DRM_ERROR("invalid drm device node\n"); + return; + } + + priv = ddev->dev_private; + if (!priv) { + DRM_ERROR("invalid msm drm private node\n"); + return; + } + + msm_lastclose(ddev); + + /* set this after lastclose to allow kickoff from lastclose */ + priv->shutdown_in_progress = true; +} + static const struct of_device_id dt_match[] = { { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, - {} + { .compatible = "qcom,sde-kms", .data = (void *)KMS_SDE }, + {}, }; MODULE_DEVICE_TABLE(of, dt_match); static struct platform_driver msm_platform_driver = { .probe = msm_pdev_probe, .remove = msm_pdev_remove, + .shutdown = msm_pdev_shutdown, .driver = { - .name = "msm", + .name = "msm_drm", .of_match_table = dt_match, .pm = &msm_pm_ops, + .suppress_bind_attrs = true, }, }; +#ifdef CONFIG_QCOM_KGSL +void __init adreno_register(void) +{ +} + +void __exit adreno_unregister(void) +{ +} +#endif + static int __init msm_drm_register(void) { if (!modeset) return -EINVAL; DBG("init"); - msm_mdp_register(); + msm_smmu_driver_init(); msm_dsi_register(); msm_edp_register(); msm_hdmi_register(); @@ -1253,7 +2219,7 @@ static void __exit msm_drm_unregister(void) adreno_unregister(); msm_edp_unregister(); msm_dsi_unregister(); - msm_mdp_unregister(); + msm_smmu_driver_cleanup(); } module_init(msm_drm_register); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index a635cbf10e395bfd90cc53c2cf9d964736149f11..ad27e8580e44d02296a6a61531801e6352a00d33 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -33,7 +33,9 @@ #include #include #include +#include #include +#include #include #include @@ -44,6 +46,12 @@ #include #include +#include "sde_power_handle.h" + +#define GET_MAJOR_REV(rev) ((rev) >> 28) +#define GET_MINOR_REV(rev) (((rev) >> 16) & 0xFFF) +#define 
GET_STEP_REV(rev) ((rev) & 0xFFFF) + struct msm_kms; struct msm_gpu; struct msm_mmu; @@ -52,35 +60,497 @@ struct msm_rd_state; struct msm_perf_state; struct msm_gem_submit; struct msm_fence_context; +struct msm_fence_cb; struct msm_gem_address_space; struct msm_gem_vma; +#define NUM_DOMAINS 4 /* one for KMS, then one per gpu core (?) */ #define MAX_CRTCS 8 -#define MAX_PLANES 16 +#define MAX_PLANES 20 #define MAX_ENCODERS 8 #define MAX_BRIDGES 8 #define MAX_CONNECTORS 8 +#define TEARDOWN_DEADLOCK_RETRY_MAX 5 + struct msm_file_private { rwlock_t queuelock; struct list_head submitqueues; + int queueid; }; enum msm_mdp_plane_property { - PLANE_PROP_ZPOS, + /* blob properties, always put these first */ + PLANE_PROP_CSC_V1, + PLANE_PROP_CSC_DMA_V1, + PLANE_PROP_INFO, + PLANE_PROP_SCALER_LUT_ED, + PLANE_PROP_SCALER_LUT_CIR, + PLANE_PROP_SCALER_LUT_SEP, + PLANE_PROP_SKIN_COLOR, + PLANE_PROP_SKY_COLOR, + PLANE_PROP_FOLIAGE_COLOR, + PLANE_PROP_VIG_GAMUT, + PLANE_PROP_VIG_IGC, + PLANE_PROP_DMA_IGC, + PLANE_PROP_DMA_GC, + PLANE_PROP_ROT_CAPS_V1, + + /* # of blob properties */ + PLANE_PROP_BLOBCOUNT, + + /* range properties */ + PLANE_PROP_ZPOS = PLANE_PROP_BLOBCOUNT, PLANE_PROP_ALPHA, - PLANE_PROP_PREMULTIPLIED, - PLANE_PROP_MAX_NUM + PLANE_PROP_COLOR_FILL, + PLANE_PROP_H_DECIMATE, + PLANE_PROP_V_DECIMATE, + PLANE_PROP_INPUT_FENCE, + PLANE_PROP_HUE_ADJUST, + PLANE_PROP_SATURATION_ADJUST, + PLANE_PROP_VALUE_ADJUST, + PLANE_PROP_CONTRAST_ADJUST, + PLANE_PROP_EXCL_RECT_V1, + PLANE_PROP_ROT_DST_X, + PLANE_PROP_ROT_DST_Y, + PLANE_PROP_ROT_DST_W, + PLANE_PROP_ROT_DST_H, + PLANE_PROP_PREFILL_SIZE, + PLANE_PROP_PREFILL_TIME, + PLANE_PROP_SCALER_V1, + PLANE_PROP_SCALER_V2, + PLANE_PROP_ROT_OUT_FB, + PLANE_PROP_INVERSE_PMA, + + /* enum/bitmask properties */ + PLANE_PROP_BLEND_OP, + PLANE_PROP_SRC_CONFIG, + PLANE_PROP_FB_TRANSLATION_MODE, + PLANE_PROP_MULTIRECT_MODE, + + /* total # of properties */ + PLANE_PROP_COUNT +}; + +enum msm_mdp_crtc_property { + CRTC_PROP_INFO, + CRTC_PROP_DEST_SCALER_LUT_ED, + CRTC_PROP_DEST_SCALER_LUT_CIR, + CRTC_PROP_DEST_SCALER_LUT_SEP, + + /* # of blob properties */ + CRTC_PROP_BLOBCOUNT, + + /* range properties */ + CRTC_PROP_INPUT_FENCE_TIMEOUT = CRTC_PROP_BLOBCOUNT, + CRTC_PROP_OUTPUT_FENCE, + CRTC_PROP_OUTPUT_FENCE_OFFSET, + CRTC_PROP_DIM_LAYER_V1, + CRTC_PROP_CORE_CLK, + CRTC_PROP_CORE_AB, + CRTC_PROP_CORE_IB, + CRTC_PROP_LLCC_AB, + CRTC_PROP_LLCC_IB, + CRTC_PROP_DRAM_AB, + CRTC_PROP_DRAM_IB, + CRTC_PROP_ROT_PREFILL_BW, + CRTC_PROP_ROT_CLK, + CRTC_PROP_ROI_V1, + CRTC_PROP_SECURITY_LEVEL, + CRTC_PROP_IDLE_TIMEOUT, + CRTC_PROP_DEST_SCALER, + CRTC_PROP_CAPTURE_OUTPUT, + + CRTC_PROP_IDLE_PC_STATE, + + /* total # of properties */ + CRTC_PROP_COUNT +}; + +enum msm_mdp_conn_property { + /* blob properties, always put these first */ + CONNECTOR_PROP_SDE_INFO, + CONNECTOR_PROP_MODE_INFO, + CONNECTOR_PROP_HDR_INFO, + CONNECTOR_PROP_EXT_HDR_INFO, + CONNECTOR_PROP_PP_DITHER, + CONNECTOR_PROP_HDR_METADATA, + + /* # of blob properties */ + CONNECTOR_PROP_BLOBCOUNT, + + /* range properties */ + CONNECTOR_PROP_OUT_FB = CONNECTOR_PROP_BLOBCOUNT, + CONNECTOR_PROP_RETIRE_FENCE, + CONNECTOR_PROP_DST_X, + CONNECTOR_PROP_DST_Y, + CONNECTOR_PROP_DST_W, + CONNECTOR_PROP_DST_H, + CONNECTOR_PROP_ROI_V1, + CONNECTOR_PROP_BL_SCALE, + CONNECTOR_PROP_AD_BL_SCALE, + + /* enum/bitmask properties */ + CONNECTOR_PROP_TOPOLOGY_NAME, + CONNECTOR_PROP_TOPOLOGY_CONTROL, + CONNECTOR_PROP_AUTOREFRESH, + CONNECTOR_PROP_LP, + CONNECTOR_PROP_FB_TRANSLATION_MODE, + CONNECTOR_PROP_QSYNC_MODE, + + /* total # of 
properties */ + CONNECTOR_PROP_COUNT }; struct msm_vblank_ctrl { - struct work_struct work; + struct kthread_work work; struct list_head event_list; spinlock_t lock; }; #define MSM_GPU_MAX_RINGS 4 +#define MAX_H_TILES_PER_DISPLAY 2 + +/** + * enum msm_display_compression_type - compression method used for pixel stream + * @MSM_DISPLAY_COMPRESSION_NONE: Pixel data is not compressed + * @MSM_DISPLAY_COMPRESSION_DSC: DSC compresison is used + */ +enum msm_display_compression_type { + MSM_DISPLAY_COMPRESSION_NONE, + MSM_DISPLAY_COMPRESSION_DSC, +}; + +/** + * enum msm_display_compression_ratio - compression ratio + * @MSM_DISPLAY_COMPRESSION_NONE: no compression + * @MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1: 2 to 1 compression + * @MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1: 3 to 1 compression + */ +enum msm_display_compression_ratio { + MSM_DISPLAY_COMPRESSION_RATIO_NONE, + MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1, + MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1, + MSM_DISPLAY_COMPRESSION_RATIO_MAX, +}; + +/** + * enum msm_display_caps - features/capabilities supported by displays + * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported + * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported + * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported + * @MSM_DISPLAY_CAP_EDID: EDID supported + * @MSM_DISPLAY_ESD_ENABLED: ESD feature enabled + * @MSM_DISPLAY_CAP_MST_MODE: Display with MST support + */ +enum msm_display_caps { + MSM_DISPLAY_CAP_VID_MODE = BIT(0), + MSM_DISPLAY_CAP_CMD_MODE = BIT(1), + MSM_DISPLAY_CAP_HOT_PLUG = BIT(2), + MSM_DISPLAY_CAP_EDID = BIT(3), + MSM_DISPLAY_ESD_ENABLED = BIT(4), + MSM_DISPLAY_CAP_MST_MODE = BIT(5), +}; + +/** + * enum msm_event_wait - type of HW events to wait for + * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW + * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel + * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters) + * @MSM_ENC_ACTIVE_REGION - wait for the TG to be in active pixel region + */ +enum msm_event_wait { + MSM_ENC_COMMIT_DONE = 0, + MSM_ENC_TX_COMPLETE, + MSM_ENC_VBLANK, + MSM_ENC_ACTIVE_REGION, +}; + +/** + * struct msm_roi_alignment - region of interest alignment restrictions + * @xstart_pix_align: left x offset alignment restriction + * @width_pix_align: width alignment restriction + * @ystart_pix_align: top y offset alignment restriction + * @height_pix_align: height alignment restriction + * @min_width: minimum width restriction + * @min_height: minimum height restriction + */ +struct msm_roi_alignment { + uint32_t xstart_pix_align; + uint32_t width_pix_align; + uint32_t ystart_pix_align; + uint32_t height_pix_align; + uint32_t min_width; + uint32_t min_height; +}; + +/** + * struct msm_roi_caps - display's region of interest capabilities + * @enabled: true if some region of interest is supported + * @merge_rois: merge rois before sending to display + * @num_roi: maximum number of rois supported + * @align: roi alignment restrictions + */ +struct msm_roi_caps { + bool enabled; + bool merge_rois; + uint32_t num_roi; + struct msm_roi_alignment align; +}; + +/** + * struct msm_display_dsc_info - defines dsc configuration + * @version: DSC version. + * @scr_rev: DSC revision. + * @pic_height: Picture height in pixels. + * @pic_width: Picture width in pixels. + * @initial_lines: Number of initial lines stored in encoder. + * @pkt_per_line: Number of packets per line. + * @bytes_in_slice: Number of bytes in slice. + * @eol_byte_num: Valid bytes at the end of line. 
+ * @pclk_per_line: Compressed width. + * @full_frame_slices: Number of slice per interface. + * @slice_height: Slice height in pixels. + * @slice_width: Slice width in pixels. + * @chunk_size: Chunk size in bytes for slice multiplexing. + * @slice_last_group_size: Size of last group in pixels. + * @bpp: Target bits per pixel. + * @bpc: Number of bits per component. + * @line_buf_depth: Line buffer bit depth. + * @block_pred_enable: Block prediction enabled/disabled. + * @vbr_enable: VBR mode. + * @enable_422: Indicates if input uses 4:2:2 sampling. + * @convert_rgb: DSC color space conversion. + * @input_10_bits: 10 bit per component input. + * @slice_per_pkt: Number of slices per packet. + * @initial_dec_delay: Initial decoding delay. + * @initial_xmit_delay: Initial transmission delay. + * @initial_scale_value: Scale factor value at the beginning of a slice. + * @scale_decrement_interval: Scale set up at the beginning of a slice. + * @scale_increment_interval: Scale set up at the end of a slice. + * @first_line_bpg_offset: Extra bits allocated on the first line of a slice. + * @nfl_bpg_offset: Slice specific settings. + * @slice_bpg_offset: Slice specific settings. + * @initial_offset: Initial offset at the start of a slice. + * @final_offset: Maximum end-of-slice value. + * @rc_model_size: Number of bits in RC model. + * @det_thresh_flatness: Flatness threshold. + * @max_qp_flatness: Maximum QP for flatness adjustment. + * @min_qp_flatness: Minimum QP for flatness adjustment. + * @edge_factor: Ratio to detect presence of edge. + * @quant_incr_limit0: QP threshold. + * @quant_incr_limit1: QP threshold. + * @tgt_offset_hi: Upper end of variability range. + * @tgt_offset_lo: Lower end of variability range. + * @buf_thresh: Thresholds in RC model + * @range_min_qp: Min QP allowed. + * @range_max_qp: Max QP allowed. + * @range_bpg_offset: Bits per group adjustment. 
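+ *
+ * Worked example (editor's illustration, not taken from this patch): with
+ * the common DSC 1.1 parameters bpp = 8 and slice_width = 540, the chunk
+ * size for one slice line works out to
+ *
+ *	chunk_size = DIV_ROUND_UP(slice_width * bpp, 8)
+ *	           = DIV_ROUND_UP(540 * 8, 8) = 540 bytes
+ *
+ * and two such slices cover a 1080-pixel-wide picture
+ * (full_frame_slices = 2).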
+ */ +struct msm_display_dsc_info { + u8 version; + u8 scr_rev; + + int pic_height; + int pic_width; + int slice_height; + int slice_width; + + int initial_lines; + int pkt_per_line; + int bytes_in_slice; + int bytes_per_pkt; + int eol_byte_num; + int pclk_per_line; + int full_frame_slices; + int slice_last_group_size; + int bpp; + int bpc; + int line_buf_depth; + + int slice_per_pkt; + int chunk_size; + bool block_pred_enable; + int vbr_enable; + int enable_422; + int convert_rgb; + int input_10_bits; + + int initial_dec_delay; + int initial_xmit_delay; + int initial_scale_value; + int scale_decrement_interval; + int scale_increment_interval; + int first_line_bpg_offset; + int nfl_bpg_offset; + int slice_bpg_offset; + int initial_offset; + int final_offset; + + int rc_model_size; + int det_thresh_flatness; + int max_qp_flatness; + int min_qp_flatness; + int edge_factor; + int quant_incr_limit0; + int quant_incr_limit1; + int tgt_offset_hi; + int tgt_offset_lo; + + u32 *buf_thresh; + char *range_min_qp; + char *range_max_qp; + char *range_bpg_offset; +}; + +/** + * struct msm_compression_info - defined panel compression + * @comp_type: type of compression supported + * @comp_ratio: compression ratio + * @dsc_info: dsc configuration if the compression + * supported is DSC + */ +struct msm_compression_info { + enum msm_display_compression_type comp_type; + enum msm_display_compression_ratio comp_ratio; + + union{ + struct msm_display_dsc_info dsc_info; + }; +}; + +/** + * struct msm_display_topology - defines a display topology pipeline + * @num_lm: number of layer mixers used + * @num_enc: number of compression encoder blocks used + * @num_intf: number of interfaces the panel is mounted on + */ +struct msm_display_topology { + u32 num_lm; + u32 num_enc; + u32 num_intf; +}; + +/** + * struct msm_mode_info - defines all msm custom mode info + * @frame_rate: frame_rate of the mode + * @vtotal: vtotal calculated for the mode + * @prefill_lines: prefill lines based on porches. + * @jitter_numer: display panel jitter numerator configuration + * @jitter_denom: display panel jitter denominator configuration + * @clk_rate: DSI bit clock per lane in HZ. + * @topology: supported topology for the mode + * @comp_info: compression info supported + * @roi_caps: panel roi capabilities + * @wide_bus_en: wide-bus mode cfg for interface module + */ +struct msm_mode_info { + uint32_t frame_rate; + uint32_t vtotal; + uint32_t prefill_lines; + uint32_t jitter_numer; + uint32_t jitter_denom; + uint64_t clk_rate; + struct msm_display_topology topology; + struct msm_compression_info comp_info; + struct msm_roi_caps roi_caps; + bool wide_bus_en; +}; + +/** + * struct msm_display_info - defines display properties + * @intf_type: DRM_MODE_CONNECTOR_ display type + * @capabilities: Bitmask of display flags + * @num_of_h_tiles: Number of horizontal tiles in case of split interface + * @h_tile_instance: Controller instance used per tile. Number of elements is + * based on num_of_h_tiles + * @is_connected: Set to true if display is connected + * @width_mm: Physical width + * @height_mm: Physical height + * @max_width: Max width of display. In case of hot pluggable display + * this is max width supported by controller + * @max_height: Max height of display. In case of hot pluggable display + * this is max height supported by controller + * @clk_rate: DSI bit clock per lane in HZ. 
+ * @is_primary: Set to true if display is primary display + * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is + * used instead of panel TE in cmd mode panels + * @roi_caps: Region of interest capability info + * @qsync_min_fps Minimum fps supported by Qsync feature + * @te_source vsync source pin information + */ +struct msm_display_info { + int intf_type; + uint32_t capabilities; + + uint32_t num_of_h_tiles; + uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY]; + + bool is_connected; + + unsigned int width_mm; + unsigned int height_mm; + + uint32_t max_width; + uint32_t max_height; + uint64_t clk_rate; + + bool is_primary; + bool is_te_using_watchdog_timer; + struct msm_roi_caps roi_caps; + + uint32_t qsync_min_fps; + uint32_t te_source; +}; + +#define MSM_MAX_ROI 4 + +/** + * struct msm_roi_list - list of regions of interest for a drm object + * @num_rects: number of valid rectangles in the roi array + * @roi: list of roi rectangles + */ +struct msm_roi_list { + uint32_t num_rects; + struct drm_clip_rect roi[MSM_MAX_ROI]; +}; + +/** + * struct - msm_display_kickoff_params - info for display features at kickoff + * @rois: Regions of interest structure for mapping CRTC to Connector output + * @qsync_mode: Qsync mode, where 0: disabled 1: continuous mode + * @qsync_update: Qsync settings were changed/updated + */ +struct msm_display_kickoff_params { + struct msm_roi_list *rois; + struct drm_msm_ext_hdr_metadata *hdr_meta; + uint32_t qsync_mode; + bool qsync_update; +}; + +/** + * struct msm_drm_event - defines custom event notification struct + * @base: base object required for event notification by DRM framework. + * @event: event object required for event notification by DRM framework. + * @info: contains information of DRM object for which events has been + * requested. + * @data: memory location which contains response payload for event. 
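+ *
+ * Since @data is a flexible array sized by the payload, an instance is
+ * allocated in one shot (editor's sketch, mirroring the notify path in
+ * msm_drv.c):
+ *
+ *	struct msm_drm_event *e;
+ *
+ *	e = kzalloc(sizeof(*e) + payload_len, GFP_ATOMIC);
+ *	if (e)
+ *		memcpy(e->data, payload, payload_len);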
+ */ +struct msm_drm_event { + struct drm_pending_event base; + struct drm_event event; + struct drm_msm_event_req info; + u8 data[]; +}; + +/* Commit/Event thread specific structure */ +struct msm_drm_thread { + struct drm_device *dev; + struct task_struct *thread; + unsigned int crtc_id; + struct kthread_worker worker; +}; struct msm_drm_private { @@ -88,6 +558,9 @@ struct msm_drm_private { struct msm_kms *kms; + struct sde_power_handle phandle; + struct sde_power_client *pclient; + /* subordinate devices, if present: */ struct platform_device *gpu_pdev; @@ -122,7 +595,10 @@ struct msm_drm_private { struct list_head inactive_list; struct workqueue_struct *wq; - struct workqueue_struct *atomic_wq; + + /* crtcs pending async atomic updates: */ + uint32_t pending_crtcs; + wait_queue_head_t pending_crtcs_event; unsigned int num_planes; struct drm_plane *planes[MAX_PLANES]; @@ -130,6 +606,12 @@ struct msm_drm_private { unsigned int num_crtcs; struct drm_crtc *crtcs[MAX_CRTCS]; + struct msm_drm_thread disp_thread[MAX_CRTCS]; + struct msm_drm_thread event_thread[MAX_CRTCS]; + + struct task_struct *pp_event_thread; + struct kthread_worker pp_event_worker; + unsigned int num_encoders; struct drm_encoder *encoders[MAX_ENCODERS]; @@ -140,7 +622,12 @@ struct msm_drm_private { struct drm_connector *connectors[MAX_CONNECTORS]; /* Properties */ - struct drm_property *plane_property[PLANE_PROP_MAX_NUM]; + struct drm_property *plane_property[PLANE_PROP_COUNT]; + struct drm_property *crtc_property[CRTC_PROP_COUNT]; + struct drm_property *conn_property[CONNECTOR_PROP_COUNT]; + + /* Color processing properties for the crtc */ + struct drm_property **cp_property; /* VRAM carveout, used when no IOMMU: */ struct { @@ -158,8 +645,30 @@ struct msm_drm_private { struct msm_vblank_ctrl vblank_ctrl; struct drm_atomic_state *pm_state; + + /* task holding struct_mutex.. currently only used in submit path + * to detect and reject faults from copy_from_user() for submit + * ioctl. + */ + struct task_struct *struct_mutex_task; + + /* list of clients waiting for events */ + struct list_head client_event_list; + + /* whether registered and drm_dev_unregister should be called */ + bool registered; + + /* msm drv debug root node */ + struct dentry *debug_root; + + /* update the flag when msm driver receives shutdown notification */ + bool shutdown_in_progress; }; +/* get struct msm_kms * from drm_device * */ +#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? 
\ + ((struct msm_drm_private *)((D)->dev_private))->kms : NULL) + struct msm_format { uint32_t pixel_format; }; @@ -167,14 +676,35 @@ struct msm_format { int msm_atomic_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state); void msm_atomic_commit_tail(struct drm_atomic_state *state); +int msm_atomic_commit(struct drm_device *dev, + struct drm_atomic_state *state, bool nonblock); + +/* callback from wq once fence has passed: */ +struct msm_fence_cb { + struct work_struct work; + uint32_t fence; + void (*func)(struct msm_fence_cb *cb); +}; + +void __msm_fence_worker(struct work_struct *work); + +#define INIT_FENCE_CB(_cb, _func) do { \ + INIT_WORK(&(_cb)->work, __msm_fence_worker); \ + (_cb)->func = _func; \ + } while (0) + struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev); void msm_atomic_state_clear(struct drm_atomic_state *state); void msm_atomic_state_free(struct drm_atomic_state *state); void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, struct sg_table *sgt); + struct msm_gem_vma *vma, struct sg_table *sgt, + unsigned int flags); int msm_gem_map_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, struct sg_table *sgt, int npages); + struct msm_gem_vma *vma, struct sg_table *sgt, int npages, + unsigned int flags); + +struct device *msm_gem_get_aspace_device(struct msm_gem_address_space *aspace); void msm_gem_address_space_put(struct msm_gem_address_space *aspace); @@ -182,6 +712,61 @@ struct msm_gem_address_space * msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, const char *name); +/* For SDE display */ +struct msm_gem_address_space * +msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu, + const char *name); + +/** + * msm_gem_add_obj_to_aspace_active_list: adds obj to active obj list in aspace + */ +void msm_gem_add_obj_to_aspace_active_list( + struct msm_gem_address_space *aspace, + struct drm_gem_object *obj); + +/** + * msm_gem_remove_obj_from_aspace_active_list: removes obj from active obj + * list in aspace + */ +void msm_gem_remove_obj_from_aspace_active_list( + struct msm_gem_address_space *aspace, + struct drm_gem_object *obj); + +/** + * msm_gem_smmu_address_space_get: returns the aspace pointer for the requested + * domain + */ +struct msm_gem_address_space * +msm_gem_smmu_address_space_get(struct drm_device *dev, + unsigned int domain); +int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); +void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu); + +/** + * msm_gem_aspace_domain_attach_detach: function to inform the attach/detach + * of the domain for this aspace + */ +void msm_gem_aspace_domain_attach_detach_update( + struct msm_gem_address_space *aspace, + bool is_detach); + +/** + * msm_gem_address_space_register_cb: function to register callback for attach + * and detach of the domain + */ +int msm_gem_address_space_register_cb( + struct msm_gem_address_space *aspace, + void (*cb)(void *, bool), + void *cb_data); + +/** + * msm_gem_address_space_register_cb: function to unregister callback + */ +int msm_gem_address_space_unregister_cb( + struct msm_gem_address_space *aspace, + void (*cb)(void *, bool), + void *cb_data); + void msm_gem_submit_free(struct msm_gem_submit *submit); int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file); @@ -189,6 +774,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, void msm_gem_shrinker_init(struct drm_device *dev); void 
msm_gem_shrinker_cleanup(struct drm_device *dev); +void msm_gem_sync(struct drm_gem_object *obj); int msm_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma); int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); @@ -202,6 +788,7 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj); void msm_gem_put_pages(struct drm_gem_object *obj); void msm_gem_put_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace); +dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj); int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, @@ -215,6 +802,8 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int msm_gem_prime_pin(struct drm_gem_object *obj); void msm_gem_prime_unpin(struct drm_gem_object *obj); +struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); void *msm_gem_get_vaddr(struct drm_gem_object *obj); void *msm_gem_get_vaddr_active(struct drm_gem_object *obj); void msm_gem_put_vaddr(struct drm_gem_object *obj); @@ -241,15 +830,22 @@ void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, struct drm_gem_object **bo, uint64_t *iova); struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct dma_buf *dmabuf, struct sg_table *sgt); +int msm_gem_delayed_import(struct drm_gem_object *obj); +void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable); +void msm_framebuffer_set_keepattrs(struct drm_framebuffer *fb, bool enable); int msm_framebuffer_prepare(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace); void msm_framebuffer_cleanup(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace); uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace, int plane); +uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb, int plane); struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); +struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object **bos); struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev, @@ -289,6 +885,16 @@ static inline void __exit msm_edp_unregister(void) #endif struct msm_dsi; + +/* * + * msm_mode_object_event_notify - notify user-space clients of drm object + * events. + * @obj: mode object (crtc/connector) that is generating the event. + * @event: event that needs to be notified. + * @payload: payload for the event. 
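+ *
+ * Editor's illustration (hypothetical event type and payload, not taken
+ * from this patch) -- a driver would typically notify registered clients
+ * from its interrupt or commit path along these lines:
+ *
+ *	struct drm_event ev = {
+ *		.type = SOME_CUSTOM_DRM_EVENT,
+ *		.length = sizeof(u32),
+ *	};
+ *	u32 val = 0;
+ *
+ *	msm_mode_object_event_notify(&crtc->base, crtc->dev, &ev,
+ *				     (u8 *)&val);
+ *
+ * Only clients that registered for the matching event type and object id
+ * (via MSM_REGISTER_EVENT) receive the payload.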
+ */ +void msm_mode_object_event_notify(struct drm_mode_object *obj, + struct drm_device *dev, struct drm_event *event, u8 *payload); #ifdef CONFIG_DRM_MSM_DSI void __init msm_dsi_register(void); void __exit msm_dsi_unregister(void); @@ -347,6 +953,8 @@ struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count, const char *name); void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, const char *dbgname); +unsigned long msm_iomap_size(struct platform_device *pdev, const char *name); +void msm_iounmap(struct platform_device *dev, void __iomem *addr); void msm_writel(u32 data, void __iomem *addr); u32 msm_readl(const void __iomem *addr); diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 2a7348aeb38d1a785c79b874a4d6fef9bb080017..45cd190d89e0ced5d911da210b394c9279c1a26d 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark * @@ -15,6 +16,8 @@ * this program. If not, see . */ +#include +#include #include #include #include @@ -23,15 +26,17 @@ #include "msm_kms.h" #include "msm_gem.h" +#define MSM_FRAMEBUFFER_FLAG_KMAP BIT(0) + struct msm_framebuffer { struct drm_framebuffer base; const struct msm_format *format; + void *vaddr[MAX_PLANE]; + atomic_t kmap_count; + u32 flags; }; #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base) -static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, - const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); - static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { .create_handle = drm_gem_fb_create_handle, .destroy = drm_gem_fb_destroy, @@ -40,8 +45,16 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { #ifdef CONFIG_DEBUG_FS void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) { - int i, n = fb->format->num_planes; + struct msm_framebuffer *msm_fb; + int i, n; + + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return; + } + msm_fb = to_msm_framebuffer(fb); + n = fb->format->num_planes; seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n", fb->width, fb->height, (char *)&fb->format->format, drm_framebuffer_read_refcount(fb), fb->base.id); @@ -54,6 +67,113 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) } #endif +void msm_framebuffer_set_keepattrs(struct drm_framebuffer *fb, bool enable) +{ + struct msm_framebuffer *msm_fb; + int i, n; + struct drm_gem_object *bo; + struct msm_gem_object *msm_obj; + + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return; + } + + if (!fb->format) { + DRM_ERROR("from:%pS null fb->format\n", + __builtin_return_address(0)); + return; + } + + msm_fb = to_msm_framebuffer(fb); + n = fb->format->num_planes; + for (i = 0; i < n; i++) { + bo = msm_framebuffer_bo(fb, i); + if (bo) { + msm_obj = to_msm_bo(bo); + if (enable) + msm_obj->flags |= MSM_BO_KEEPATTRS; + else + msm_obj->flags &= ~MSM_BO_KEEPATTRS; + } + } +} + +void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable) +{ + struct msm_framebuffer *msm_fb; + + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return; + } + + msm_fb = to_msm_framebuffer(fb); + if (enable) + msm_fb->flags |= MSM_FRAMEBUFFER_FLAG_KMAP; + else + msm_fb->flags &= ~MSM_FRAMEBUFFER_FLAG_KMAP; +} + +static int msm_framebuffer_kmap(struct drm_framebuffer 
*fb) +{ + struct msm_framebuffer *msm_fb; + int i, n; + struct drm_gem_object *bo; + + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return -EINVAL; + } + + msm_fb = to_msm_framebuffer(fb); + n = fb->format->num_planes; + if (atomic_inc_return(&msm_fb->kmap_count) > 1) + return 0; + + for (i = 0; i < n; i++) { + bo = msm_framebuffer_bo(fb, i); + if (!bo || !bo->dma_buf) { + msm_fb->vaddr[i] = NULL; + continue; + } + dma_buf_begin_cpu_access(bo->dma_buf, DMA_BIDIRECTIONAL); + msm_fb->vaddr[i] = dma_buf_kmap(bo->dma_buf, 0); + DRM_INFO("FB[%u]: vaddr[%d]:%ux%u:0x%llx\n", fb->base.id, i, + fb->width, fb->height, (u64) msm_fb->vaddr[i]); + } + + return 0; +} + +static void msm_framebuffer_kunmap(struct drm_framebuffer *fb) +{ + struct msm_framebuffer *msm_fb; + int i, n; + struct drm_gem_object *bo; + + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return; + } + + msm_fb = to_msm_framebuffer(fb); + n = fb->format->num_planes; + if (atomic_dec_return(&msm_fb->kmap_count) > 0) + return; + + for (i = 0; i < n; i++) { + bo = msm_framebuffer_bo(fb, i); + if (!bo || !msm_fb->vaddr[i]) + continue; + if (bo->dma_buf) { + dma_buf_kunmap(bo->dma_buf, 0, msm_fb->vaddr[i]); + dma_buf_end_cpu_access(bo->dma_buf, DMA_BIDIRECTIONAL); + } + msm_fb->vaddr[i] = NULL; + } +} + /* prepare/pin all the fb's bo's for scanout. Note that it is not valid * to prepare an fb more multiple different initiator 'id's. But that * should be fine, since only the scanout (mdpN) side of things needs @@ -62,9 +182,17 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) int msm_framebuffer_prepare(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace) { - int ret, i, n = fb->format->num_planes; + struct msm_framebuffer *msm_fb; + int ret, i, n; uint64_t iova; + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return -EINVAL; + } + + msm_fb = to_msm_framebuffer(fb); + n = fb->format->num_planes; for (i = 0; i < n; i++) { ret = msm_gem_get_iova(fb->obj[i], aspace, &iova); DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret); @@ -72,13 +200,28 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, return ret; } + if (msm_fb->flags & MSM_FRAMEBUFFER_FLAG_KMAP) + msm_framebuffer_kmap(fb); + return 0; } void msm_framebuffer_cleanup(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace) { - int i, n = fb->format->num_planes; + struct msm_framebuffer *msm_fb; + int i, n; + + if (fb == NULL) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return; + } + + msm_fb = to_msm_framebuffer(fb); + n = fb->format->num_planes; + + if (msm_fb->flags & MSM_FRAMEBUFFER_FLAG_KMAP) + msm_framebuffer_kunmap(fb); for (i = 0; i < n; i++) msm_gem_put_iova(fb->obj[i], aspace); @@ -87,20 +230,54 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb, uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace, int plane) { + + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return -EINVAL; + } + if (!fb->obj[plane]) return 0; + return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane]; } +uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb, + int plane) +{ + struct msm_framebuffer *msm_fb; + dma_addr_t phys_addr; + + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return -EINVAL; + } + + msm_fb = to_msm_framebuffer(fb); + + if (!msm_fb->base.obj[plane]) + return 0; + + phys_addr = 
msm_gem_get_dma_addr(msm_fb->base.obj[plane]); + if (!phys_addr) + return 0; + + return phys_addr + fb->offsets[plane]; +} + struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane) { + if (!fb) { + DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0)); + return ERR_PTR(-EINVAL); + } + return drm_gem_fb_get_obj(fb, plane); } const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb) { - struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); - return msm_fb->format; + return fb ? (to_msm_framebuffer(fb))->format : NULL; } struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, @@ -132,7 +309,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, return ERR_PTR(ret); } -static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, +struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) { struct msm_drm_private *priv = dev->dev_private; @@ -140,14 +317,15 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, struct msm_framebuffer *msm_fb = NULL; struct drm_framebuffer *fb; const struct msm_format *format; - int ret, i, n; + int ret, i, num_planes; unsigned int hsub, vsub; + bool is_modified = false; DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)", dev, mode_cmd, mode_cmd->width, mode_cmd->height, (char *)&mode_cmd->pixel_format); - n = drm_format_num_planes(mode_cmd->pixel_format); + num_planes = drm_format_num_planes(mode_cmd->pixel_format); hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); @@ -169,28 +347,55 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, fb = &msm_fb->base; msm_fb->format = format; + atomic_set(&msm_fb->kmap_count, 0); + + if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { + for (i = 0; i < ARRAY_SIZE(mode_cmd->modifier); i++) { + if (mode_cmd->modifier[i]) { + is_modified = true; + break; + } + } + } - if (n > ARRAY_SIZE(fb->obj)) { + if (num_planes > ARRAY_SIZE(fb->obj)) { ret = -EINVAL; goto fail; } - for (i = 0; i < n; i++) { - unsigned int width = mode_cmd->width / (i ? hsub : 1); - unsigned int height = mode_cmd->height / (i ? vsub : 1); - unsigned int min_size; - - min_size = (height - 1) * mode_cmd->pitches[i] - + width * drm_format_plane_cpp(mode_cmd->pixel_format, i) - + mode_cmd->offsets[i]; - - if (bos[i]->size < min_size) { + if (is_modified) { + if (!kms->funcs->check_modified_format) { + dev_err(dev->dev, "can't check modified fb format\n"); ret = -EINVAL; goto fail; + } else { + ret = kms->funcs->check_modified_format( + kms, msm_fb->format, mode_cmd, bos); + if (ret) + goto fail; + } + } else { + for (i = 0; i < num_planes; i++) { + unsigned int width = mode_cmd->width / (i ? hsub : 1); + unsigned int height = mode_cmd->height / (i ? 
vsub : 1); + unsigned int min_size; + unsigned int cpp; + + cpp = drm_format_plane_cpp(mode_cmd->pixel_format, i); + + min_size = (height - 1) * mode_cmd->pitches[i] + + width * cpp + + mode_cmd->offsets[i]; + + if (bos[i]->size < min_size) { + ret = -EINVAL; + goto fail; + } } + } + for (i = 0; i < num_planes; i++) msm_fb->base.obj[i] = bos[i]; - } drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index f59ca27a4a357492f96d0b7e37c76536037b40d6..f6a8ea29d2e4434e1b022b60a77adcbb95eb78ce 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark * @@ -19,12 +20,14 @@ #include #include #include +#include #include "msm_drv.h" #include "msm_fence.h" #include "msm_gem.h" #include "msm_gpu.h" #include "msm_mmu.h" +#include "sde_dbg.h" static void msm_gem_vunmap_locked(struct drm_gem_object *obj); @@ -77,6 +80,9 @@ static struct page **get_pages(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); + if (obj->import_attach) + return msm_obj->pages; + if (!msm_obj->pages) { struct drm_device *dev = obj->dev; struct page **p; @@ -104,12 +110,13 @@ static struct page **get_pages(struct drm_gem_object *obj) return ptr; } - /* For non-cached buffers, ensure the new pages are clean - * because display controller, GPU, etc. are not coherent: + /* + * Make sure to flush the CPU cache for newly allocated memory + * so we don't get ourselves into trouble with a dirty cache */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_map_sg(dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); } return msm_obj->pages; @@ -177,6 +184,24 @@ void msm_gem_put_pages(struct drm_gem_object *obj) /* when we start tracking the pin count, then do something here */ } +void msm_gem_sync(struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj; + + if (!obj) + return; + + msm_obj = to_msm_bo(obj); + + /* + * dma_sync_sg_for_device synchronises a single contiguous or + * scatter/gather mapping for the CPU and device. + */ + dma_sync_sg_for_device(obj->dev->dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); +} + + int msm_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma) { @@ -298,6 +323,25 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) return offset; } +dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct sg_table *sgt; + + if (!msm_obj->sgt) { + sgt = dma_buf_map_attachment(obj->import_attach, + DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(sgt)) { + DRM_ERROR("dma_buf_map_attachment failure, err=%d\n", + PTR_ERR(sgt)); + return 0; + } + msm_obj->sgt = sgt; + } + + return sg_phys(msm_obj->sgt->sgl); +} + static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, struct msm_gem_address_space *aspace) { @@ -352,7 +396,14 @@ put_iova(struct drm_gem_object *obj) WARN_ON(!mutex_is_locked(&msm_obj->lock)); list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { - msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt); + msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt, + msm_obj->flags); + /* + * put_iova removes the domain connected to the obj which makes + * the aspace inaccessible. 
Store the aspace, as it is used to + * update the active_list during gem_free_obj and gem_purge. + */ + msm_obj->aspace = vma->aspace; + del_vma(vma); } } @@ -376,6 +427,51 @@ int msm_gem_get_iova(struct drm_gem_object *obj, if (!vma) { struct page **pages; + struct device *dev; + struct dma_buf *dmabuf; + bool reattach = false; + + /* + * both secure and non-secure domains are attached to the default + * device (non-secure) with dma_buf_attach during + * msm_gem_prime_import. Detach and attach the correct device + * to the dma_buf based on the aspace domain. + */ + dev = msm_gem_get_aspace_device(aspace); + if (dev && obj->import_attach && + (dev != obj->import_attach->dev)) { + dmabuf = obj->import_attach->dmabuf; + + DRM_DEBUG("detach nsec-dev:%pK attach sec-dev:%pK\n", + obj->import_attach->dev, dev); + SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt); + + + if (msm_obj->sgt) + dma_buf_unmap_attachment(obj->import_attach, + msm_obj->sgt, + DMA_BIDIRECTIONAL); + dma_buf_detach(dmabuf, obj->import_attach); + + obj->import_attach = dma_buf_attach(dmabuf, dev); + if (IS_ERR(obj->import_attach)) { + DRM_ERROR("dma_buf_attach failure, err=%ld\n", + PTR_ERR(obj->import_attach)); + goto unlock; + } + reattach = true; + } + + /* perform delayed import for buffers without existing sgt */ + if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt)) + || reattach) { + ret = msm_gem_delayed_import(obj); + if (ret) { + DRM_ERROR("delayed dma-buf import failed %d\n", + ret); + goto unlock; + } + } vma = add_vma(obj, aspace); if (IS_ERR(vma)) { @@ -390,13 +486,20 @@ int msm_gem_get_iova(struct drm_gem_object *obj, } ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt, - obj->size >> PAGE_SHIFT); + obj->size >> PAGE_SHIFT, + msm_obj->flags); if (ret) goto fail; } *iova = vma->iova; + if (aspace && !msm_obj->in_active_list) { + mutex_lock(&aspace->list_lock); + msm_gem_add_obj_to_aspace_active_list(aspace, obj); + mutex_unlock(&aspace->list_lock); + } + mutex_unlock(&msm_obj->lock); return 0; @@ -435,6 +538,60 @@ void msm_gem_put_iova(struct drm_gem_object *obj, // things that are no longer needed.. } +void msm_gem_aspace_domain_attach_detach_update( + struct msm_gem_address_space *aspace, + bool is_detach) +{ + struct msm_gem_object *msm_obj; + struct drm_gem_object *obj; + struct aspace_client *aclient; + int ret; + uint64_t iova; + + if (!aspace) + return; + + mutex_lock(&aspace->list_lock); + if (is_detach) { + /* Indicate to clients domain is getting detached */ + list_for_each_entry(aclient, &aspace->clients, list) { + if (aclient->cb) + aclient->cb(aclient->cb_data, + is_detach); + } + + /* + * Unmap active buffers. Typically clients should do this when + * the callback is called, but it also needs to be done for the + * buffers which are not attached to any planes.
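+	 *
+	 * (Editor's note, not part of the patch: a minimal usage sketch.
+	 * A display backend would typically bracket a secure/non-secure
+	 * SMMU domain switch with, holding no aspace locks:
+	 *
+	 *	msm_gem_aspace_domain_attach_detach_update(aspace, true);
+	 *	... switch the domain ...
+	 *	msm_gem_aspace_domain_attach_detach_update(aspace, false);
+	 *
+	 * The function takes aspace->list_lock internally, so callers
+	 * must not hold it.)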
+ */ + list_for_each_entry(msm_obj, &aspace->active_list, iova_list) { + obj = &msm_obj->base; + if (obj->import_attach) + put_iova(obj); + } + } else { + /* map active buffers */ + list_for_each_entry(msm_obj, &aspace->active_list, iova_list) { + obj = &msm_obj->base; + ret = msm_gem_get_iova(obj, aspace, &iova); + if (ret) { + mutex_unlock(&aspace->list_lock); + return; + } + } + + /* Indicate to clients domain is attached */ + list_for_each_entry(aclient, &aspace->clients, list) { + if (aclient->cb) + aclient->cb(aclient->cb_data, + is_detach); + } + } + mutex_unlock(&aspace->list_lock); +} + int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { @@ -493,8 +650,20 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) ret = PTR_ERR(pages); goto fail; } - msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, + + if (obj->import_attach) { + ret = dma_buf_begin_cpu_access( + obj->import_attach->dmabuf, DMA_BIDIRECTIONAL); + if (ret) + goto fail; + + msm_obj->vaddr = + dma_buf_vmap(obj->import_attach->dmabuf); + } else { + msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + } + if (msm_obj->vaddr == NULL) { ret = -ENOMEM; goto fail; @@ -569,6 +738,12 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass) mutex_lock_nested(&msm_obj->lock, subclass); put_iova(obj); + if (msm_obj->aspace) { + mutex_lock(&msm_obj->aspace->list_lock); + msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace, + obj); + mutex_unlock(&msm_obj->aspace->list_lock); + } msm_gem_vunmap_locked(obj); @@ -601,7 +776,14 @@ static void msm_gem_vunmap_locked(struct drm_gem_object *obj) if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj))) return; - vunmap(msm_obj->vaddr); + if (obj->import_attach) { + dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr); + dma_buf_end_cpu_access(obj->import_attach->dmabuf, + DMA_BIDIRECTIONAL); + } else { + vunmap(msm_obj->vaddr); + } + msm_obj->vaddr = NULL; } @@ -803,6 +985,12 @@ void msm_gem_free_object(struct drm_gem_object *obj) mutex_lock(&msm_obj->lock); put_iova(obj); + if (msm_obj->aspace) { + mutex_lock(&msm_obj->aspace->list_lock); + msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace, + obj); + mutex_unlock(&msm_obj->aspace->list_lock); + } if (obj->import_attach) { if (msm_obj->vaddr) @@ -887,6 +1075,9 @@ static int msm_gem_new_impl(struct drm_device *dev, INIT_LIST_HEAD(&msm_obj->submit_entry); INIT_LIST_HEAD(&msm_obj->vmas); + INIT_LIST_HEAD(&msm_obj->iova_list); + msm_obj->aspace = NULL; + msm_obj->in_active_list = false; if (struct_mutex_locked) { WARN_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -980,13 +1171,61 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, return _msm_gem_new(dev, size, flags, false); } +int msm_gem_delayed_import(struct drm_gem_object *obj) +{ + struct dma_buf_attachment *attach; + struct sg_table *sgt; + struct msm_gem_object *msm_obj; + int ret = 0; + + if (!obj) { + DRM_ERROR("NULL drm gem object\n"); + return -EINVAL; + } + + msm_obj = to_msm_bo(obj); + + if (!obj->import_attach) { + DRM_ERROR("NULL dma_buf_attachment in drm gem object\n"); + return -EINVAL; + } + + attach = obj->import_attach; + attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP; + + if (msm_obj->flags & MSM_BO_SKIPSYNC) + attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; + + if (msm_obj->flags & MSM_BO_KEEPATTRS) + attach->dma_map_attrs |= + DMA_ATTR_IOMMU_USE_UPSTREAM_HINT; + + /* + * dma_buf_map_attachment will 
call dma_map_sg for ion buffer + * mapping, and iova will get mapped when the function returns. + */ + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + DRM_ERROR("dma_buf_map_attachment failure, err=%d\n", + ret); + goto fail_import; + } + msm_obj->sgt = sgt; + msm_obj->pages = NULL; + +fail_import: + return ret; +} + struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct dma_buf *dmabuf, struct sg_table *sgt) { struct msm_gem_object *msm_obj; - struct drm_gem_object *obj; + struct drm_gem_object *obj = NULL; uint32_t size; - int ret, npages; + int ret; + unsigned long flags = 0; /* if we don't have IOMMU, don't bother pretending we can import: */ if (!iommu_present(&platform_bus_type)) { @@ -996,28 +1235,39 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, size = PAGE_ALIGN(dmabuf->size); - ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false); + ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, + false); if (ret) goto fail; drm_gem_private_object_init(dev, obj, size); - npages = size / PAGE_SIZE; - msm_obj = to_msm_bo(obj); mutex_lock(&msm_obj->lock); msm_obj->sgt = sgt; - msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); - if (!msm_obj->pages) { - mutex_unlock(&msm_obj->lock); - ret = -ENOMEM; - goto fail; - } + msm_obj->pages = NULL; + /* + * 1) If sg table is NULL, user should call msm_gem_delayed_import + * to add back the sg table to the drm gem object. + * + * 2) Add buffer flag unconditionally for all import cases. + * # Cached buffer will be attached immediately hence sgt will + * be available upon gem obj creation. + * # Un-cached buffer will follow delayed attach hence sgt + * will be NULL upon gem obj creation. + */ + msm_obj->flags |= MSM_BO_EXTBUF; - ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); + /* + * For all uncached buffers, there is no need to perform cache + * maintenance on dma map/unmap time. 
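+	 *
+	 * (Editor's note: this is why MSM_BO_SKIPSYNC is set below when
+	 * ION_FLAG_CACHED is absent; msm_gem_delayed_import() translates
+	 * it into DMA_ATTR_SKIP_CPU_SYNC on the attachment, assuming the
+	 * buffer was allocated through ION and carries ION flags.)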
+ */ + ret = dma_buf_get_flags(dmabuf, &flags); if (ret) { - mutex_unlock(&msm_obj->lock); - goto fail; + DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret); + } else if ((flags & ION_FLAG_CACHED) == 0) { + DRM_DEBUG("Buffer is uncached type\n"); + msm_obj->flags |= MSM_BO_SKIPSYNC; } mutex_unlock(&msm_obj->lock); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index c5d9bd3e47a8d20100f4c29da961aba368fb8752..49a44124155e417da655cacf2db8cbec209083e2 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -24,6 +24,39 @@ /* Additional internal-use only BO flags: */ #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */ +#define MSM_BO_KEEPATTRS 0x20000000 /* keep h/w bus attributes */ +#define MSM_BO_SKIPSYNC 0x40000000 /* skip dmabuf cpu sync */ +#define MSM_BO_EXTBUF 0x80000000 /* indicate BO is an import buffer */ + +struct msm_gem_object; + +struct msm_gem_aspace_ops { + int (*map)(struct msm_gem_address_space *space, struct msm_gem_vma *vma, + struct sg_table *sgt, int npages, unsigned int flags); + + void (*unmap)(struct msm_gem_address_space *space, + struct msm_gem_vma *vma, struct sg_table *sgt, + unsigned int flags); + + void (*destroy)(struct msm_gem_address_space *space); + void (*add_to_active)(struct msm_gem_address_space *space, + struct msm_gem_object *obj); + void (*remove_from_active)(struct msm_gem_address_space *space, + struct msm_gem_object *obj); + int (*register_cb)(struct msm_gem_address_space *space, + void (*cb)(void *cb, bool data), + void *cb_data); + int (*unregister_cb)(struct msm_gem_address_space *space, + void (*cb)(void *cb, bool data), + void *cb_data); +}; + +struct aspace_client { + void (*cb)(void *cb, bool data); + void *cb_data; + struct list_head list; +}; + struct msm_gem_address_space { const char *name; @@ -34,6 +67,14 @@ struct msm_gem_address_space { spinlock_t lock; /* Protects drm_mm node allocation/removal */ struct msm_mmu *mmu; struct kref kref; + bool domain_attached; + const struct msm_gem_aspace_ops *ops; + struct drm_device *dev; + /* list of mapped objects */ + struct list_head active_list; + /* list of clients */ + struct list_head clients; + struct mutex list_lock; /* Protects active_list & clients */ }; struct msm_gem_vma { @@ -91,6 +132,10 @@ struct msm_gem_object { */ struct drm_mm_node *vram_node; struct mutex lock; /* Protects resources associated with bo */ + struct list_head iova_list; + + struct msm_gem_address_space *aspace; + bool in_active_list; }; #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 13403c6da6c75012fa5f17f4b0b63075ddf20874..c12fc7bfd11bf2b38bdcf308640cc52f6cd02d4f 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark * @@ -19,6 +20,7 @@ #include "msm_gem.h" #include +#include struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) { @@ -77,3 +79,91 @@ struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj) return msm_obj->resv; } + + +struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct dma_buf_attachment *attach; + struct sg_table *sgt = NULL; + struct drm_gem_object *obj; + struct device *attach_dev; + unsigned long flags = 0; + int ret; + + if (!dma_buf) + return ERR_PTR(-EINVAL); + + if (dma_buf->priv && !dma_buf->ops->begin_cpu_access) { + obj = dma_buf->priv; + if (obj->dev == dev) { + /* + * Importing dmabuf exported from our own gem increases + * refcount on gem itself instead of f_count of dmabuf. + */ + drm_gem_object_get(obj); + return obj; + } + } + + if (!dev->driver->gem_prime_import_sg_table) { + DRM_ERROR("NULL gem_prime_import_sg_table\n"); + return ERR_PTR(-EINVAL); + } + + attach_dev = dev->dev; + attach = dma_buf_attach(dma_buf, attach_dev); + if (IS_ERR(attach)) { + DRM_ERROR("dma_buf_attach failure, err=%ld\n", PTR_ERR(attach)); + return ERR_CAST(attach); + } + + get_dma_buf(dma_buf); + + /* + * For cached buffers where CPU access is required, + * dma_buf_map_attachment must be called now to allow user-space + * to perform CPU sync begin/end; otherwise do delayed mapping + * during the commit. + */ + ret = dma_buf_get_flags(dma_buf, &flags); + if (ret) { + DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret); + goto fail_put; + } else if (flags & ION_FLAG_CACHED) { + attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP; + sgt = dma_buf_map_attachment( + attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + DRM_ERROR( + "dma_buf_map_attachment failure, err=%d\n", + ret); + goto fail_detach; + } + } + + /* + * If importing a NULL sg table (i.e. for uncached buffers), + * create a drm gem object with only the dma buf attachment. + */ + obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); + DRM_ERROR("gem_prime_import_sg_table failure, err=%d\n", ret); + goto fail_unmap; + } + + obj->import_attach = attach; + + return obj; + +fail_unmap: + if (sgt) + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); +fail_detach: + dma_buf_detach(dma_buf, attach); +fail_put: + dma_buf_put(dma_buf); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index ffbec224551b52f927eebb92b4319134b3399e2c..da9ae7a69fbcc047b01d4a6dc88344eb72c185e5 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2016 Red Hat * Author: Rob Clark * @@ -19,15 +20,184 @@ #include "msm_gem.h" #include "msm_mmu.h" +/* SDE address space operations */ +static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, + unsigned int flags) +{ + if (!vma->iova) + return; + + if (aspace) { + aspace->mmu->funcs->unmap_dma_buf(aspace->mmu, sgt, + DMA_BIDIRECTIONAL, flags); + } + + vma->iova = 0; + msm_gem_address_space_put(aspace); +} + +static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, + int npages, unsigned int flags) +{ + int ret = -EINVAL; + + if (!aspace || !aspace->domain_attached) + return ret; + + ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, + DMA_BIDIRECTIONAL, flags); + if (!ret) + vma->iova = sg_dma_address(sgt->sgl); + + /* Get a reference to the aspace to keep it around */ + kref_get(&aspace->kref); + + return ret; +} + +static void smmu_aspace_destroy(struct msm_gem_address_space *aspace) +{ + if (aspace->mmu) + aspace->mmu->funcs->destroy(aspace->mmu); +} + +static void smmu_aspace_add_to_active( + struct msm_gem_address_space *aspace, + struct msm_gem_object *msm_obj) +{ + WARN_ON(!mutex_is_locked(&aspace->list_lock)); + list_move_tail(&msm_obj->iova_list, &aspace->active_list); +} + +static void smmu_aspace_remove_from_active( + struct msm_gem_address_space *aspace, + struct msm_gem_object *obj) +{ + struct msm_gem_object *msm_obj, *next; + + WARN_ON(!mutex_is_locked(&aspace->list_lock)); + + list_for_each_entry_safe(msm_obj, next, &aspace->active_list, + iova_list) { + if (msm_obj == obj) { + list_del(&msm_obj->iova_list); + break; + } + } +} + +static int smmu_aspace_register_cb( + struct msm_gem_address_space *aspace, + void (*cb)(void *, bool), + void *cb_data) +{ + struct aspace_client *aclient = NULL; + struct aspace_client *temp; + + if (!aspace) + return -EINVAL; + + if (!aspace->domain_attached) + return -EACCES; + + aclient = kzalloc(sizeof(*aclient), GFP_KERNEL); + if (!aclient) + return -ENOMEM; + + aclient->cb = cb; + aclient->cb_data = cb_data; + INIT_LIST_HEAD(&aclient->list); + + /* check if callback is already registered */ + mutex_lock(&aspace->list_lock); + list_for_each_entry(temp, &aspace->clients, list) { + if ((temp->cb == aclient->cb) && + (temp->cb_data == aclient->cb_data)) { + kfree(aclient); + mutex_unlock(&aspace->list_lock); + return -EEXIST; + } + } + + list_move_tail(&aclient->list, &aspace->clients); + mutex_unlock(&aspace->list_lock); + + return 0; +} + +static int smmu_aspace_unregister_cb( + struct msm_gem_address_space *aspace, + void (*cb)(void *, bool), + void *cb_data) +{ + struct aspace_client *aclient = NULL; + int rc = -ENOENT; + + if (!aspace || !cb) + return -EINVAL; + + mutex_lock(&aspace->list_lock); + list_for_each_entry(aclient, &aspace->clients, list) { + if ((aclient->cb == cb) && + (aclient->cb_data == cb_data)) { + list_del(&aclient->list); + kfree(aclient); + rc = 0; + break; + } + } + mutex_unlock(&aspace->list_lock); + + return rc; +} + +static const struct msm_gem_aspace_ops smmu_aspace_ops = { + .map = smmu_aspace_map_vma, + .unmap = smmu_aspace_unmap_vma, + .destroy = smmu_aspace_destroy, + .add_to_active = smmu_aspace_add_to_active, + .remove_from_active = smmu_aspace_remove_from_active, + .register_cb = smmu_aspace_register_cb, + .unregister_cb = smmu_aspace_unregister_cb, +}; + +struct msm_gem_address_space * +msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu 
*mmu, + const char *name) +{ + struct msm_gem_address_space *aspace; + + if (!mmu) + return ERR_PTR(-EINVAL); + + aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); + if (!aspace) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&aspace->lock); + aspace->dev = dev; + aspace->name = name; + aspace->mmu = mmu; + aspace->ops = &smmu_aspace_ops; + INIT_LIST_HEAD(&aspace->active_list); + INIT_LIST_HEAD(&aspace->clients); + kref_init(&aspace->kref); + mutex_init(&aspace->list_lock); + + return aspace; +} + static void msm_gem_address_space_destroy(struct kref *kref) { struct msm_gem_address_space *aspace = container_of(kref, struct msm_gem_address_space, kref); - drm_mm_takedown(&aspace->mm); - if (aspace->mmu) - aspace->mmu->funcs->destroy(aspace->mmu); + if (aspace && aspace->ops->destroy) + aspace->ops->destroy(aspace); + kfree(aspace); } @@ -38,9 +208,10 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace) kref_put(&aspace->kref, msm_gem_address_space_destroy); } -void -msm_gem_unmap_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, struct sg_table *sgt) +/* GPU address space operations */ +static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, + unsigned int flags) { if (!aspace || !vma->iova) return; @@ -59,9 +230,19 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace, msm_gem_address_space_put(aspace); } -int -msm_gem_map_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, struct sg_table *sgt, int npages) +void +msm_gem_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, + unsigned int flags) +{ + if (aspace && aspace->ops->unmap) + aspace->ops->unmap(aspace, vma, sgt, flags); +} + + +static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, + int npages, unsigned int flags) { int ret; @@ -91,6 +272,19 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace, return ret; } +static void iommu_aspace_destroy(struct msm_gem_address_space *aspace) +{ + drm_mm_takedown(&aspace->mm); + if (aspace->mmu) + aspace->mmu->funcs->destroy(aspace->mmu); +} + +static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = { + .map = iommu_aspace_map_vma, + .unmap = iommu_aspace_unmap_vma, + .destroy = iommu_aspace_destroy, +}; + struct msm_gem_address_space * msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, const char *name) @@ -106,6 +300,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, spin_lock_init(&aspace->lock); aspace->name = name; aspace->mmu = msm_iommu_new(dev, domain); + aspace->ops = &msm_iommu_aspace_ops; drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT), size >> PAGE_SHIFT); @@ -114,3 +309,65 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, return aspace; } + +int +msm_gem_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, int npages, + unsigned int flags) +{ + if (aspace && aspace->ops->map) + return aspace->ops->map(aspace, vma, sgt, npages, flags); + + return -EINVAL; +} + +struct device *msm_gem_get_aspace_device(struct msm_gem_address_space *aspace) +{ + struct device *client_dev = NULL; + + if (aspace && aspace->mmu && aspace->mmu->funcs->get_dev) + client_dev = aspace->mmu->funcs->get_dev(aspace->mmu); + + return client_dev; +} + +void msm_gem_add_obj_to_aspace_active_list( + struct 
msm_gem_address_space *aspace, + struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + if (aspace && aspace->ops && aspace->ops->add_to_active) + aspace->ops->add_to_active(aspace, msm_obj); +} + +void msm_gem_remove_obj_from_aspace_active_list( + struct msm_gem_address_space *aspace, + struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + if (aspace && aspace->ops && aspace->ops->remove_from_active) + aspace->ops->remove_from_active(aspace, msm_obj); +} + +int msm_gem_address_space_register_cb(struct msm_gem_address_space *aspace, + void (*cb)(void *, bool), + void *cb_data) +{ + if (aspace && aspace->ops && aspace->ops->register_cb) + return aspace->ops->register_cb(aspace, cb, cb_data); + + return -EINVAL; +} + +int msm_gem_address_space_unregister_cb(struct msm_gem_address_space *aspace, + void (*cb)(void *, bool), + void *cb_data) +{ + if (aspace && aspace->ops && aspace->ops->unregister_cb) + return aspace->ops->unregister_cb(aspace, cb, cb_data); + + return -EINVAL; +} + diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 4bbcfb579a19b0589243b4c58d5750bbd4f9905b..dacb1c54ec944bc8772f1ad07ec9e96ac27a447f 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -26,6 +26,19 @@ #define MAX_PLANE 4 +/** + * Device Private DRM Mode Flags + * drm_mode->private_flags + */ +/* Connector has interpreted seamless transition request as dynamic fps */ +#define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS (1<<0) +/* Transition to new mode requires a wait-for-vblank before the modeset */ +#define MSM_MODE_FLAG_VBLANK_PRE_MODESET (1<<1) +/* Request to switch the connector mode */ +#define MSM_MODE_FLAG_SEAMLESS_DMS (1<<2) +/* Request to switch the fps */ +#define MSM_MODE_FLAG_SEAMLESS_VRR (1<<3) + /* As there are different display controller blocks depending on the * snapdragon version, the kms support is split out and the appropriate * implementation is loaded at runtime. 
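 * (Editor's note: an illustrative sketch, not from this patch. A
 * backend typically provides a static ops table and installs it via
 * msm_kms_init(), e.g.:
 *
 *	static const struct msm_kms_funcs my_kms_funcs = {
 *		.hw_init = my_hw_init,
 *		.prepare_commit = my_prepare_commit,
 *		.complete_commit = my_complete_commit,
 *	};
 *
 *	msm_kms_init(&my_kms->base, &my_kms_funcs);
 *
 * The my_* names are hypothetical.)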
The kms module is responsible @@ -34,6 +47,7 @@ struct msm_kms_funcs { /* hw initialization: */ int (*hw_init)(struct msm_kms *kms); + int (*postinit)(struct msm_kms *kms); /* irq handling: */ void (*irq_preinstall)(struct msm_kms *kms); int (*irq_postinstall)(struct msm_kms *kms); @@ -42,15 +56,31 @@ struct msm_kms_funcs { int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); /* modeset, bracketing atomic_commit(): */ - void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state); - void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state); + void (*prepare_fence)(struct msm_kms *kms, + struct drm_atomic_state *state); + void (*prepare_commit)(struct msm_kms *kms, + struct drm_atomic_state *state); + void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state); + void (*complete_commit)(struct msm_kms *kms, + struct drm_atomic_state *state); /* functions to wait for atomic commit completed on each CRTC */ void (*wait_for_crtc_commit_done)(struct msm_kms *kms, struct drm_crtc *crtc); + /* function pointer to wait for pixel transfer to panel to complete*/ + void (*wait_for_tx_complete)(struct msm_kms *kms, + struct drm_crtc *crtc); /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */ const struct msm_format *(*get_format)(struct msm_kms *kms, const uint32_t format, - const uint64_t modifiers); + const uint64_t modifier); + /* do format checking on format modified through fb_cmd2 modifiers */ + int (*check_modified_format)(const struct msm_kms *kms, + const struct msm_format *msm_fmt, + const struct drm_mode_fb_cmd2 *cmd, + struct drm_gem_object **bos); + /* perform complete atomic check of given atomic state */ + int (*atomic_check)(struct msm_kms *kms, + struct drm_atomic_state *state); /* misc: */ long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder); @@ -58,15 +88,33 @@ struct msm_kms_funcs { struct drm_encoder *encoder, struct drm_encoder *slave_encoder, bool is_cmd_mode); + void (*postopen)(struct msm_kms *kms, struct drm_file *file); + void (*preclose)(struct msm_kms *kms, struct drm_file *file); + void (*postclose)(struct msm_kms *kms, struct drm_file *file); + void (*lastclose)(struct msm_kms *kms, + struct drm_modeset_acquire_ctx *ctx); + int (*register_events)(struct msm_kms *kms, + struct drm_mode_object *obj, u32 event, bool en); void (*set_encoder_mode)(struct msm_kms *kms, struct drm_encoder *encoder, bool cmd_mode); + /* pm suspend/resume hooks */ + int (*pm_suspend)(struct device *dev); + int (*pm_resume)(struct device *dev); /* cleanup: */ void (*destroy)(struct msm_kms *kms); + /* get address space */ + struct msm_gem_address_space *(*get_address_space)( + struct msm_kms *kms, + unsigned int domain); #ifdef CONFIG_DEBUG_FS /* debugfs: */ int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor); #endif + /* handle continuous splash */ + int (*cont_splash_config)(struct msm_kms *kms); + /* check for continuous splash status */ + bool (*check_for_splash)(struct msm_kms *kms); }; struct msm_kms { @@ -79,6 +127,18 @@ struct msm_kms { struct msm_gem_address_space *aspace; }; +/** + * Subclass of drm_atomic_state, to allow kms backend to have driver + * private global state. The kms backend can do whatever it wants + * with the ->state ptr. On ->atomic_state_clear() the ->state ptr + * is kfree'd and set back to NULL. 
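+ *
+ * (Editor's note: a minimal sketch of the intended use, assuming a
+ * hypothetical driver-private struct my_global_state:
+ *
+ *	struct msm_kms_state *kstate = to_kms_state(state);
+ *
+ *	if (!kstate->state)
+ *		kstate->state = kzalloc(sizeof(struct my_global_state),
+ *					GFP_KERNEL);
+ *
+ * The backend owns the allocation; the core only kfree()s it on
+ * ->atomic_state_clear(), as described above.)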
+ */ +struct msm_kms_state { + struct drm_atomic_state base; + void *state; +}; +#define to_kms_state(x) container_of(x, struct msm_kms_state, base) + static inline void msm_kms_init(struct msm_kms *kms, const struct msm_kms_funcs *funcs) { @@ -95,6 +155,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) { return NULL; }; struct msm_kms *mdp5_kms_init(struct drm_device *dev); int msm_mdss_init(struct drm_device *dev); void msm_mdss_destroy(struct drm_device *dev); +struct msm_kms *mdp5_kms_init(struct drm_device *dev); int msm_mdss_enable(struct msm_mdss *mdss); int msm_mdss_disable(struct msm_mdss *mdss); #else @@ -118,4 +179,39 @@ static inline int msm_mdss_disable(struct msm_mdss *mdss) return 0; } #endif + +struct msm_kms *sde_kms_init(struct drm_device *dev); + + +/** + * Mode Set Utility Functions + */ +static inline bool msm_is_mode_seamless(const struct drm_display_mode *mode) +{ + return (mode->flags & DRM_MODE_FLAG_SEAMLESS); +} + +static inline bool msm_is_mode_seamless_dms(const struct drm_display_mode *mode) +{ + return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DMS) + : false; +} + +static inline bool msm_is_mode_dynamic_fps(const struct drm_display_mode *mode) +{ + return ((mode->flags & DRM_MODE_FLAG_SEAMLESS) && + (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS)); +} + +static inline bool msm_is_mode_seamless_vrr(const struct drm_display_mode *mode) +{ + return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_VRR) + : false; +} + +static inline bool msm_needs_vblank_pre_modeset( + const struct drm_display_mode *mode) +{ + return (mode->private_flags & MSM_MODE_FLAG_VBLANK_PRE_MODESET); +} #endif /* __MSM_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index aa2c5d4580c820b2e54c16317c121994bd7f37a9..ee6cbcd580794c477adeb61766b13ac4914f90f0 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -20,14 +20,40 @@ #include +struct msm_mmu; + +enum msm_mmu_domain_type { + MSM_SMMU_DOMAIN_UNSECURE, + MSM_SMMU_DOMAIN_NRT_UNSECURE, + MSM_SMMU_DOMAIN_SECURE, + MSM_SMMU_DOMAIN_NRT_SECURE, + MSM_SMMU_DOMAIN_MAX, +}; + struct msm_mmu_funcs { int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt); void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt); int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, - unsigned len, int prot); + unsigned int len, int prot); int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, - unsigned len); + unsigned int len); + int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt, + enum dma_data_direction dir); + void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt, + enum dma_data_direction dir); + int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt, + int dir, u32 flags); + void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt, + int dir, u32 flags); void (*destroy)(struct msm_mmu *mmu); + bool (*is_domain_secure)(struct msm_mmu *mmu); + int (*set_attribute)(struct msm_mmu *mmu, + enum iommu_attr attr, void *data); + int (*one_to_one_map)(struct msm_mmu *mmu, uint32_t iova, + uint32_t dest_address, uint32_t size, int prot); + int (*one_to_one_unmap)(struct msm_mmu *mmu, uint32_t dest_address, + uint32_t size); + struct device *(*get_dev)(struct msm_mmu *mmu); }; struct msm_mmu { @@ -45,7 +71,8 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, } struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); -struct msm_mmu 
*msm_gpummu_new(struct device *dev, struct msm_gpu *gpu); +struct msm_mmu *msm_smmu_new(struct device *dev, + enum msm_mmu_domain_type domain); static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, int (*handler)(void *arg, unsigned long iova, int flags)) @@ -54,4 +81,8 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, mmu->handler = handler; } +/* SDE smmu driver initialize and cleanup functions */ +int __init msm_smmu_driver_init(void); +void __exit msm_smmu_driver_cleanup(void); + #endif /* __MSM_MMU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c index 22c494bbe5fbafcf779f3b67a4b7bcec3851372e..b7ed1ab4c27efaf311b596d93ca14efa210093b7 100644 --- a/drivers/gpu/drm/msm/msm_smmu.c +++ b/drivers/gpu/drm/msm/msm_smmu.c @@ -173,13 +173,12 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova, { struct msm_smmu *smmu = to_msm_smmu(mmu); struct msm_smmu_client *client = msm_smmu_to_client(smmu); - size_t ret; - - ret = iommu_map_sg(client->mmu_mapping->domain, iova, sgt->sgl, - sgt->nents, prot); - WARN_ON((int)ret < 0); + size_t ret = 0; if (sgt && sgt->sgl) { + ret = iommu_map_sg(client->mmu_mapping->domain, iova, sgt->sgl, + sgt->nents, prot); + WARN_ON((int)ret < 0); DRM_DEBUG("%pad/0x%x/0x%x/\n", &sgt->sgl->dma_address, sgt->sgl->dma_length, prot); SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length, @@ -408,11 +407,19 @@ struct msm_mmu *msm_smmu_new(struct device *dev, { struct msm_smmu *smmu; struct device *client_dev; + bool smmu_full_map; smmu = kzalloc(sizeof(*smmu), GFP_KERNEL); if (!smmu) return ERR_PTR(-ENOMEM); + smmu_full_map = of_property_read_bool(dev->of_node, + "qcom,fullsize-va-map"); + if (smmu_full_map) { + msm_smmu_domains[domain].va_start = SZ_128K; + msm_smmu_domains[domain].va_size = SZ_4G - SZ_128K; + } + client_dev = msm_smmu_device_create(dev, domain, smmu); if (IS_ERR(client_dev)) { kfree(smmu); @@ -489,6 +496,13 @@ static int _msm_smmu_create_mapping(struct msm_smmu_client *client, } } + if (!client->dev->dma_parms) + client->dev->dma_parms = devm_kzalloc(client->dev, + sizeof(*client->dev->dma_parms), GFP_KERNEL); + + dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32)); + dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64)); + iommu_set_fault_handler(client->mmu_mapping->domain, msm_smmu_fault_handler, (void *)client); @@ -565,6 +579,7 @@ static struct platform_driver msm_smmu_driver = { .driver = { .name = "msmdrm_smmu", .of_match_table = msm_smmu_dt_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 6aa23e10f995d3fe3810606ac772aed5ea4ca62c..3a1b6c24dd1749ce27bee9cd69f0af5e5ab59d94 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -1099,7 +1099,7 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector, /* convert fb val to drm framebuffer and prepare it */ c_state->out_fb = - drm_framebuffer_lookup(connector->dev, val); + drm_framebuffer_lookup(connector->dev, NULL, val); if (!c_state->out_fb && val) { SDE_ERROR("failed to look up fb %lld\n", val); rc = -EFAULT; @@ -2129,7 +2129,7 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, mutex_init(&c_conn->lock); - rc = drm_mode_connector_attach_encoder(&c_conn->base, encoder); + rc = drm_connector_attach_encoder(&c_conn->base, encoder); if (rc) { SDE_ERROR("failed to attach encoder to connector, %d\n", rc); goto error_cleanup_fence; diff 
--git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 57d22b840e9ef3e422fb3cd216cbb10949a1bc0f..5fd55313681f2fef2266ca486ec8168d55fc210a 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -1211,7 +1211,7 @@ static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc, is_crtc_roi_dirty = sde_crtc_is_crtc_roi_dirty(state); is_any_conn_roi_dirty = false; - for_each_connector_in_state(state->state, conn, conn_state, i) { + for_each_new_connector_in_state(state->state, conn, conn_state, i) { struct sde_connector *sde_conn; struct sde_connector_state *sde_conn_state; struct sde_rect conn_roi; @@ -1301,7 +1301,7 @@ static int _sde_crtc_check_autorefresh(struct drm_crtc *crtc, return 0; /* partial update active, check if autorefresh is also requested */ - for_each_connector_in_state(state->state, conn, conn_state, i) { + for_each_new_connector_in_state(state->state, conn, conn_state, i) { uint64_t autorefresh; if (!conn_state || conn_state->crtc != crtc) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 7b62ec6c96e14ddec80ad9bf4e95ae4580676e8a..00159f0c0de4f3066e6198eb9d30de98b8d1da61 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -1894,7 +1894,8 @@ static int _sde_encoder_update_rsc_client( SDE_EVTLOG_FUNC_ENTRY); if (crtc->base.id != wait_vblank_crtc_id) { - primary_crtc = drm_crtc_find(drm_enc->dev, wait_vblank_crtc_id); + primary_crtc = drm_crtc_find(drm_enc->dev, + NULL, wait_vblank_crtc_id); if (!primary_crtc) { SDE_ERROR_ENC(sde_enc, "failed to find primary crtc id %d\n", @@ -4124,24 +4125,27 @@ static int _sde_encoder_wakeup_time(struct drm_encoder *drm_enc, return 0; } -static void sde_encoder_vsync_event_handler(unsigned long data) +static void sde_encoder_vsync_event_handler(struct timer_list *t) { - struct drm_encoder *drm_enc = (struct drm_encoder *) data; - struct sde_encoder_virt *sde_enc; + struct drm_encoder *drm_enc; + struct sde_encoder_virt *sde_enc = + from_timer(sde_enc, t, vsync_event_timer); struct msm_drm_private *priv; struct msm_drm_thread *event_thread; + if (!sde_enc || !sde_enc->crtc) { + SDE_ERROR("invalid encoder parameters %d\n", !sde_enc); + return; + } + + drm_enc = &sde_enc->base; + if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) { SDE_ERROR("invalid encoder parameters\n"); return; } - sde_enc = to_sde_encoder_virt(drm_enc); priv = drm_enc->dev->dev_private; - if (!sde_enc->crtc) { - SDE_ERROR("invalid crtc"); - return; - } if (sde_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) { SDE_ERROR("invalid crtc index:%u\n", @@ -5122,9 +5126,8 @@ struct drm_encoder *sde_encoder_init( if ((disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) && disp_info->is_primary) - setup_timer(&sde_enc->vsync_event_timer, - sde_encoder_vsync_event_handler, - (unsigned long)sde_enc); + timer_setup(&sde_enc->vsync_event_timer, + sde_encoder_vsync_event_handler, 0); snprintf(name, SDE_NAME_SIZE, "rsc_enc%u", drm_enc->base.id); sde_enc->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, name, diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index bcaeb3dd27cde9689e30acebcfe5815eadde0419..939c1cf8e267b64d7a1eb5f2e4a87eb3c6efb34e 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -2140,7 +2140,7 @@ static int sde_rot_parse_dt(struct device_node *np, pdev = of_find_device_by_node(phargs.np); if (pdev) { - slice = 
llcc_slice_getd(&pdev->dev, "rotator"); + slice = llcc_slice_getd(pdev->id); if (IS_ERR_OR_NULL(slice)) { rot->pdev = NULL; SDE_ERROR("failed to get system cache %ld\n", @@ -3617,6 +3617,19 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev) sde_cfg->sui_ns_allowed = true; sde_cfg->sui_misr_supported = true; sde_cfg->sui_block_xin_mask = 0x2EE1; + } else if (IS_KONA_TARGET(hw_rev)) { + sde_cfg->has_cwb_support = true; + sde_cfg->has_wb_ubwc = true; + sde_cfg->has_qsync = true; + sde_cfg->perf.min_prefill_lines = 24; + sde_cfg->vbif_qos_nlvl = 8; + sde_cfg->ts_prefill_rev = 2; + sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0; + sde_cfg->delay_prg_fetch_start = true; + sde_cfg->sui_ns_allowed = true; + sde_cfg->sui_misr_supported = true; + sde_cfg->sui_block_xin_mask = 0x3F71; + sde_cfg->has_3d_merge_reset = true; } else { SDE_ERROR("unsupported chipset id:%X\n", hw_rev); sde_cfg->perf.min_prefill_lines = 0xffff; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index 31a4ea3d302a7fed68853c063a07b095b9fccca8..2491022b3080b3dfa860a90ff5303303ad56d403 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -47,6 +47,7 @@ #define SDE_HW_VER_501 SDE_HW_VER(5, 0, 1) /* sm8150 v2.0 */ #define SDE_HW_VER_510 SDE_HW_VER(5, 1, 0) /* sdmshrike v1.0 */ #define SDE_HW_VER_530 SDE_HW_VER(5, 3, 0) /* sm6150 v1.0 */ +#define SDE_HW_VER_600 SDE_HW_VER(6, 0, 0) /* kona */ #define IS_MSM8996_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_170) #define IS_MSM8998_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_300) @@ -55,6 +56,7 @@ #define IS_SM8150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_500) #define IS_SDMSHRIKE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_510) #define IS_SM6150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_530) +#define IS_KONA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_600) #define SDE_HW_BLK_NAME_LEN 16 diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c index 0ff08a35376c1961e1f1a035ee04fd9a61112be7..751a9632f2fc76ad0607bf36481b228d7c05ecc2 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c @@ -258,7 +258,8 @@ static void _setup_mixer_ops(struct sde_mdss_cfg *m, if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion) || IS_SM8150_TARGET(m->hwversion) || IS_SDMSHRIKE_TARGET(m->hwversion) || - IS_SM6150_TARGET(m->hwversion)) + IS_SM6150_TARGET(m->hwversion) || + IS_KONA_TARGET(m->hwversion)) ops->setup_blend_config = sde_hw_lm_setup_blend_config_sdm845; else ops->setup_blend_config = sde_hw_lm_setup_blend_config; diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 333f22ac52b0f721169ece0de32f806312969422..89622b70050d2762e1a048fee41abfffa5b08e30 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -725,7 +725,7 @@ static int sde_kms_prepare_secure_transition(struct msm_kms *kms, int i, ops = 0, ret = 0; bool old_valid_fb = false; - for_each_crtc_in_state(state, crtc, old_crtc_state, i) { + for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { if (!crtc->state || !crtc->state->active) continue; /* @@ -740,7 +740,7 @@ static int sde_kms_prepare_secure_transition(struct msm_kms *kms, * 1. 
Check if old state on the CRTC has planes * staged with valid fbs */ - for_each_plane_in_state(state, plane, plane_state, i) { + for_each_old_plane_in_state(state, plane, plane_state, i) { if (!plane_state->crtc) continue; if (plane_state->fb) { @@ -999,7 +999,7 @@ static void sde_kms_prepare_commit(struct msm_kms *kms, sde_kms->first_kickoff = false; } - for_each_crtc_in_state(state, crtc, crtc_state, i) { + for_each_old_crtc_in_state(state, crtc, crtc_state, i) { list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) @@ -1037,7 +1037,7 @@ static void sde_kms_commit(struct msm_kms *kms, } SDE_ATRACE_BEGIN("sde_kms_commit"); - for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { + for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { if (crtc->state->active) { SDE_EVT32(DRMID(crtc)); sde_crtc_commit_kickoff(crtc, old_crtc_state); @@ -1153,7 +1153,7 @@ static void sde_kms_complete_commit(struct msm_kms *kms, SDE_ATRACE_BEGIN("sde_kms_complete_commit"); - for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { + for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { sde_crtc_complete_commit(crtc, old_crtc_state); /* complete secure transitions if any */ @@ -1161,7 +1161,8 @@ static void sde_kms_complete_commit(struct msm_kms *kms, _sde_kms_secure_ctrl(sde_kms, crtc, true); } - for_each_connector_in_state(old_state, connector, old_conn_state, i) { + for_each_old_connector_in_state(old_state, connector, + old_conn_state, i) { struct sde_connector *c_conn; c_conn = to_sde_connector(connector); @@ -1176,7 +1177,7 @@ static void sde_kms_complete_commit(struct msm_kms *kms, sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false); - for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) + for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) _sde_kms_release_splash_resource(sde_kms, crtc); SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT); @@ -1253,7 +1254,7 @@ static void sde_kms_prepare_fence(struct msm_kms *kms, } /* old_state actually contains updated crtc pointers */ - for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { + for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { if (crtc->state->active) sde_crtc_prepare_commit(crtc, old_crtc_state); } @@ -2059,8 +2060,6 @@ static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file, } end: - drm_atomic_clean_old_fb(dev, plane_mask, ret); - return ret; } @@ -2252,7 +2251,7 @@ static int sde_kms_check_secure_transition(struct msm_kms *kms, dev = sde_kms->dev; /* iterate state object for active secure/non-secure crtc */ - for_each_crtc_in_state(state, crtc, crtc_state, i) { + for_each_old_crtc_in_state(state, crtc, crtc_state, i) { if (!crtc_state->active) continue; diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 97298994a82b54b3727a2184d315ab434aecf154..6a095c9ca0460e1a8b86b1c850afc4144d2fd8c8 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -2192,7 +2192,7 @@ static void _sde_plane_rot_get_fb(struct drm_plane *plane, SDE_DEBUG("cleared fb_id\n"); rstate->out_fb = NULL; } else if (!rstate->out_fb) { - fb = drm_framebuffer_lookup(plane->dev, fb_id); + fb = drm_framebuffer_lookup(plane->dev, NULL, fb_id); if (fb) { SDE_DEBUG("plane%d.%d get fb:%d\n", plane->base.id, rstate->sequence_id, fb_id); @@ -4881,7 +4881,7 @@ static void sde_plane_destroy(struct drm_plane *plane) msm_property_destroy(&psde->property_info); 
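	/*
	 * (Editor's note: the lookup-helper hunks in this file track the
	 * v4.19 DRM API change where drm_framebuffer_lookup(),
	 * drm_plane_find() and drm_connector_lookup() gained a
	 * struct drm_file * argument for lease-aware lookups; passing
	 * NULL preserves the old unfiltered behaviour.)
	 */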
mutex_destroy(&psde->lock); - drm_plane_helper_disable(plane); + drm_plane_helper_disable(plane, NULL); /* this will destroy the states as well */ drm_plane_cleanup(plane); @@ -5358,7 +5358,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, psde->pipe = pipe; psde->is_virtual = (master_plane_id != 0); INIT_LIST_HEAD(&psde->mplane_list); - master_plane = drm_plane_find(dev, master_plane_id); + master_plane = drm_plane_find(dev, NULL, master_plane_id); if (master_plane) { struct sde_plane *mpsde = to_sde_plane(master_plane); diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h index 53d72809208e958ab5234becb2e9a0279a427117..406289e8d56bd397d51faec4fac36172477ab2df 100644 --- a/drivers/gpu/drm/msm/sde/sde_trace.h +++ b/drivers/gpu/drm/msm/sde/sde_trace.h @@ -173,7 +173,7 @@ TRACE_EVENT(sde_trace_counter, #define SDE_TRACE_EVTLOG_SIZE 15 TRACE_EVENT(sde_evtlog, - TP_PROTO(const char *tag, u32 tag_id, u32 cnt, u32 data[]), + TP_PROTO(const char *tag, u32 tag_id, u32 cnt, u32 *data), TP_ARGS(tag, tag_id, cnt, data), TP_STRUCT__entry( __field(int, pid) diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c index 7d2fde4893b847fdd022eec0112c0a49242dd43d..525252e0ffad61b728abb25b7da046240f0a5c14 100644 --- a/drivers/gpu/drm/msm/sde/sde_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_wb.c @@ -84,7 +84,7 @@ int sde_wb_connector_get_modes(struct drm_connector *connector, void *display) SDE_ERROR("failed to create mode\n"); break; } - ret = drm_mode_convert_umode(mode, + ret = drm_mode_convert_umode(wb_dev->drm_dev, mode, &wb_dev->modes[i]); if (ret) { SDE_ERROR("failed to convert mode %d\n", ret); @@ -193,8 +193,8 @@ int sde_wb_connector_set_modes(struct sde_wb_device *wb_dev, struct drm_display_mode dispmode; memset(&dispmode, 0, sizeof(dispmode)); - ret = drm_mode_convert_umode(&dispmode, - &modeinfo[i]); + ret = drm_mode_convert_umode(wb_dev->drm_dev, + &dispmode, &modeinfo[i]); if (ret) { SDE_ERROR( "failed to convert mode %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x status:%d rc:%d\n", @@ -537,7 +537,7 @@ int sde_wb_config(struct drm_device *drm_dev, void *data, priv = drm_dev->dev_private; - connector = drm_connector_lookup(drm_dev, connector_id); + connector = drm_connector_lookup(drm_dev, file_priv, connector_id); if (!connector) { SDE_ERROR("failed to find connector\n"); rc = -ENOENT; diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c index e3e99d9c872e3b5ad6c1f0fb5f273e6a73d94ad3..a924de915322483d14c1ab38b5ff6f4055b7b766 100644 --- a/drivers/gpu/drm/msm/sde_dbg.c +++ b/drivers/gpu/drm/msm/sde_dbg.c @@ -5075,7 +5075,8 @@ void sde_dbg_init_dbg_buses(u32 hwversion) ARRAY_SIZE(vbif_dbg_bus_msm8998); dbg->dbgbus_dsi.entries = dsi_dbg_bus_sdm845; dbg->dbgbus_dsi.size = ARRAY_SIZE(dsi_dbg_bus_sdm845); - } else if (IS_SM8150_TARGET(hwversion) || IS_SM6150_TARGET(hwversion)) { + } else if (IS_SM8150_TARGET(hwversion) || IS_SM6150_TARGET(hwversion) || + IS_KONA_TARGET(hwversion)) { dbg->dbgbus_sde.entries = dbg_bus_sde_sm8150; dbg->dbgbus_sde.cmn.entries_size = ARRAY_SIZE(dbg_bus_sde_sm8150); diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index 41322ab658027f1d7aa0c571d01ea34444319570..7263e168d288bd1755078b1065255a9b8b132743 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -96,3 +96,4 @@ obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/ obj-y += meson/ obj-y += cros-ec-cec/ +obj-y += msm/ diff --git 
a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c index c46422ab6ddf8fb3625b864f7485cc348976840f..a5176d97b5218418e913416b8bffa040baa62e39 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c @@ -1371,7 +1371,7 @@ void *sde_rotator_inline_open(struct platform_device *pdev) goto rotator_open_error; } - ctx->slice = llcc_slice_getd(rot_dev->dev, "rotator"); + ctx->slice = llcc_slice_getd(LLCC_ROTATOR); if (IS_ERR(ctx->slice)) { rc = PTR_ERR(ctx->slice); SDEROT_ERR("failed to get system cache %d\n", rc); diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h index 39c36ffdf4f004dc2b85e867a0f0b50aad671c0a..81188e4659814d572d2da4ad5de8d2f4f70e2e39 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h @@ -96,6 +96,8 @@ struct sde_rotator_inline_cmd { void *priv_handle; }; +#if defined(CONFIG_MSM_SDE_ROTATOR) + void *sde_rotator_inline_open(struct platform_device *pdev); int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev, u32 src_pixfmt, u32 *dst_pixfmt); @@ -109,4 +111,51 @@ int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd, int sde_rotator_inline_release(void *handle); void sde_rotator_inline_reg_dump(struct platform_device *pdev); +#else + +static inline void *sde_rotator_inline_open(struct platform_device *pdev) +{ + return NULL; +} + +static inline int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev, + u32 src_pixfmt, u32 *dst_pixfmt) +{ + return 0; +} + +static inline int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev, + char *downscale_caps, int len) +{ + return 0; +} + +static inline int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev) +{ + return 0; +} + +static inline int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev, + bool input, u32 *pixfmt, int len) +{ + return 0; +} + +static inline int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd, + enum sde_rotator_inline_cmd_type cmd_type) +{ + return 0; +} + +static inline int sde_rotator_inline_release(void *handle) +{ + return 0; +} + +static inline void sde_rotator_inline_reg_dump(struct platform_device *pdev) +{ +} + +#endif + #endif /* __SDE_ROTATOR_INLINE_H__ */ diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index d4c2e424a700cad439acfbf82ab2228a6755c9cd..c0db71f6eea9f17a499e276071b4455aa3f712c7 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig @@ -10,3 +10,5 @@ source "drivers/platform/goldfish/Kconfig" source "drivers/platform/chrome/Kconfig" source "drivers/platform/mellanox/Kconfig" + +source "drivers/platform/msm/Kconfig" diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index 4b2ce58bcd9c62d29810222391073579d51de74b..0fe9f8d1b645996bb30d245e2b93a1016c2829c0 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_MIPS) += mips/ obj-$(CONFIG_OLPC) += olpc/ obj-$(CONFIG_GOLDFISH) += goldfish/ obj-$(CONFIG_CHROME_PLATFORMS) += chrome/ +obj-$(CONFIG_ARCH_QCOM) += msm/ diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 97ea41dc678fe64e8bcb41928677028faf89a639..cdc6799f2456fe9ced545cb50671357e7deda7ec 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -1007,6 +1007,43 @@ struct drm_connector { /** @bad_edid_counter: track sinks that give us an EDID
with invalid checksum */ unsigned bad_edid_counter; + /* + * @pt_scan_info: PT scan info obtained from the VCDB of EDID + * @it_scan_info: IT scan info obtained from the VCDB of EDID + * @ce_scan_info: CE scan info obtained from the VCDB of EDID + * @hdr_eotf: Electro optical transfer function obtained from HDR block + * @hdr_metadata_type_one: Metadata type one obtained from HDR block + * @hdr_max_luminance: desired max luminance obtained from HDR block + * @hdr_avg_luminance: desired avg luminance obtained from HDR block + * @hdr_min_luminance: desired min luminance obtained from HDR block + * @hdr_supported: does the sink support HDR content + */ + u8 pt_scan_info; + u8 it_scan_info; + u8 ce_scan_info; + u32 hdr_eotf; + bool hdr_metadata_type_one; + u32 hdr_max_luminance; + u32 hdr_avg_luminance; + u32 hdr_min_luminance; + bool hdr_supported; + + /* EDID bits HDMI 2.0 + * @max_tmds_char: indicates the maximum TMDS Character Rate supported + * @scdc_present: when set the sink supports SCDC functionality + * @rr_capable: when set the sink is capable of initiating an + * SCDC read request + * @supports_scramble: when set the sink supports less than + * 340Mcsc scrambling + * @flags_3d: 3D view(s) supported by the sink, see drm_edid.h + * (DRM_EDID_3D_*) + */ + int max_tmds_char; /* in Mcsc */ + bool scdc_present; + bool rr_capable; + bool supports_scramble; + int flags_3d; + /** * @edid_corrupt: Indicates whether the last read EDID was corrupt.
Used * in Displayport compliance testing - Displayport Link CTS Core 1.2 diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 05cc31b5db161070734a65aacd7d45fad5b43884..decd918bb0cf6d4b7b719e28ddf13de9a31a558e 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -550,6 +550,8 @@ # define DP_TEST_LINK_EDID_READ (1 << 2) # define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ # define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */ +# define DP_TEST_LINK_AUDIO_PATTERN (1 << 5) +# define DP_TEST_LINK_AUDIO_DISABLED_VIDEO (1 << 6) #define DP_TEST_LINK_RATE 0x219 # define DP_LINK_RATE_162 (0x6) @@ -627,6 +629,14 @@ # define DP_TEST_COUNT_MASK 0xf #define DP_TEST_PHY_PATTERN 0x248 +# define DP_TEST_PHY_PATTERN_NONE 0x0 +# define DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING 0x1 +# define DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT 0x2 +# define DP_TEST_PHY_PATTERN_PRBS7 0x3 +# define DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN 0x4 +# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_1 0x5 +# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_2 0x6 +# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_3 0x7 #define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250 #define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251 #define DP_TEST_80BIT_CUSTOM_PATTERN_23_16 0x252 @@ -648,6 +658,19 @@ #define DP_TEST_SINK 0x270 # define DP_TEST_SINK_START (1 << 0) +#define DP_TEST_AUDIO_MODE 0x271 + +#define DP_TEST_AUDIO_PATTERN_TYPE 0x272 + +#define DP_TEST_AUDIO_PERIOD_CH1 0x273 +#define DP_TEST_AUDIO_PERIOD_CH2 0x274 +#define DP_TEST_AUDIO_PERIOD_CH3 0x275 +#define DP_TEST_AUDIO_PERIOD_CH4 0x276 +#define DP_TEST_AUDIO_PERIOD_CH5 0x277 +#define DP_TEST_AUDIO_PERIOD_CH6 0x278 +#define DP_TEST_AUDIO_PERIOD_CH7 0x279 +#define DP_TEST_AUDIO_PERIOD_CH8 0x27A + #define DP_FEC_STATUS 0x280 /* 1.4 */ # define DP_FEC_DECODE_EN_DETECTED (1 << 0) # define DP_FEC_DECODE_DIS_DETECTED (1 << 1) diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index b25d12ef120a10d9cada8072a019d38d5aa17075..fe094c7209869dd1c5f8f05e554a85ac3408b846 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -279,6 +279,11 @@ struct detailed_timing { #define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) +/* HDMI 2.0 */ +#define DRM_EDID_3D_INDEPENDENT_VIEW (1 << 2) +#define DRM_EDID_3D_DUAL_VIEW (1 << 1) +#define DRM_EDID_3D_OSD_DISPARITY (1 << 0) + struct edid { u8 header[8]; /* Vendor & product info */ diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 4fef19064b0f12cad6b70614112493fd54e51729..328f232f33eef1cb5bd4c8c68c5f1fe9f8aa5bc9 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -21,12 +21,18 @@ struct mipi_dsi_device; #define MIPI_DSI_MSG_REQ_ACK BIT(0) /* use Low Power Mode to transmit message */ #define MIPI_DSI_MSG_USE_LPM BIT(1) +/* read mipi_dsi_msg.ctrl and unicast only to that ctrl */ +#define MIPI_DSI_MSG_UNICAST BIT(2) +/* stack all commands until the lastcommand bit is set, then trigger them all in one go */ +#define MIPI_DSI_MSG_LASTCOMMAND BIT(3) /** * struct mipi_dsi_msg - read/write DSI buffer * @channel: virtual channel id * @type: payload data type * @flags: flags controlling this message transmission + * @ctrl: ctrl index to transmit on + * @wait_ms: duration in ms to wait after message transmission * @tx_len: length of @tx_buf * @tx_buf: data to be written * @rx_len: length of @rx_buf @@ -36,6 +42,8 @@ struct mipi_dsi_msg { u8 channel; u8 type; u16 flags; + u32 ctrl; + u32 wait_ms; size_t tx_len; const void *tx_buf; @@ -134,6 +142,10 @@ struct
mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node); #define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10) /* transmit data in low power */ #define MIPI_DSI_MODE_LPM BIT(11) +/* disable BLLP area */ +#define MIPI_DSI_MODE_VIDEO_BLLP BIT(12) +/* disable EOF BLLP area */ +#define MIPI_DSI_MODE_VIDEO_EOF_BLLP BIT(13) enum mipi_dsi_pixel_format { MIPI_DSI_FMT_RGB888, diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h index c34a3e8030e12c1cd2957c55780a11004edf967d..6292fa663844463d9d9e89044dad0e7dc198e14c 100644 --- a/include/drm/drm_mode_object.h +++ b/include/drm/drm_mode_object.h @@ -60,7 +60,7 @@ struct drm_mode_object { void (*free_cb)(struct kref *kref); }; -#define DRM_OBJECT_MAX_PROPERTY 24 +#define DRM_OBJECT_MAX_PROPERTY 64 /** * struct drm_object_properties - property tracking for &drm_mode_object */ diff --git a/include/linux/msm_drm_notify.h b/include/linux/msm_drm_notify.h new file mode 100644 index 0000000000000000000000000000000000000000..3db08cf28647d1d08e90fae06870f1c2026228a8 --- /dev/null +++ b/include/linux/msm_drm_notify.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ +#ifndef _MSM_DRM_NOTIFY_H_ +#define _MSM_DRM_NOTIFY_H_ + +#include + +/* A hardware display blank change occurred */ +#define MSM_DRM_EVENT_BLANK 0x01 +/* A hardware display blank early change occurred */ +#define MSM_DRM_EARLY_EVENT_BLANK 0x02 + +enum { + /* panel: power on */ + MSM_DRM_BLANK_UNBLANK, + /* panel: power off */ + MSM_DRM_BLANK_POWERDOWN, +}; + +enum msm_drm_display_id { + /* primary display */ + MSM_DRM_PRIMARY_DISPLAY, + /* external display */ + MSM_DRM_EXTERNAL_DISPLAY, + MSM_DRM_DISPLAY_MAX +}; + +struct msm_drm_notifier { + enum msm_drm_display_id id; + void *data; +}; + +int msm_drm_register_client(struct notifier_block *nb); +int msm_drm_unregister_client(struct notifier_block *nb); +#endif diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h index e1dc0cd35e22f5764fcb4ab12153757d367d3bda..a409da14e78a1d729dd0b0b2ff38b9dcc444c525 100644 --- a/include/linux/sde_rsc.h +++ b/include/linux/sde_rsc.h @@ -277,13 +277,13 @@ static inline int sde_rsc_client_state_update(struct sde_rsc_client *client, return 0; } -int sde_rsc_client_get_vsync_refcount( +static inline int sde_rsc_client_get_vsync_refcount( struct sde_rsc_client *caller_client) { return 0; } -int sde_rsc_client_reset_vsync_refcount( +static inline int sde_rsc_client_reset_vsync_refcount( struct sde_rsc_client *caller_client) { return 0; diff --git a/include/soc/qcom/cx_ipeak.h b/include/soc/qcom/cx_ipeak.h new file mode 100644 index 0000000000000000000000000000000000000000..8ebc1138d419b1b3c19407c6366d88174209c11f --- /dev/null +++ b/include/soc/qcom/cx_ipeak.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef __SOC_QCOM_CX_IPEAK_H
+#define __SOC_QCOM_CX_IPEAK_H
+
+struct device_node;
+struct cx_ipeak_client;
+
+#ifndef CONFIG_QCOM_CX_IPEAK
+
+static inline struct cx_ipeak_client *cx_ipeak_register(
+		struct device_node *dev_node,
+		const char *client_name)
+{
+	return NULL;
+}
+
+static inline void cx_ipeak_unregister(struct cx_ipeak_client *client)
+{
+}
+
+static inline int cx_ipeak_update(struct cx_ipeak_client *ipeak_client,
+		bool vote)
+{
+	return 0;
+}
+#else
+
+struct cx_ipeak_client *cx_ipeak_register(struct device_node *dev_node,
+		const char *client_name);
+void cx_ipeak_unregister(struct cx_ipeak_client *client);
+int cx_ipeak_update(struct cx_ipeak_client *ipeak_client, bool vote);
+
+#endif
+
+#endif /* __SOC_QCOM_CX_IPEAK_H */
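
For reviewers, a minimal sketch of how a sink driver might consume the new
drm_connector HDR fields once this patch is applied. The function
example_apply_hdr_caps() and the policy it implements are hypothetical; only
the field names come from the patch.

#include <linux/printk.h>
#include <drm/drm_connector.h>

/* Hypothetical consumer of the parsed HDR capabilities. */
static void example_apply_hdr_caps(struct drm_connector *connector)
{
	/* hdr_supported is only set once the HDR block has been parsed */
	if (!connector->hdr_supported)
		return;

	pr_debug("sink EOTF mask 0x%x, luminance max/avg/min %u/%u/%u\n",
		 connector->hdr_eotf,
		 connector->hdr_max_luminance,
		 connector->hdr_avg_luminance,
		 connector->hdr_min_luminance);

	/* A real driver would gate HDR infoframe generation on
	 * hdr_metadata_type_one (Static Metadata Type 1) here.
	 */
}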
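
Similarly, a sketch of how a source driver could fetch the new audio-test
DPCD registers with the existing drm_dp_dpcd_readb() helper. The function and
its error policy are illustrative; the register offsets are the ones the
patch defines.

#include <linux/errno.h>
#include <drm/drm_dp_helper.h>

/* Illustrative: read the requested audio test mode, pattern type and
 * the channel 1 period. drm_dp_dpcd_readb() returns the number of
 * bytes read (1) on success or a negative error code.
 */
static int example_read_audio_test(struct drm_dp_aux *aux)
{
	u8 mode, pattern, period;

	if (drm_dp_dpcd_readb(aux, DP_TEST_AUDIO_MODE, &mode) < 0 ||
	    drm_dp_dpcd_readb(aux, DP_TEST_AUDIO_PATTERN_TYPE, &pattern) < 0 ||
	    drm_dp_dpcd_readb(aux, DP_TEST_AUDIO_PERIOD_CH1, &period) < 0)
		return -EIO;

	/* DP_TEST_AUDIO_PERIOD_CH1..CH8 are consecutive (0x273..0x27A),
	 * so the remaining channels can be read in a loop.
	 */
	return 0;
}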
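
A sketch of how a panel driver might populate the extended struct
mipi_dsi_msg. The DCS payload, the choice of ctrl 0 and the 20 ms post-delay
are made up for the example; only the flags and fields come from the patch.

#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

/* Illustrative: long write, unicast to a single DSI controller, marked
 * as the last command of a stacked batch, with a post-transfer delay.
 */
static void example_fill_dsi_msg(struct mipi_dsi_msg *msg,
				 const u8 *payload, size_t len)
{
	msg->channel = 0;
	msg->type = MIPI_DSI_DCS_LONG_WRITE;
	msg->flags = MIPI_DSI_MSG_UNICAST | MIPI_DSI_MSG_LASTCOMMAND;
	msg->ctrl = 0;		/* ctrl index to transmit on */
	msg->wait_ms = 20;	/* wait after message transmission */
	msg->tx_len = len;
	msg->tx_buf = payload;
}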
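
A sketch of a client of the new msm_drm_notify interface. One assumption is
not defined by the header itself and should be verified against the msm_drm
driver: that @data of struct msm_drm_notifier points to an int holding an
MSM_DRM_BLANK_* value, following the fbdev notifier convention.

#include <linux/printk.h>
#include <linux/notifier.h>
#include <linux/msm_drm_notify.h>

/* Illustrative only; see the payload assumption noted above. */
static int example_blank_cb(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	struct msm_drm_notifier *evdata = data;
	int *blank;

	if (event != MSM_DRM_EVENT_BLANK || !evdata || !evdata->data)
		return 0;
	if (evdata->id != MSM_DRM_PRIMARY_DISPLAY)
		return 0;

	blank = evdata->data;	/* assumed to point to MSM_DRM_BLANK_* */
	if (*blank == MSM_DRM_BLANK_UNBLANK)
		pr_debug("primary panel powered on\n");

	return 0;
}

static struct notifier_block example_nb = {
	.notifier_call = example_blank_cb,
};

/* registration: msm_drm_register_client(&example_nb); */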
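
Finally, a sketch of the intended cx_ipeak call sequence: register once, vote
before a peak-current burst, remove the vote afterwards. The function and the
"example-client" name are invented; note that with CONFIG_QCOM_CX_IPEAK
disabled the stubs above make cx_ipeak_register() return NULL and
cx_ipeak_update() a no-op returning 0, so a NULL client must be tolerated.

#include <soc/qcom/cx_ipeak.h>

/* Illustrative only: vote around a high-current workload. */
static int example_cx_ipeak_vote(struct device_node *np)
{
	struct cx_ipeak_client *client;
	int ret;

	client = cx_ipeak_register(np, "example-client");
	/* Real code should check the return value per the driver's
	 * contract when CONFIG_QCOM_CX_IPEAK is enabled.
	 */

	ret = cx_ipeak_update(client, true);	/* cast the vote */
	if (ret)
		return ret;

	/* ... run the high-current workload ... */

	cx_ipeak_update(client, false);		/* remove the vote */
	cx_ipeak_unregister(client);
	return 0;
}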