diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c index ea87d1f7ed61d324f2010a2927cb41ad198ce9fc..0be6cd4c4acdf0b7d7431aa61a993323ea36fa87 100644 --- a/drivers/input/touchscreen/st/fts.c +++ b/drivers/input/touchscreen/st/fts.c @@ -134,7 +134,9 @@ static int fts_command(struct fts_ts_info *info, unsigned char cmd); static int fts_chip_initialization(struct fts_ts_info *info); static int fts_enable_reg(struct fts_ts_info *info, bool enable); +#if !defined(CONFIG_FB_MSM) static struct drm_panel *active_panel; +#endif void touch_callback(unsigned int status) { @@ -4167,7 +4169,7 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb, struct fb_event *evdata = data; unsigned int blank; - if (!evdata || (evdata->id != 0)) + if (!evdata) return 0; if (val != FB_EVENT_BLANK) @@ -4556,6 +4558,7 @@ static int parse_dt(struct device *dev, static int check_dt(struct device_node *np) { +#if !defined(CONFIG_FB_MSM) int i; int count; struct device_node *node; @@ -4575,6 +4578,7 @@ static int check_dt(struct device_node *np) } } +#endif return -ENODEV; } diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index f99558d006bf43501efb0d00213d5841951ff660..d70a0b35faab75603fa7d86571e867eb67d20304 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -2222,6 +2222,19 @@ config FB_PRE_INIT_FB Select this option if display contents should be inherited as set by the bootloader. +config FB_MSM + tristate "MSM Framebuffer support" + depends on FB && ARCH_QCOM + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + select SYNC_FILE + help + The MSM driver implements a frame buffer interface to + provide access to the display hardware and provide + a way for users to display graphics + on connected display panels. 
+ config FB_MX3 tristate "MX3 Framebuffer support" depends on FB && MX3_IPU @@ -2339,3 +2352,4 @@ config FB_SM712 source "drivers/video/fbdev/omap/Kconfig" source "drivers/video/fbdev/omap2/Kconfig" source "drivers/video/fbdev/mmp/Kconfig" +source "drivers/video/fbdev/msm/Kconfig" diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index 13c900320c2cc375593d02bd0aa99381d03a6f81..850f441caa658e447c624b395f041bec576a3d54 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -125,6 +125,11 @@ obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o obj-$(CONFIG_FB_HYPERV) += hyperv_fb.o obj-$(CONFIG_FB_OPENCORES) += ocfb.o obj-$(CONFIG_FB_SM712) += sm712fb.o +ifeq ($(CONFIG_FB_MSM),y) +obj-y += msm/ +else +obj-$(CONFIG_MSM_DBA) += msm/msm_dba/ +endif # Platform or fallback drivers go here obj-$(CONFIG_FB_UVESA) += uvesafb.o diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 852731844c830dde3a333c31098d8f47f7a35b54..4000f227ad3cc8cfbba0e5ae3b9d92c734c48c32 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1082,7 +1082,7 @@ fb_blank(struct fb_info *info, int blank) EXPORT_SYMBOL(fb_blank); static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg) + unsigned long arg, struct file *file) { struct fb_ops *fb; struct fb_var_screeninfo var; @@ -1182,9 +1182,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, case FBIOPUT_CON2FBMAP: if (copy_from_user(&con2fb, argp, sizeof(con2fb))) return -EFAULT; - if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES) - return -EINVAL; - if (con2fb.framebuffer >= FB_MAX) + if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES || + (con2fb.framebuffer >= FB_MAX)) return -EINVAL; if (!registered_fb[con2fb.framebuffer]) request_module("fb%d", con2fb.framebuffer); @@ -1219,7 +1218,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, if (!lock_fb_info(info)) return 
-ENODEV; fb = info->fbops; - if (fb->fb_ioctl) + if (fb->fb_ioctl_v2) + ret = fb->fb_ioctl_v2(info, cmd, arg, file); + else if (fb->fb_ioctl) ret = fb->fb_ioctl(info, cmd, arg); else ret = -ENOTTY; @@ -1234,7 +1235,7 @@ static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (!info) return -ENODEV; - return do_fb_ioctl(info, cmd, arg); + return do_fb_ioctl(info, cmd, arg, file); } #ifdef CONFIG_COMPAT @@ -1265,7 +1266,7 @@ struct fb_cmap32 { }; static int fb_getput_cmap(struct fb_info *info, unsigned int cmd, - unsigned long arg) + unsigned long arg, struct file *file) { struct fb_cmap_user __user *cmap; struct fb_cmap32 __user *cmap32; @@ -1288,7 +1289,7 @@ static int fb_getput_cmap(struct fb_info *info, unsigned int cmd, put_user(compat_ptr(data), &cmap->transp)) return -EFAULT; - err = do_fb_ioctl(info, cmd, (unsigned long) cmap); + err = do_fb_ioctl(info, cmd, (unsigned long) cmap, file); if (!err) { if (copy_in_user(&cmap32->start, @@ -1333,7 +1334,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix, } static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd, - unsigned long arg) + unsigned long arg, struct file *file) { struct fb_fix_screeninfo fix; @@ -1363,20 +1364,22 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd, arg = (unsigned long) compat_ptr(arg); /* fall through */ case FBIOBLANK: - ret = do_fb_ioctl(info, cmd, arg); + ret = do_fb_ioctl(info, cmd, arg, file); break; case FBIOGET_FSCREENINFO: - ret = fb_get_fscreeninfo(info, cmd, arg); + ret = fb_get_fscreeninfo(info, cmd, arg, file); break; case FBIOGETCMAP: case FBIOPUTCMAP: - ret = fb_getput_cmap(info, cmd, arg); + ret = fb_getput_cmap(info, cmd, arg, file); break; default: - if (fb->fb_compat_ioctl) + if (fb->fb_compat_ioctl_v2) + ret = fb->fb_compat_ioctl_v2(info, cmd, arg, file); + else if (fb->fb_compat_ioctl) ret = fb->fb_compat_ioctl(info, cmd, arg); break; } diff --git a/drivers/video/fbdev/msm/Kconfig 
b/drivers/video/fbdev/msm/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..39819d201ae1cc08d76444cd81c5e04ed2d6eeaa --- /dev/null +++ b/drivers/video/fbdev/msm/Kconfig @@ -0,0 +1,173 @@ +# SPDX-License-Identifier: GPL-2.0-only +source "drivers/video/fbdev/msm/msm_dba/Kconfig" + +if FB_MSM + +config FB_MSM_MDSS_COMMON + bool + +choice + prompt "MDP HW version" + default FB_MSM_MDP + help + The Mobile Display Processor (MDP) hardware version used in + the chipset. The hardware version varies across multiple + chipsets. + + Support for MDP HW. + +config FB_MSM_MDP + bool "MDP HW" + select FB_MSM_MDP_HW + help + The Mobile Display Processor (MDP) driver support devices which + contain MDP hardware block. + + Support for MSM MDP HW revision 2.2. + Say Y here if this is msm7201 variant platform. + +config FB_MSM_MDSS + bool "MDSS HW" + select SYNC_FILE + select FB_MSM_MDSS_COMMON + help + The Mobile Display Sub System (MDSS) driver supports devices which + contain MDSS hardware block. + + The MDSS driver implements frame buffer interface to provide access to + the display hardware and provide a way for users to display graphics + on connected display panels. + +config FB_MSM_MDP_NONE + bool "MDP HW None" + help + This is used for platforms without Mobile Display Sub System (MDSS). + mdm platform don't have MDSS hardware block. + + Say Y here if this is mdm platform. + +endchoice + +config FB_MSM_QPIC + bool + select FB_MSM_MDSS_COMMON + +config FB_MSM_QPIC_ILI_QVGA_PANEL + bool "Qpic MIPI ILI QVGA Panel" + select FB_MSM_QPIC + ---help--- + Support for MIPI ILI QVGA (240x320) panel ILI TECHNOLOGY 9341 + with on-chip full display RAM use parallel interface. + +config FB_MSM_QPIC_PANEL_DETECT + bool "Qpic Panel Detect" + select FB_MSM_QPIC_ILI_QVGA_PANEL + ---help--- + Support for Qpic panel auto detect. 
+ +config FB_MSM_MDSS_WRITEBACK + bool "MDSS Writeback Panel" + ---help--- + The MDSS Writeback Panel provides support for routing the output of + MDSS frame buffer driver and MDP processing to memory. + +config FB_MSM_MDSS_HDMI_PANEL + bool "MDSS HDMI Tx Panel" + depends on FB_MSM_MDSS + select MSM_EXT_DISPLAY + default n + ---help--- + The MDSS HDMI Panel provides support for transmitting TMDS signals of + MDSS frame buffer data to connected hdmi compliant TVs, monitors etc. + +config FB_MSM_MDSS_HDMI_MHL_SII8334 + depends on FB_MSM_MDSS_HDMI_PANEL + bool 'MHL SII8334 support ' + default n + ---help--- + Support the HDMI to MHL conversion. + MHL (Mobile High-Definition Link) technology + uses USB connector to output HDMI content + +config FB_MSM_MDSS_SPI_PANEL + depends on SPI_QUP + bool "Support SPI panel feature" + ---help--- + The MDSS SPI Panel provides support for transmittimg SPI signals of + MDSS frame buffer data to connected panel. Limited by SPI clock rate, + the current max fps only reach to ~30 fps with 240x240 resolution, and + limited by MDP hardware architecture only supply GPU compostition. + +config FB_MSM_MDSS_RGB_PANEL + depends on FB_MSM_MDSS + bool "Support RGB panel feature" + ---help--- + The MDSS RGB Panel provides support for transmitting + MDSS frame buffer data over RGB parallel interface + connected to RGB panel. Panel on/off sequence commands + are sent to RGB Panel through an SPI interface. + +config FB_MSM_MDSS_MHL3 + depends on FB_MSM_MDSS_HDMI_PANEL + bool "MHL3 SII8620 Support" + default n + ---help--- + Support the SiliconImage 8620 MHL Tx transmitter that uses + USB connector to output HDMI content. Transmitter is an + i2c device acting as an HDMI to MHL bridge. Chip supports + MHL 3.0 standard. + +config FB_MSM_MDSS_DSI_CTRL_STATUS + tristate "DSI controller status check feature" + ---help--- + Check DSI controller status periodically (default period is 5 + seconds) by sending Bus-Turn-Around (BTA) command. 
If DSI controller + fails to acknowledge the BTA command, it sends PANEL_ALIVE=0 status + to HAL layer to reset the controller. + +config FB_MSM_MDSS_DP_PANEL + depends on FB_MSM_MDSS + select MSM_EXT_DISPLAY + bool "MDSS DP Panel" + ---help--- + The MDSS DP Panel provides support for DP host controller driver + which runs in Video mode only and is responsible for transmitting + frame buffer from host SOC to DP display panel. + + support for DP display panel. + +config FB_MSM_MDSS_EDP_PANEL + depends on FB_MSM_MDSS + bool "MDSS eDP Panel" + ---help--- + The MDSS eDP Panel provides support for eDP host controller driver. + Which runs in Video mode only and is responsible for transmitting + frame buffer from host SOC to eDP display panel. + +config FB_MSM_MDSS_MDP3 + depends on FB_MSM_MDSS + bool "MDP3 display controller" + ---help--- + The MDP3 provides support for an older version display controller. + Included in latest display sub-system, known as MDSS. + +config FB_MSM_MDSS_XLOG_DEBUG + depends on FB_MSM_MDSS + bool "Enable MDSS debugging" + ---help--- + The MDSS debugging provides support to enable display debugging + features to: Dump MDSS registers during driver errors, panic + driver during fatal errors and enable some display-driver logging + into an internal buffer (this avoids logging overhead). + +config FB_MSM_MDSS_FRC_DEBUG + depends on DEBUG_FS && FB_MSM_MDSS + bool "Enable Video FRC debugging" + default n + ---help--- + The MDSS FRC debugging provides support to enable the deterministic + frame rate control (FRC) debugging features to: Collect video frame + statistics and check whether its output pattern matches expected + cadence. 
+ +endif diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..68c2a57d09e51b3f1926123f589e3285f2ac7182 --- /dev/null +++ b/drivers/video/fbdev/msm/Makefile @@ -0,0 +1,71 @@ +# SPDX-License-Identifier: GPL-2.0-only +ccflags-y += -I$(src) + +obj-$(CONFIG_FB_MSM_MDSS_MHL3) += mhl3/ +obj-$(CONFIG_MSM_DBA) += msm_dba/ + +ifeq ($(CONFIG_FB_MSM_MDSS_MDP3), y) +ccflags-y += -DTARGET_HW_MDSS_MDP3 +endif +mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o dsi_status_6g.o +mdss-mdp-objs += mdss_mdp_pp.o mdss_mdp_pp_debug.o mdss_mdp_pp_cache_config.o mdss_sync.o +mdss-mdp-objs += mdss_mdp_intf_video.o +mdss-mdp-objs += mdss_mdp_intf_cmd.o +mdss-mdp-objs += mdss_mdp_intf_writeback.o +mdss-mdp-objs += mdss_mdp_overlay.o +mdss-mdp-objs += mdss_mdp_layer.o +mdss-mdp-objs += mdss_mdp_splash_logo.o +mdss-mdp-objs += mdss_mdp_cdm.o +mdss-mdp-objs += mdss_smmu.o +mdss-mdp-objs += mdss_mdp_wfd.o +mdss-mdp-objs += mdss_io_util.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss_mdp_debug.o + +mdss-mdp-objs += mdss_mdp_pp_v1_7.o +mdss-mdp-objs += mdss_mdp_pp_v3.o +mdss-mdp-objs += mdss_mdp_pp_common.o +mdss-mdp-objs += mdss_mdp_pp_stub.o + +ifeq ($(CONFIG_FB_MSM_MDSS),y) +obj-$(CONFIG_DEBUG_FS) += mdss_debug.o mdss_debug_xlog.o +endif + +ifeq ($(CONFIG_FB_MSM_MDSS_FRC_DEBUG),y) +obj-$(CONFIG_DEBUG_FS) += mdss_debug_frc.o +endif + +mdss-dsi-objs := mdss_dsi.o mdss_dsi_host.o mdss_dsi_cmd.o mdss_dsi_status.o +mdss-dsi-objs += mdss_dsi_panel.o +mdss-dsi-objs += msm_mdss_io_8974.o +mdss-dsi-objs += mdss_dsi_phy.o +mdss-dsi-objs += mdss_dsi_phy_v3.o +mdss-dsi-objs += mdss_dsi_clk.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss-dsi.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss_panel.o + +ifneq ($(CONFIG_FB_MSM_MDSS_MDP3), y) +obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_util.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_edid.o +obj-$(CONFIG_FB_MSM_MDSS) += mdss_cec_core.o 
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_dba_utils.o + +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_panel.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdcp_1x.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdcp_2x.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp2p2.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_cec.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_audio.o +obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334) += mhl_sii8334.o mhl_msc.o +ccflags-y += -DTARGET_HW_MDSS_HDMI +endif + +obj-$(CONFIG_FB_MSM_MDSS_DP_PANEL) += mdss_dp.o mdss_dp_util.o +obj-$(CONFIG_FB_MSM_MDSS_DP_PANEL) += mdss_dp_aux.o +obj-$(CONFIG_FB_MSM_MDSS_DP_PANEL) += mdss_dp_hdcp2p2.o + +obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o + +obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o mdss_util.o +obj-$(CONFIG_COMPAT) += mdss_compat_utils.o diff --git a/drivers/video/fbdev/msm/dsi_status_6g.c b/drivers/video/fbdev/msm/dsi_status_6g.c new file mode 100644 index 0000000000000000000000000000000000000000..a977520ef3d01ea8b0acec0c80a3ea6dea3b54d6 --- /dev/null +++ b/drivers/video/fbdev/msm/dsi_status_6g.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2013-2018, 2020, The Linux Foundation. All rights reserved. */ + +#include +#include +#include +#include + +#include "mdss_dsi.h" +#include "mdss_mdp.h" +#include "mdss_debug.h" + +/* + * mdss_check_te_status() - Check the status of panel for TE based ESD. + * @ctrl_pdata : dsi controller data + * @pstatus_data : dsi status data + * @interval : duration in milliseconds to schedule work queue + * + * This function is called when the TE signal from the panel doesn't arrive + * after 'interval' milliseconds. If the TE IRQ is not ready, the workqueue + * gets re-scheduled. Otherwise, report the panel to be dead due to ESD attack. 
+ */ +static bool mdss_check_te_status(struct mdss_dsi_ctrl_pdata *ctrl_pdata, + struct dsi_status_data *pstatus_data, uint32_t interval) +{ + bool ret; + + /* + * During resume, the panel status will be ON but due to race condition + * between ESD thread and display UNBLANK (or rather can be put as + * asynchronuous nature between these two threads), the ESD thread might + * reach this point before the TE IRQ line is enabled or before the + * first TE interrupt arrives after the TE IRQ line is enabled. For such + * cases, re-schedule the ESD thread. + */ + ret = !atomic_read(&ctrl_pdata->te_irq_ready); + if (ret) { + schedule_delayed_work(&pstatus_data->check_status, + msecs_to_jiffies(interval)); + pr_debug("%s: TE IRQ line not enabled yet\n", __func__); + } + + return ret; +} + +/* + * mdss_check_dsi_ctrl_status() - Check MDP5 DSI controller status periodically. + * @work : dsi controller status data + * @interval : duration in milliseconds to schedule work queue + * + * This function calls check_status API on DSI controller to send the BTA + * command. If DSI controller fails to acknowledge the BTA command, it sends + * the PANEL_ALIVE=0 status to HAL layer. 
+ */ +void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval) +{ + struct dsi_status_data *pstatus_data = NULL; + struct mdss_panel_data *pdata = NULL; + struct mipi_panel_info *mipi = NULL; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + struct mdss_overlay_private *mdp5_data = NULL; + struct mdss_mdp_ctl *ctl = NULL; + int ret = 0; + + pstatus_data = container_of(to_delayed_work(work), + struct dsi_status_data, check_status); + if (!pstatus_data || !(pstatus_data->mfd)) { + pr_err("%s: mfd not available\n", __func__); + return; + } + + pdata = dev_get_platdata(&pstatus_data->mfd->pdev->dev); + if (!pdata) { + pr_err("%s: Panel data not available\n", __func__); + return; + } + mipi = &pdata->panel_info.mipi; + + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + if (!ctrl_pdata || (!ctrl_pdata->check_status && + (ctrl_pdata->status_mode != ESD_TE))) { + pr_err("%s: DSI ctrl or status_check callback not available\n", + __func__); + return; + } + + if (!pdata->panel_info.esd_rdy) { + pr_debug("%s: unblank not complete, reschedule check status\n", + __func__); + schedule_delayed_work(&pstatus_data->check_status, + msecs_to_jiffies(interval)); + return; + } + + mdp5_data = mfd_to_mdp5_data(pstatus_data->mfd); + ctl = mfd_to_ctl(pstatus_data->mfd); + + if (!ctl) { + pr_err("%s: Display is off\n", __func__); + return; + } + + if (ctrl_pdata->status_mode == ESD_TE) { + if (mdss_check_te_status(ctrl_pdata, pstatus_data, interval)) + return; + goto status_dead; + } + + + /* + * TODO: Because mdss_dsi_cmd_mdp_busy has made sure DMA to + * be idle in mdss_dsi_cmdlist_commit, it is not necessary + * to acquire ov_lock in case of video mode. Removing this + * lock to fix issues so that ESD thread would not block other + * overlay operations. 
Need refine this lock for command mode + * + * If Burst mode is enabled then we dont have to acquire ov_lock as + * command and data arbitration is possible in h/w + */ + + if ((mipi->mode == DSI_CMD_MODE) && !ctrl_pdata->burst_mode_enabled) + mutex_lock(&mdp5_data->ov_lock); + mutex_lock(&ctl->offlock); + + if (mdss_panel_is_power_off(pstatus_data->mfd->panel_power_state) || + pstatus_data->mfd->shutdown_pending) { + mutex_unlock(&ctl->offlock); + if ((mipi->mode == DSI_CMD_MODE) && + !ctrl_pdata->burst_mode_enabled) + mutex_unlock(&mdp5_data->ov_lock); + pr_err("%s: DSI turning off, avoiding panel status check\n", + __func__); + return; + } + + /* + * For the command mode panels, we return pan display + * IOCTL on vsync interrupt. So, after vsync interrupt comes + * and when DMA_P is in progress, if the panel stops responding + * and if we trigger BTA before DMA_P finishes, then the DSI + * FIFO will not be cleared since the DSI data bus control + * doesn't come back to the host after BTA. This may cause the + * display reset not to be proper. Hence, wait for DMA_P done + * for command mode panels before triggering BTA. 
+ */ + if (ctl->ops.wait_pingpong && !ctrl_pdata->burst_mode_enabled) + ctl->ops.wait_pingpong(ctl, NULL); + + pr_debug("%s: DSI ctrl wait for ping pong done\n", __func__); + MDSS_XLOG(mipi->mode); + + mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON); + ret = ctrl_pdata->check_status(ctrl_pdata); + mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF); + + mutex_unlock(&ctl->offlock); + if ((mipi->mode == DSI_CMD_MODE) && !ctrl_pdata->burst_mode_enabled) + mutex_unlock(&mdp5_data->ov_lock); + + if (pstatus_data->mfd->panel_power_state == MDSS_PANEL_POWER_ON) { + if (ret > 0) + schedule_delayed_work(&pstatus_data->check_status, + msecs_to_jiffies(interval)); + else + goto status_dead; + } + + if (pdata->panel_info.panel_force_dead) { + pr_debug("force_dead=%d\n", pdata->panel_info.panel_force_dead); + pdata->panel_info.panel_force_dead--; + if (!pdata->panel_info.panel_force_dead) + goto status_dead; + } + + return; + +status_dead: + mdss_fb_report_panel_dead(pstatus_data->mfd); +} diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h new file mode 100644 index 0000000000000000000000000000000000000000..501fd88ab3f51b04da342361afcf76b9141b7045 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss.h @@ -0,0 +1,646 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. 
*/ + +#ifndef MDSS_H +#define MDSS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mdss_panel.h" + +#define MAX_DRV_SUP_MMB_BLKS 44 +#define MAX_DRV_SUP_PIPES 10 +#define MAX_CLIENT_NAME_LEN 20 + +#define MDSS_PINCTRL_STATE_DEFAULT "mdss_default" +#define MDSS_PINCTRL_STATE_SLEEP "mdss_sleep" + +enum mdss_mdp_clk_type { + MDSS_CLK_AHB, + MDSS_CLK_AXI, + MDSS_CLK_MDP_CORE, + MDSS_CLK_MDP_LUT, + MDSS_CLK_MDP_VSYNC, + MDSS_CLK_MNOC_AHB, + MDSS_CLK_THROTTLE_AXI, + MDSS_CLK_BIMC, + MDSS_MAX_CLK +}; + +enum mdss_iommu_domain_type { + MDSS_IOMMU_DOMAIN_UNSECURE, + MDSS_IOMMU_DOMAIN_ROT_UNSECURE, + MDSS_IOMMU_DOMAIN_SECURE, + MDSS_IOMMU_DOMAIN_ROT_SECURE, + MDSS_IOMMU_MAX_DOMAIN +}; + +enum mdss_bus_vote_type { + VOTE_INDEX_DISABLE, + VOTE_INDEX_LOW, + VOTE_INDEX_MID, + VOTE_INDEX_HIGH, + VOTE_INDEX_MAX, +}; + +struct mdss_hw_settings { + char __iomem *reg; + u32 val; +}; + +struct mdss_max_bw_settings { + u32 mdss_max_bw_mode; + u32 mdss_max_bw_val; +}; + +struct mdss_debug_inf { + void *debug_data; + void (*debug_enable_clock)(int on); +}; + +struct mdss_perf_tune { + unsigned long min_mdp_clk; + u64 min_bus_vote; +}; + +#define MDSS_IRQ_SUSPEND -1 +#define MDSS_IRQ_RESUME 1 +#define MDSS_IRQ_REQ 0 + +struct mdss_intr { + /* requested intr */ + u32 req; + /* currently enabled intr */ + u32 curr; + int state; + spinlock_t lock; +}; + +struct simplified_prefill_factors { + u32 fmt_mt_nv12_factor; + u32 fmt_mt_factor; + u32 fmt_linear_factor; + u32 scale_factor; + u32 xtra_ff_factor; +}; + +struct mdss_prefill_data { + u32 ot_bytes; + u32 y_buf_bytes; + u32 y_scaler_lines_bilinear; + u32 y_scaler_lines_caf; + u32 post_scaler_pixels; + u32 pp_pixels; + u32 fbc_lines; + u32 ts_threshold; + u32 ts_end; + u32 ts_overhead; + struct mult_factor ts_rate; + struct simplified_prefill_factors prefill_factors; +}; + +struct mdss_mdp_dsc { + u32 num; + char __iomem 
*base; +}; + +enum mdss_hw_index { + MDSS_HW_MDP, + MDSS_HW_DSI0 = 1, + MDSS_HW_DSI1, + MDSS_HW_HDMI, + MDSS_HW_EDP, + MDSS_HW_MISC, + MDSS_MAX_HW_BLK +}; + +enum mdss_bus_clients { + MDSS_MDP_RT, + MDSS_DSI_RT, + MDSS_HW_RT, + MDSS_MDP_NRT, + MDSS_MAX_BUS_CLIENTS +}; + +struct mdss_pp_block_off { + u32 sspp_igc_lut_off; + u32 vig_pcc_off; + u32 rgb_pcc_off; + u32 dma_pcc_off; + u32 lm_pgc_off; + u32 dspp_gamut_off; + u32 dspp_pcc_off; + u32 dspp_pgc_off; +}; + +enum mdss_hw_quirk { + MDSS_QUIRK_BWCPANIC, + MDSS_QUIRK_ROTCDP, + MDSS_QUIRK_DOWNSCALE_HANG, + MDSS_QUIRK_DSC_RIGHT_ONLY_PU, + MDSS_QUIRK_DSC_2SLICE_PU_THRPUT, + MDSS_QUIRK_DMA_BI_DIR, + MDSS_QUIRK_FMT_PACK_PATTERN, + MDSS_QUIRK_NEED_SECURE_MAP, + MDSS_QUIRK_SRC_SPLIT_ALWAYS, + MDSS_QUIRK_MMSS_GDSC_COLLAPSE, + MDSS_QUIRK_MDP_CLK_SET_RATE, + MDSS_QUIRK_HDR_SUPPORT_ENABLED, + MDSS_QUIRK_MAX, +}; + +enum mdss_hw_capabilities { + MDSS_CAPS_YUV_CONFIG, + MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED, + MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED, + MDSS_CAPS_MIXER_1_FOR_WB, + MDSS_CAPS_QSEED3, + MDSS_CAPS_DEST_SCALER, + MDSS_CAPS_10_BIT_SUPPORTED, + MDSS_CAPS_CWB_SUPPORTED, + MDSS_CAPS_MDP_VOTE_CLK_NOT_SUPPORTED, + MDSS_CAPS_AVR_SUPPORTED, + MDSS_CAPS_SEC_DETACH_SMMU, + MDSS_CAPS_MAX, +}; + +enum mdss_qos_settings { + MDSS_QOS_PER_PIPE_IB, + MDSS_QOS_OVERHEAD_FACTOR, + MDSS_QOS_CDP, + MDSS_QOS_OTLIM, + MDSS_QOS_PER_PIPE_LUT, + MDSS_QOS_SIMPLIFIED_PREFILL, + MDSS_QOS_VBLANK_PANIC_CTRL, + MDSS_QOS_TS_PREFILL, + MDSS_QOS_REMAPPER, + MDSS_QOS_IB_NOCR, + MDSS_QOS_WB2_WRITE_GATHER_EN, + MDSS_QOS_WB_QOS, + MDSS_QOS_MAX, +}; + +enum mdss_mdp_pipe_type { + MDSS_MDP_PIPE_TYPE_INVALID = -1, + MDSS_MDP_PIPE_TYPE_VIG = 0, + MDSS_MDP_PIPE_TYPE_RGB, + MDSS_MDP_PIPE_TYPE_DMA, + MDSS_MDP_PIPE_TYPE_CURSOR, + MDSS_MDP_PIPE_TYPE_MAX, +}; + +enum mdss_mdp_intf_index { + MDSS_MDP_NO_INTF, + MDSS_MDP_INTF0, + MDSS_MDP_INTF1, + MDSS_MDP_INTF2, + MDSS_MDP_INTF3, + MDSS_MDP_MAX_INTF +}; + +struct reg_bus_client { + char name[MAX_CLIENT_NAME_LEN]; 
+ short usecase_ndx; + u32 id; + struct list_head list; +}; + +struct mdss_smmu_client { + struct mdss_smmu_intf base; + struct iommu_domain *domain; + struct dss_module_power mp; + struct reg_bus_client *reg_bus_clt; + bool domain_attached; + bool domain_reattach; + bool handoff_pending; + void __iomem *mmu_base; + struct list_head _client; +}; + +struct mdss_mdp_qseed3_lut_tbl { + bool valid; + u32 *dir_lut; + u32 *cir_lut; + u32 *sep_lut; +}; + +struct mdss_scaler_block { + u32 vig_scaler_off; + u32 vig_scaler_lut_off; + u32 has_dest_scaler; + char __iomem *dest_base; + u32 ndest_scalers; + u32 *dest_scaler_off; + u32 *dest_scaler_lut_off; + struct mdss_mdp_qseed3_lut_tbl lut_tbl; + + /* + * Lock is mainly to serialize access to LUT. + * LUT values come asynchronously from userspace + * via ioctl. + */ + struct mutex scaler_lock; +}; + +struct mdss_data_type; + +struct mdss_smmu_ops { + int (*smmu_attach)(struct mdss_data_type *mdata); + int (*smmu_detach)(struct mdss_data_type *mdata); + int (*smmu_get_domain_id)(u32 type); + struct dma_buf_attachment * (*smmu_dma_buf_attach)( + struct dma_buf *dma_buf, struct device *devce, + int domain); + int (*smmu_map_dma_buf)(struct dma_buf *dma_buf, + struct sg_table *table, int domain, + dma_addr_t *iova, unsigned long *size, int dir); + void (*smmu_unmap_dma_buf)(struct sg_table *table, int domain, + int dir, struct dma_buf *dma_buf); + int (*smmu_dma_alloc_coherent)(struct device *dev, size_t size, + dma_addr_t *phys, dma_addr_t *iova, void **cpu_addr, + gfp_t gfp, int domain); + void (*smmu_dma_free_coherent)(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t phys, dma_addr_t iova, + int domain); + int (*smmu_map)(int domain, phys_addr_t iova, phys_addr_t phys, int + gfp_order, int prot); + void (*smmu_unmap)(int domain, unsigned long iova, int gfp_order); + char * (*smmu_dsi_alloc_buf)(struct device *dev, int size, + dma_addr_t *dmap, gfp_t gfp); + int (*smmu_dsi_map_buffer)(phys_addr_t phys, unsigned int 
domain, + unsigned long size, dma_addr_t *dma_addr, + void *cpu_addr, int dir); + void (*smmu_dsi_unmap_buffer)(dma_addr_t dma_addr, int domain, + unsigned long size, int dir); + void (*smmu_deinit)(struct mdss_data_type *mdata); +}; + +struct mdss_data_type { + u32 mdp_rev; + struct clk *mdp_clk[MDSS_MAX_CLK]; + struct regulator *fs; + struct regulator *core_gdsc; + struct regulator *vdd_cx; + u32 vdd_cx_min_uv; + u32 vdd_cx_max_uv; + bool batfet_required; + struct regulator *batfet; + bool en_svs_high; + u32 max_mdp_clk_rate; + struct mdss_util_intf *mdss_util; + unsigned long mdp_clk_rate; + + struct platform_device *pdev; + struct dss_io_data mdss_io; + struct dss_io_data vbif_io; + struct dss_io_data vbif_nrt_io; + char __iomem *mdp_base; + + struct mdss_smmu_client mdss_smmu[MDSS_IOMMU_MAX_DOMAIN]; + struct mdss_smmu_ops smmu_ops; + struct mutex reg_lock; + + /* bitmap to track pipes that have BWC enabled */ + DECLARE_BITMAP(bwc_enable_map, MAX_DRV_SUP_PIPES); + /* bitmap to track hw workarounds */ + DECLARE_BITMAP(mdss_quirk_map, MDSS_QUIRK_MAX); + /* bitmap to track total mmbs in use */ + DECLARE_BITMAP(mmb_alloc_map, MAX_DRV_SUP_MMB_BLKS); + /* bitmap to track qos applicable settings */ + DECLARE_BITMAP(mdss_qos_map, MDSS_QOS_MAX); + /* bitmap to track hw capabilities/features */ + DECLARE_BITMAP(mdss_caps_map, MDSS_CAPS_MAX); + + u32 has_bwc; + /* values used when HW has a common panic/robust LUT */ + u32 default_panic_lut0; + u32 default_panic_lut1; + u32 default_robust_lut; + + /* values used when HW has panic/robust LUTs per pipe */ + u32 default_panic_lut_per_pipe_linear; + u32 default_panic_lut_per_pipe_tile; + u32 default_robust_lut_per_pipe_linear; + u32 default_robust_lut_per_pipe_tile; + + u32 has_decimation; + bool has_fixed_qos_arbiter_enabled; + bool has_panic_ctrl; + u32 wfd_mode; + u32 has_no_lut_read; + atomic_t sd_client_count; + atomic_t sc_client_count; + u8 has_wb_ad; + u8 has_non_scalar_rgb; + bool has_src_split; + bool 
idle_pc_enabled; + bool has_pingpong_split; + bool has_pixel_ram; + bool needs_hist_vote; + bool has_ubwc; + bool has_wb_ubwc; + bool has_separate_rotator; + + u32 default_ot_rd_limit; + u32 default_ot_wr_limit; + + struct irq_domain *irq_domain; + u32 *mdp_irq_raw; + u32 *mdp_irq_export; + u32 *mdp_irq_mask; + u32 mdp_hist_irq_mask; + u32 mdp_intf_irq_mask; + + int suspend_fs_ena; + u8 clk_ena; + u8 fs_ena; + u8 vsync_ena; + + struct notifier_block gdsc_cb; + + u32 res_init; + + u32 highest_bank_bit; + u32 smp_mb_cnt; + u32 smp_mb_size; + u32 smp_mb_per_pipe; + u32 pixel_ram_size; + + u32 rot_block_size; + + /* HW RT bus (AXI) */ + u32 hw_rt_bus_hdl; + u32 hw_rt_bus_ref_cnt; + + /* data bus (AXI) */ + u32 bus_hdl; + u32 bus_ref_cnt; + struct mutex bus_lock; + + /* register bus (AHB) */ + u32 reg_bus_hdl; + u32 reg_bus_usecase_ndx; + struct list_head reg_bus_clist; + struct mutex reg_bus_lock; + struct reg_bus_client *reg_bus_clt; + struct reg_bus_client *pp_reg_bus_clt; + + u32 axi_port_cnt; + u32 nrt_axi_port_cnt; + u32 bus_channels; + u32 curr_bw_uc_idx; + u32 ao_bw_uc_idx; /* active only idx */ + struct msm_bus_scale_pdata *bus_scale_table; + struct msm_bus_scale_pdata *reg_bus_scale_table; + struct msm_bus_scale_pdata *hw_rt_bus_scale_table; + u32 max_bw_low; + u32 max_bw_high; + u32 max_bw_per_pipe; + u32 *vbif_rt_qos; + u32 *vbif_nrt_qos; + u32 npriority_lvl; + + struct mult_factor ab_factor; + struct mult_factor ib_factor; + struct mult_factor ib_factor_overlap; + struct mult_factor clk_factor; + struct mult_factor per_pipe_ib_factor; + bool apply_post_scale_bytes; + bool hflip_buffer_reused; + + u32 disable_prefill; + u32 *clock_levels; + u32 nclk_lvl; + + u32 enable_gate; + u32 enable_bw_release; + u32 enable_rotator_bw_release; + u32 enable_cdp; + u32 serialize_wait4pp; + u32 wait4autorefresh; + u32 lines_before_active; + + struct mdss_hw_settings *hw_settings; + + int rects_per_sspp[MDSS_MDP_PIPE_TYPE_MAX]; + struct mdss_mdp_pipe *vig_pipes; + struct 
mdss_mdp_pipe *rgb_pipes; + struct mdss_mdp_pipe *dma_pipes; + struct mdss_mdp_pipe *cursor_pipes; + u32 nvig_pipes; + u32 nrgb_pipes; + u32 ndma_pipes; + u32 max_target_zorder; + u8 ncursor_pipes; + u32 max_cursor_size; + + u32 nppb_ctl; + u32 *ppb_ctl; + u32 nppb_cfg; + u32 *ppb_cfg; + char __iomem *slave_pingpong_base; + + struct mdss_mdp_mixer *mixer_intf; + struct mdss_mdp_mixer *mixer_wb; + u32 nmixers_intf; + u32 nmixers_wb; + u32 max_mixer_width; + u32 max_pipe_width; + + struct mdss_mdp_writeback *wb; + u32 nwb; + u32 *wb_offsets; + u32 nwb_offsets; + struct mutex wb_lock; + + struct mdss_mdp_ctl *ctl_off; + u32 nctl; + u32 ndspp; + + struct mdss_mdp_dp_intf *dp_off; + u32 ndp; + void *video_intf; + u32 nintf; + + struct mdss_mdp_ad *ad_off; + struct mdss_ad_info *ad_cfgs; + u32 nad_cfgs; + u32 nmax_concurrent_ad_hw; + struct workqueue_struct *ad_calc_wq; + u32 ad_debugen; + bool mem_retain; + + struct mdss_intr hist_intr; + + int iommu_attached; + + u32 dbg_bus_flags; + struct debug_bus *dbg_bus; + u32 dbg_bus_size; + struct vbif_debug_bus *vbif_dbg_bus; + u32 vbif_dbg_bus_size; + struct vbif_debug_bus *nrt_vbif_dbg_bus; + u32 nrt_vbif_dbg_bus_size; + struct mdss_debug_inf debug_inf; + bool mixer_switched; + struct mdss_panel_cfg pan_cfg; + struct mdss_prefill_data prefill_data; + u32 min_prefill_lines; /* this changes within different chipsets */ + u32 props; + + int handoff_pending; + bool idle_pc; + struct mdss_perf_tune perf_tune; + bool traffic_shaper_en; + int iommu_ref_cnt; + u32 latency_buff_per; + atomic_t active_intf_cnt; + bool has_rot_dwnscale; + bool regulator_notif_register; + + u64 ab[MDSS_MAX_BUS_CLIENTS]; + u64 ib[MDSS_MAX_BUS_CLIENTS]; + struct mdss_pp_block_off pp_block_off; + + struct mdss_mdp_cdm *cdm_off; + u32 ncdm; + struct mutex cdm_lock; + + struct mdss_mdp_dsc *dsc_off; + u32 ndsc; + + struct mdss_max_bw_settings *max_bw_settings; + u32 bw_mode_bitmap; + u32 max_bw_settings_cnt; + bool bw_limit_pending; + + struct 
mdss_max_bw_settings *max_per_pipe_bw_settings; + u32 mdss_per_pipe_bw_cnt; + u32 min_bw_per_pipe; + + u32 bcolor0; + u32 bcolor1; + u32 bcolor2; + struct mdss_scaler_block *scaler_off; + + u32 max_dest_scaler_input_width; + u32 max_dest_scaler_output_width; + struct mdss_mdp_destination_scaler *ds; + u32 sec_disp_en; + u32 sec_cam_en; + u32 sec_session_cnt; + wait_queue_head_t secure_waitq; + struct cx_ipeak_client *mdss_cx_ipeak; + struct mult_factor bus_throughput_factor; +}; + +extern struct mdss_data_type *mdss_res; + +struct irq_info { + u32 irq; + u32 irq_mask; + u32 irq_wake_mask; + u32 irq_ena; + u32 irq_wake_ena; + u32 irq_buzy; +}; + +struct mdss_hw { + u32 hw_ndx; + void *ptr; + struct irq_info *irq_info; + irqreturn_t (*irq_handler)(int irq, void *ptr); +}; + +struct irq_info *mdss_intr_line(void); +void mdss_bus_bandwidth_ctrl(int enable); +int mdss_iommu_ctrl(int enable); +int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota); +int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, + u32 usecase_ndx); +struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name); +void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *bus_client); + +struct mdss_util_intf { + bool mdp_probe_done; + int (*register_irq)(struct mdss_hw *hw); + void (*enable_irq)(struct mdss_hw *hw); + void (*disable_irq)(struct mdss_hw *hw); + void (*enable_wake_irq)(struct mdss_hw *hw); + void (*disable_wake_irq)(struct mdss_hw *hw); + void (*disable_irq_nosync)(struct mdss_hw *hw); + int (*irq_dispatch)(u32 hw_ndx, int irq, void *ptr); + int (*get_iommu_domain)(u32 type); + int (*iommu_attached)(void); + int (*iommu_ctrl)(int enable); + void (*iommu_lock)(void); + void (*iommu_unlock)(void); + void (*vbif_reg_lock)(void); + void (*vbif_reg_unlock)(void); + int (*secure_session_ctrl)(int enable); + void (*bus_bandwidth_ctrl)(int enable); + int (*bus_scale_set_quota)(int client, u64 ab_quota, u64 ib_quota); + int (*panel_intf_status)(u32 
disp_num, u32 intf_type); + struct mdss_panel_cfg* (*panel_intf_type)(int intf_val); + int (*dyn_clk_gating_ctrl)(int enable); + bool (*mdp_handoff_pending)(void); +}; + +struct mdss_util_intf *mdss_get_util_intf(void); +bool mdss_get_irq_enable_state(struct mdss_hw *hw); + +static inline int mdss_get_sd_client_cnt(void) +{ + if (!mdss_res) + return 0; + else + return atomic_read(&mdss_res->sd_client_count); +} + +static inline int mdss_get_sc_client_cnt(void) +{ + if (!mdss_res) + return 0; + else + return atomic_read(&mdss_res->sc_client_count); +} + +static inline void mdss_set_quirk(struct mdss_data_type *mdata, + enum mdss_hw_quirk bit) +{ + set_bit(bit, mdata->mdss_quirk_map); +} + +static inline bool mdss_has_quirk(struct mdss_data_type *mdata, + enum mdss_hw_quirk bit) +{ + return test_bit(bit, mdata->mdss_quirk_map); +} + +#define MDSS_VBIF_WRITE(mdata, offset, value, nrt_vbif) \ + (nrt_vbif ? dss_reg_w(&mdata->vbif_nrt_io, offset, value, 0) :\ + dss_reg_w(&mdata->vbif_io, offset, value, 0)) +#define MDSS_VBIF_READ(mdata, offset, nrt_vbif) \ + (nrt_vbif ? dss_reg_r(&mdata->vbif_nrt_io, offset, 0) :\ + dss_reg_r(&mdata->vbif_io, offset, 0)) +#define MDSS_REG_WRITE(mdata, offset, value) \ + dss_reg_w(&mdata->mdss_io, offset, value, 0) +#define MDSS_REG_READ(mdata, offset) \ + dss_reg_r(&mdata->mdss_io, offset, 0) + +#endif /* MDSS_H */ diff --git a/drivers/video/fbdev/msm/mdss_cec_core.c b/drivers/video/fbdev/msm/mdss_cec_core.c new file mode 100644 index 0000000000000000000000000000000000000000..93094b7275663894f2b5f6febb17dd699e4f5f42 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_cec_core.c @@ -0,0 +1,806 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, 2020, The Linux Foundation. All rights reserved. 
*/ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include + +#include "mdss_fb.h" +#include "mdss_cec_core.h" + +#define CEC_ENABLE_MASK BIT(0) +#define CEC_WAKEUP_ENABLE_MASK BIT(1) + +struct cec_msg_node { + struct cec_msg msg; + struct list_head list; +}; + +struct cec_ctl { + bool enabled; + bool compliance_enabled; + bool cec_wakeup_en; + + u8 logical_addr; + + spinlock_t lock; + struct list_head msg_head; + struct cec_abstract_init_data init_data; + +}; + +static struct cec_ctl *cec_get_ctl(struct device *dev) +{ + struct fb_info *fbi; + struct msm_fb_data_type *mfd; + struct mdss_panel_info *pinfo; + + if (!dev) { + pr_err("invalid device\n"); + goto error; + } + + fbi = dev_get_drvdata(dev); + if (!fbi) { + pr_err("invalid fbi\n"); + goto error; + } + + mfd = fbi->par; + if (!mfd) { + pr_err("invalid mfd\n"); + goto error; + } + + pinfo = mfd->panel_info; + if (!pinfo) { + pr_err("invalid pinfo\n"); + goto error; + } + + return pinfo->cec_data; + +error: + return NULL; +} + +static int cec_msg_send(struct cec_ctl *ctl, struct cec_msg *msg) +{ + int ret = -EINVAL; + struct cec_ops *ops; + + if (!ctl) { + pr_err("invalid cec ctl\n"); + goto end; + } + + if (!msg) { + pr_err("invalid cec message\n"); + goto end; + } + + ops = ctl->init_data.ops; + + if (ops && ops->send_msg) + ret = ops->send_msg(ops->data, msg); +end: + return ret; +} + +static void cec_dump_msg(struct cec_ctl *ctl, struct cec_msg *msg) +{ + int i; + unsigned long flags; + + if (!ctl) { + pr_err("invalid cec ctl\n"); + return; + } + + if (!msg) { + pr_err("invalid cec message\n"); + return; + } + + spin_lock_irqsave(&ctl->lock, flags); + pr_debug("==%pS dump start ==\n", + __builtin_return_address(0)); + + pr_debug("cec: sender_id: %d\n", msg->sender_id); + pr_debug("cec: recvr_id: %d\n", msg->recvr_id); + + if (msg->frame_size < 2) { + pr_debug("cec: polling message\n"); + spin_unlock_irqrestore(&ctl->lock, flags); + return; + } + + 
pr_debug("cec: opcode: %02x\n", msg->opcode); + for (i = 0; i < msg->frame_size - 2; i++) + pr_debug("cec: operand(%2d) : %02x\n", i + 1, msg->operand[i]); + + pr_debug("==%pS dump end ==\n", + __builtin_return_address(0)); + spin_unlock_irqrestore(&ctl->lock, flags); +} + +static int cec_disable(struct cec_ctl *ctl) +{ + unsigned long flags; + int ret = -EINVAL; + struct cec_msg_node *msg_node, *tmp; + struct cec_ops *ops; + + if (!ctl) { + pr_err("invalid cec ctl\n"); + goto end; + } + + spin_lock_irqsave(&ctl->lock, flags); + list_for_each_entry_safe(msg_node, tmp, &ctl->msg_head, list) { + list_del(&msg_node->list); + kfree(msg_node); + } + spin_unlock_irqrestore(&ctl->lock, flags); + + ops = ctl->init_data.ops; + + if (ops && ops->enable) + ret = ops->enable(ops->data, false); + + if (!ret) + ctl->enabled = false; + +end: + return ret; +} + +static int cec_enable(struct cec_ctl *ctl) +{ + int ret = -EINVAL; + struct cec_ops *ops; + + if (!ctl) { + pr_err("invalid cec ctl\n"); + goto end; + } + + INIT_LIST_HEAD(&ctl->msg_head); + + ops = ctl->init_data.ops; + + if (ops && ops->enable) + ret = ops->enable(ops->data, true); + + if (!ret) + ctl->enabled = true; + +end: + return ret; +} + +static int cec_send_abort_opcode(struct cec_ctl *ctl, + struct cec_msg *in_msg, u8 reason_operand) +{ + int i = 0; + struct cec_msg out_msg; + + if (!ctl) { + pr_err("invalid cec ctl\n"); + return -EINVAL; + } + + if (!in_msg) { + pr_err("invalid cec message\n"); + return -EINVAL; + } + + out_msg.sender_id = 0x4; + out_msg.recvr_id = in_msg->sender_id; + out_msg.opcode = 0x0; /* opcode for feature abort */ + out_msg.operand[i++] = in_msg->opcode; + out_msg.operand[i++] = reason_operand; + out_msg.frame_size = i + 2; + + return cec_msg_send(ctl, &out_msg); +} + +static int cec_msg_parser(struct cec_ctl *ctl, struct cec_msg *in_msg) +{ + int rc = 0, i = 0; + struct cec_msg out_msg; + + if (!ctl) { + pr_err("invalid cec ctl\n"); + return -EINVAL; + } + + if (!in_msg) { + 
pr_err("invalid cec message\n"); + return -EINVAL; + } + + pr_debug("in_msg->opcode = 0x%x\n", in_msg->opcode); + switch (in_msg->opcode) { + case CEC_MSG_SET_OSD_STRING: + /* Set OSD String */ + pr_debug("Recvd OSD Str=[0x%x]\n", + in_msg->operand[3]); + break; + case CEC_MSG_GIVE_PHYS_ADDR: + /* Give Phy Addr */ + pr_debug("Recvd a Give Phy Addr cmd\n"); + + out_msg.sender_id = 0x4; + /* Broadcast */ + out_msg.recvr_id = 0xF; + out_msg.opcode = 0x84; + out_msg.operand[i++] = 0x10; + out_msg.operand[i++] = 0x0; + out_msg.operand[i++] = 0x04; + out_msg.frame_size = i + 2; + + rc = cec_msg_send(ctl, &out_msg); + break; + case CEC_MSG_ABORT: + /* Abort */ + pr_debug("Recvd an abort cmd.\n"); + + /* reason = "Refused" */ + rc = cec_send_abort_opcode(ctl, in_msg, 0x04); + break; + case CEC_MSG_GIVE_OSD_NAME: + /* Give OSD name */ + pr_debug("Recvd 'Give OSD name' cmd.\n"); + + out_msg.sender_id = 0x4; + out_msg.recvr_id = in_msg->sender_id; + out_msg.opcode = 0x47; /* OSD Name */ + /* Display control byte */ + out_msg.operand[i++] = 0x0; + out_msg.operand[i++] = 'H'; + out_msg.operand[i++] = 'e'; + out_msg.operand[i++] = 'l'; + out_msg.operand[i++] = 'l'; + out_msg.operand[i++] = 'o'; + out_msg.operand[i++] = ' '; + out_msg.operand[i++] = 'W'; + out_msg.operand[i++] = 'o'; + out_msg.operand[i++] = 'r'; + out_msg.operand[i++] = 'l'; + out_msg.operand[i++] = 'd'; + out_msg.frame_size = i + 2; + + rc = cec_msg_send(ctl, &out_msg); + break; + case CEC_MSG_GIVE_POWER_STATUS: + /* Give Device Power status */ + pr_debug("Recvd a Power status message\n"); + + out_msg.sender_id = 0x4; + out_msg.recvr_id = in_msg->sender_id; + out_msg.opcode = 0x90; /* OSD String */ + out_msg.operand[i++] = 'H'; + out_msg.operand[i++] = 'e'; + out_msg.operand[i++] = 'l'; + out_msg.operand[i++] = 'l'; + out_msg.operand[i++] = 'o'; + out_msg.operand[i++] = ' '; + out_msg.operand[i++] = 'W'; + out_msg.operand[i++] = 'o'; + out_msg.operand[i++] = 'r'; + out_msg.operand[i++] = 'l'; + 
out_msg.operand[i++] = 'd'; + out_msg.frame_size = i + 2; + + rc = cec_msg_send(ctl, &out_msg); + break; + case CEC_MSG_ROUTE_CHANGE_CMD: + /* Routing Change cmd */ + case CEC_MSG_SET_STREAM_PATH: + /* Set Stream Path */ + pr_debug("Recvd Set Stream or Routing Change cmd\n"); + + out_msg.sender_id = 0x4; + out_msg.recvr_id = 0xF; /* broadcast this message */ + out_msg.opcode = 0x82; /* Active Source */ + out_msg.operand[i++] = 0x10; + out_msg.operand[i++] = 0x0; + out_msg.frame_size = i + 2; + + rc = cec_msg_send(ctl, &out_msg); + if (rc) + goto end; + + /* sending message */ + memset(&out_msg, 0x0, sizeof(struct cec_msg)); + i = 0; + out_msg.sender_id = 0x4; + out_msg.recvr_id = in_msg->sender_id; + out_msg.opcode = 0x04; /* opcode for Image View On */ + out_msg.frame_size = i + 2; + + rc = cec_msg_send(ctl, &out_msg); + break; + case CEC_MSG_USER_CTRL_PRESS: + /* User Control Pressed */ + pr_debug("User Control Pressed\n"); + break; + case CEC_MSG_USER_CTRL_RELEASE: + /* User Control Released */ + pr_debug("User Control Released\n"); + break; + default: + pr_debug("Recvd an unknown cmd = [%u]\n", + in_msg->opcode); + + /* reason = "Unrecognized opcode" */ + rc = cec_send_abort_opcode(ctl, in_msg, 0x0); + break; + } +end: + return rc; +} + +static int cec_msg_recv(void *data, struct cec_msg *msg) +{ + unsigned long flags; + struct cec_ctl *ctl = data; + struct cec_msg_node *msg_node; + int ret = 0; + + if (!ctl) { + pr_err("invalid cec ctl\n"); + ret = -EINVAL; + goto end; + } + + if (!ctl->enabled) { + pr_err("cec not enabled\n"); + ret = -ENODEV; + goto end; + } + + msg_node = kzalloc(sizeof(*msg_node), GFP_KERNEL); + if (!msg_node) { + ret = -ENOMEM; + goto end; + } + + msg_node->msg = *msg; + + pr_debug("CEC read frame done\n"); + cec_dump_msg(ctl, &msg_node->msg); + + spin_lock_irqsave(&ctl->lock, flags); + if (ctl->compliance_enabled) { + spin_unlock_irqrestore(&ctl->lock, flags); + + ret = cec_msg_parser(ctl, &msg_node->msg); + if (ret) + pr_err("msg 
parsing failed\n"); + + kfree(msg_node); + } else { + list_add_tail(&msg_node->list, &ctl->msg_head); + spin_unlock_irqrestore(&ctl->lock, flags); + + /* wake-up sysfs read_msg context */ + sysfs_notify(ctl->init_data.kobj, "cec", "rd_msg"); + } +end: + return ret; +} + +static ssize_t enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + unsigned long flags; + struct cec_ctl *ctl = cec_get_ctl(dev); + + if (!ctl) { + pr_err("invalid cec ctl\n"); + ret = -EINVAL; + goto end; + } + + spin_lock_irqsave(&ctl->lock, flags); + if (ctl->enabled) { + pr_debug("cec is enabled\n"); + ret = scnprintf(buf, PAGE_SIZE, "%d\n", 1); + } else { + pr_err("cec is disabled\n"); + ret = scnprintf(buf, PAGE_SIZE, "%d\n", 0); + } + spin_unlock_irqrestore(&ctl->lock, flags); +end: + return ret; +} + +static ssize_t enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + bool cec_en; + ssize_t ret; + struct cec_ctl *ctl = cec_get_ctl(dev); + struct cec_ops *ops; + + if (!ctl) { + pr_err("invalid cec ctl\n"); + ret = -EINVAL; + goto end; + } + + ops = ctl->init_data.ops; + + ret = kstrtoint(buf, 10, &val); + if (ret) { + pr_err("kstrtoint failed.\n"); + goto end; + } + + cec_en = (val & CEC_ENABLE_MASK) ? true : false; + + /* bit 1 is used for wakeup feature */ + if ((val & CEC_ENABLE_MASK) && (val & CEC_WAKEUP_ENABLE_MASK)) + ctl->cec_wakeup_en = true; + else + ctl->cec_wakeup_en = false; + + if (ops && ops->wakeup_en) + ops->wakeup_en(ops->data, ctl->cec_wakeup_en); + + if (ctl->enabled == cec_en) { + pr_debug("cec is already %s\n", + cec_en ? 
"enabled" : "disabled"); + goto bail; + } + + if (cec_en) + ret = cec_enable(ctl); + else + ret = cec_disable(ctl); + + if (ret) + goto end; + +bail: + ret = strnlen(buf, PAGE_SIZE); +end: + return ret; +} + +static ssize_t enable_compliance_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + ssize_t ret; + struct cec_ctl *ctl = cec_get_ctl(dev); + + if (!ctl) { + pr_err("invalid cec ctl\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ctl->lock, flags); + ret = scnprintf(buf, PAGE_SIZE, "%d\n", + ctl->compliance_enabled); + + spin_unlock_irqrestore(&ctl->lock, flags); + + return ret; +} + +static ssize_t enable_compliance_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + ssize_t ret; + struct cec_ctl *ctl = cec_get_ctl(dev); + struct cec_ops *ops; + + if (!ctl) { + pr_err("invalid cec ctl\n"); + ret = -EINVAL; + goto end; + } + + ops = ctl->init_data.ops; + + ret = kstrtoint(buf, 10, &val); + if (ret) { + pr_err("kstrtoint failed.\n"); + goto end; + } + + ctl->compliance_enabled = (val == 1) ? 
true : false; + + if (ctl->compliance_enabled) { + ret = cec_enable(ctl); + if (ret) + goto end; + + ctl->logical_addr = 0x4; + + if (ops && ops->wt_logical_addr) + ops->wt_logical_addr(ops->data, ctl->logical_addr); + + } else { + ctl->logical_addr = 0; + + ret = cec_disable(ctl); + if (ret) + goto end; + } + + ret = strnlen(buf, PAGE_SIZE); +end: + return ret; +} + +static ssize_t logical_addr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + ssize_t ret; + struct cec_ctl *ctl = cec_get_ctl(dev); + + if (!ctl) { + pr_err("invalid cec ctl\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ctl->lock, flags); + ret = scnprintf(buf, PAGE_SIZE, "%d\n", ctl->logical_addr); + spin_unlock_irqrestore(&ctl->lock, flags); + + return ret; +} + +static ssize_t logical_addr_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int logical_addr; + unsigned long flags; + ssize_t ret = strnlen(buf, PAGE_SIZE); + struct cec_ctl *ctl = cec_get_ctl(dev); + struct cec_ops *ops; + + if (!ctl) { + pr_err("invalid cec ctl\n"); + ret = -EINVAL; + goto end; + } + + ops = ctl->init_data.ops; + + ret = kstrtoint(buf, 10, &logical_addr); + if (ret) { + pr_err("kstrtoint failed\n"); + goto end; + } + + if (logical_addr < 0 || logical_addr > 15) { + pr_err("invalid logical address\n"); + ret = -EINVAL; + goto end; + } + + spin_lock_irqsave(&ctl->lock, flags); + ctl->logical_addr = (u8)logical_addr; + if (ctl->enabled) { + if (ops && ops->wt_logical_addr) + ops->wt_logical_addr(ops->data, ctl->logical_addr); + } + spin_unlock_irqrestore(&ctl->lock, flags); +end: + return ret; +} + +static ssize_t rd_msg_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i = 0; + unsigned long flags; + struct cec_msg_node *msg_node, *tmp; + struct cec_ctl *ctl = cec_get_ctl(dev); + ssize_t ret; + + if (!ctl) { + pr_err("invalid cec ctl\n"); + ret = -EINVAL; + goto end; + } + + if (!ctl->enabled) { + 
pr_err("cec not enabled\n"); + ret = -EINVAL; + goto end; + } + + spin_lock_irqsave(&ctl->lock, flags); + + if (ctl->compliance_enabled) { + spin_unlock_irqrestore(&ctl->lock, flags); + pr_err("Read no allowed in compliance mode\n"); + ret = -EPERM; + goto end; + } + + if (list_empty_careful(&ctl->msg_head)) { + spin_unlock_irqrestore(&ctl->lock, flags); + pr_err("CEC message queue is empty\n"); + ret = -EINVAL; + goto end; + } + + list_for_each_entry_safe(msg_node, tmp, &ctl->msg_head, list) { + if ((i + 1) * sizeof(struct cec_msg) > PAGE_SIZE) { + pr_debug("Overflowing PAGE_SIZE.\n"); + break; + } + + memcpy(buf + (i * sizeof(struct cec_msg)), &msg_node->msg, + sizeof(struct cec_msg)); + list_del(&msg_node->list); + kfree(msg_node); + i++; + } + + spin_unlock_irqrestore(&ctl->lock, flags); + + ret = i * sizeof(struct cec_msg); +end: + return ret; +} + +static ssize_t wr_msg_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + ssize_t ret; + unsigned long flags; + struct cec_msg *msg = (struct cec_msg *)buf; + struct cec_ctl *ctl = cec_get_ctl(dev); + + if (!ctl) { + pr_err("invalid cec ctl\n"); + ret = -EINVAL; + goto end; + } + + spin_lock_irqsave(&ctl->lock, flags); + if (ctl->compliance_enabled) { + spin_unlock_irqrestore(&ctl->lock, flags); + pr_err("Write not allowed in compliance mode\n"); + ret = -EPERM; + goto end; + } + + if (!ctl->enabled) { + spin_unlock_irqrestore(&ctl->lock, flags); + pr_err("CEC is not configed.\n"); + ret = -EPERM; + goto end; + } + spin_unlock_irqrestore(&ctl->lock, flags); + + if (msg->frame_size > MAX_CEC_FRAME_SIZE) { + pr_err("msg frame too big!\n"); + ret = -EINVAL; + goto end; + } + ret = cec_msg_send(ctl, msg); + if (ret) { + pr_err("cec_msg_send failed\n"); + goto end; + } + + ret = sizeof(struct cec_msg); +end: + return ret; +} + +static DEVICE_ATTR_RW(enable); +static DEVICE_ATTR_RW(enable_compliance); +static DEVICE_ATTR_RW(logical_addr); +static DEVICE_ATTR_RO(rd_msg); +static 
DEVICE_ATTR_WO(wr_msg); + +static struct attribute *cec_fs_attrs[] = { + &dev_attr_enable.attr, + &dev_attr_enable_compliance.attr, + &dev_attr_logical_addr.attr, + &dev_attr_rd_msg.attr, + &dev_attr_wr_msg.attr, + NULL, +}; + +static struct attribute_group cec_fs_attr_group = { + .name = "cec", + .attrs = cec_fs_attrs, +}; + +/** + * cec_abstract_deinit() - Release CEC abstract module + * @input: CEC abstract data + * + * This API releases all the resources allocated for this + * module. + * + * Return: 0 on success otherwise error code. + */ +int cec_abstract_deinit(void *input) +{ + struct cec_ctl *ctl = (struct cec_ctl *)input; + + if (!ctl) + return -EINVAL; + + sysfs_remove_group(ctl->init_data.kobj, &cec_fs_attr_group); + + kfree(ctl); + + return 0; +} + +/** + * cec_abstract_init() - Initialize CEC abstract module + * @init_data: data needed to initialize the CEC abstraction module + * + * This API will initialize the CEC abstract module which connects + * CEC client with CEC hardware. It creates sysfs nodes for client + * to read and write CEC messages. It interacts with hardware with + * provided operation function pointers. Also provides callback + * function pointers to let the hardware inform about incoming + * CEC message. + * + * Return: pointer to cec abstract data which needs to be passed + * as parameter with callback functions. 
+ */ +void *cec_abstract_init(struct cec_abstract_init_data *init_data) +{ + struct cec_ctl *ctl = NULL; + int ret = 0; + + if (!init_data) { + pr_err("invalid cec abstract init data\n"); + ret = -EINVAL; + goto end; + } + + ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); + if (!ctl) { + ret = -ENOMEM; + goto end; + } + + /* keep a copy of init data */ + ctl->init_data = *init_data; + + ret = sysfs_create_group(ctl->init_data.kobj, &cec_fs_attr_group); + if (ret) { + pr_err("cec sysfs group creation failed\n"); + goto end; + } + + spin_lock_init(&ctl->lock); + + /* provide callback function pointers */ + if (init_data->cbs) { + init_data->cbs->msg_recv_notify = cec_msg_recv; + init_data->cbs->data = ctl; + } + + return ctl; +end: + kfree(ctl); + return ERR_PTR(ret); +} + diff --git a/drivers/video/fbdev/msm/mdss_cec_core.h b/drivers/video/fbdev/msm/mdss_cec_core.h new file mode 100644 index 0000000000000000000000000000000000000000..799b3a43b55dc461b423db62e657321a61a64b48 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_cec_core.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015-2016, 2018, 2020, The Linux Foundation. All rights reserved. 
*/ + +#ifndef __MDSS_CEC_CORE_H__ +#define __MDSS_CEC_CORE_H__ + +#define MAX_OPERAND_SIZE 14 + +/* total size: HEADER block (1) + opcode block (1) + operands (14) */ +#define MAX_CEC_FRAME_SIZE (MAX_OPERAND_SIZE + 2) + +/* CEC message set */ +#define CEC_MSG_SET_OSD_STRING 0x64 +#define CEC_MSG_GIVE_PHYS_ADDR 0x83 +#define CEC_MSG_ABORT 0xFF +#define CEC_MSG_GIVE_OSD_NAME 0x46 +#define CEC_MSG_GIVE_POWER_STATUS 0x8F +#define CEC_MSG_ROUTE_CHANGE_CMD 0x80 +#define CEC_MSG_SET_STREAM_PATH 0x86 +#define CEC_MSG_USER_CTRL_PRESS 0x44 +#define CEC_MSG_USER_CTRL_RELEASE 0x45 + +/** + * struct cec_msg - CEC message related data + * @sender_id: CEC message initiator's id + * @recvr_id: CEC message destination's id + * @opcode: CEC message opcode + * @operand: CEC message operands corresponding to opcode + * @frame_size: total CEC frame size + * @retransmit: number of re-tries to transmit message + * + * Basic CEC message structure used by both client and driver. + */ +struct cec_msg { + u8 sender_id; + u8 recvr_id; + u8 opcode; + u8 operand[MAX_OPERAND_SIZE]; + u8 frame_size; + u8 retransmit; +}; + +/** + * struct cec_ops - CEC operations function pointers + * @enable: function pointer to enable CEC + * @send_msg: function pointer to send CEC message + * @wt_logical_addr: function pointer to write logical address + * @wakeup_en: function pointer to enable wakeup feature + * @is_wakeup_en: function pointer to query wakeup feature state + * @device_suspend: function pointer to update device suspend state + * @data: pointer to the data needed to send with operation functions + * + * Defines all the operations that abstract module can call + * to program the CEC driver. 
+ */ +struct cec_ops { + int (*enable)(void *data, bool enable); + int (*send_msg)(void *data, + struct cec_msg *msg); + void (*wt_logical_addr)(void *data, u8 addr); + void (*wakeup_en)(void *data, bool en); + bool (*is_wakeup_en)(void *data); + void (*device_suspend)(void *data, bool suspend); + void *data; +}; + +/** + * struct cec_cbs - CEC callback function pointers + * @msg_recv_notify: function pointer called by CEC driver to notify incoming msg + * @data: pointer to data needed to be sent with the callback function + * + * Defines callback functions which CEC driver can callback to notify any + * change in the hardware. + */ +struct cec_cbs { + int (*msg_recv_notify)(void *data, struct cec_msg *msg); + void *data; +}; + +/** + * struct cec_abstract_init_data - initialization data for abstract module + * @ops: pointer to struct containing all operation function pointers + * @cbs: pointer to struct containing all callback function pointers + * @kobj: pointer to kobject instance associated with CEC driver. + * + * Defines initialization data needed by init API to initialize the module. + */ +struct cec_abstract_init_data { + struct cec_ops *ops; + struct cec_cbs *cbs; + struct kobject *kobj; +}; + +void *cec_abstract_init(struct cec_abstract_init_data *init_data); +int cec_abstract_deinit(void *input); +#endif /* __MDSS_CEC_CORE_H__ */ diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..e197bcc3a1b7500ae62396a27d8b60aef8c0e384 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_compat_utils.c @@ -0,0 +1,4305 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2018, 2020, The Linux Foundation. All rights reserved. 
+ * + */ + +#include +#include + +#include + +#include "mdss_fb.h" +#include "mdss_compat_utils.h" +#include "mdss_mdp_hwio.h" +#include "mdss_mdp.h" + +#define MSMFB_CURSOR32 _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor32) +#define MSMFB_SET_LUT32 _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap32) +#define MSMFB_HISTOGRAM32 _IOWR(MSMFB_IOCTL_MAGIC, 132,\ + struct mdp_histogram_data32) +#define MSMFB_GET_CCS_MATRIX32 _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs32) +#define MSMFB_SET_CCS_MATRIX32 _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs32) +#define MSMFB_OVERLAY_SET32 _IOWR(MSMFB_IOCTL_MAGIC, 135,\ + struct mdp_overlay32) + +#define MSMFB_OVERLAY_GET32 _IOR(MSMFB_IOCTL_MAGIC, 140,\ + struct mdp_overlay32) +#define MSMFB_OVERLAY_BLT32 _IOWR(MSMFB_IOCTL_MAGIC, 142,\ + struct msmfb_overlay_blt32) +#define MSMFB_HISTOGRAM_START32 _IOR(MSMFB_IOCTL_MAGIC, 144,\ + struct mdp_histogram_start_req32) + +#define MSMFB_OVERLAY_3D32 _IOWR(MSMFB_IOCTL_MAGIC, 147,\ + struct msmfb_overlay_3d32) + +#define MSMFB_MIXER_INFO32 _IOWR(MSMFB_IOCTL_MAGIC, 148,\ + struct msmfb_mixer_info_req32) +#define MSMFB_MDP_PP32 _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp32) +#define MSMFB_BUFFER_SYNC32 _IOW(MSMFB_IOCTL_MAGIC, 162, struct mdp_buf_sync32) +#define MSMFB_OVERLAY_PREPARE32 _IOWR(MSMFB_IOCTL_MAGIC, 169, \ + struct mdp_overlay_list32) +#define MSMFB_ATOMIC_COMMIT32 _IOWR(MDP_IOCTL_MAGIC, 128, compat_caddr_t) + +#define MSMFB_ASYNC_POSITION_UPDATE_32 _IOWR(MDP_IOCTL_MAGIC, 129, \ + struct mdp_position_update32) + +static int __copy_layer_pp_info_params(struct mdp_input_layer *layer, + struct mdp_input_layer32 *layer32); + +static unsigned int __do_compat_ioctl_nr(unsigned int cmd32) +{ + unsigned int cmd; + + switch (cmd32) { + case MSMFB_CURSOR32: + cmd = MSMFB_CURSOR; + break; + case MSMFB_SET_LUT32: + cmd = MSMFB_SET_LUT; + break; + case MSMFB_HISTOGRAM32: + cmd = MSMFB_HISTOGRAM; + break; + case MSMFB_GET_CCS_MATRIX32: + cmd = MSMFB_GET_CCS_MATRIX; + break; + case 
MSMFB_SET_CCS_MATRIX32: + cmd = MSMFB_SET_CCS_MATRIX; + break; + case MSMFB_OVERLAY_SET32: + cmd = MSMFB_OVERLAY_SET; + break; + case MSMFB_OVERLAY_GET32: + cmd = MSMFB_OVERLAY_GET; + break; + case MSMFB_OVERLAY_BLT32: + cmd = MSMFB_OVERLAY_BLT; + break; + case MSMFB_OVERLAY_3D32: + cmd = MSMFB_OVERLAY_3D; + break; + case MSMFB_MIXER_INFO32: + cmd = MSMFB_MIXER_INFO; + break; + case MSMFB_MDP_PP32: + cmd = MSMFB_MDP_PP; + break; + case MSMFB_BUFFER_SYNC32: + cmd = MSMFB_BUFFER_SYNC; + break; + case MSMFB_OVERLAY_PREPARE32: + cmd = MSMFB_OVERLAY_PREPARE; + break; + case MSMFB_ATOMIC_COMMIT32: + cmd = MSMFB_ATOMIC_COMMIT; + break; + case MSMFB_ASYNC_POSITION_UPDATE_32: + cmd = MSMFB_ASYNC_POSITION_UPDATE; + break; + default: + cmd = cmd32; + break; + } + + return cmd; +} + +static void __copy_atomic_commit_struct(struct mdp_layer_commit *commit, + struct mdp_layer_commit32 *commit32) +{ + unsigned int destSize = sizeof(commit->commit_v1.reserved); + unsigned int srcSize = sizeof(commit32->commit_v1.reserved); + unsigned int count = (destSize <= srcSize ? 
destSize : srcSize); + + commit->version = commit32->version; + commit->commit_v1.flags = commit32->commit_v1.flags; + commit->commit_v1.input_layer_cnt = + commit32->commit_v1.input_layer_cnt; + commit->commit_v1.left_roi = commit32->commit_v1.left_roi; + commit->commit_v1.right_roi = commit32->commit_v1.right_roi; + commit->commit_v1.bl_level = commit32->commit_v1.bl_level; + memcpy(&commit->commit_v1.reserved, &commit32->commit_v1.reserved, + count); +} + +static struct mdp_input_layer32 *__create_layer_list32( + struct mdp_layer_commit32 *commit32, + u32 layer_count) +{ + u32 buffer_size32; + struct mdp_input_layer32 *layer_list32; + int ret; + + buffer_size32 = sizeof(struct mdp_input_layer32) * layer_count; + + layer_list32 = kmalloc(buffer_size32, GFP_KERNEL); + if (!layer_list32) { + layer_list32 = ERR_PTR(-ENOMEM); + goto end; + } + + ret = copy_from_user(layer_list32, + compat_ptr(commit32->commit_v1.input_layers), + sizeof(struct mdp_input_layer32) * layer_count); + if (ret) { + pr_err("layer list32 copy from user failed, ptr %pK\n", + compat_ptr(commit32->commit_v1.input_layers)); + kfree(layer_list32); + ret = -EFAULT; + layer_list32 = ERR_PTR(ret); + } + +end: + return layer_list32; +} + +static int __copy_scale_params(struct mdp_input_layer *layer, + struct mdp_input_layer32 *layer32) +{ + struct mdp_scale_data *scale; + int ret; + + if (!(layer->flags & MDP_LAYER_ENABLE_PIXEL_EXT)) + return 0; + + scale = kmalloc(sizeof(struct mdp_scale_data), GFP_KERNEL); + if (!scale) { + ret = -ENOMEM; + goto end; + } + + /* scale structure size is same for compat and 64bit version */ + ret = copy_from_user(scale, compat_ptr(layer32->scale), + sizeof(struct mdp_scale_data)); + if (ret) { + kfree(scale); + pr_err("scale param copy from user failed, ptr %pK\n", + compat_ptr(layer32->scale)); + ret = -EFAULT; + } else { + layer->scale = scale; + } +end: + return ret; +} + +static struct mdp_input_layer *__create_layer_list( + struct mdp_layer_commit *commit, + 
struct mdp_input_layer32 *layer_list32, + u32 layer_count) +{ + int i, ret = 0; + u32 buffer_size; + struct mdp_input_layer *layer, *layer_list; + struct mdp_input_layer32 *layer32; + + buffer_size = sizeof(struct mdp_input_layer) * layer_count; + + layer_list = kmalloc(buffer_size, GFP_KERNEL); + if (!layer_list) { + layer_list = ERR_PTR(-ENOMEM); + goto end; + } + + commit->commit_v1.input_layers = layer_list; + + for (i = 0; i < layer_count; i++) { + layer = &layer_list[i]; + layer32 = &layer_list32[i]; + + layer->flags = layer32->flags; + layer->pipe_ndx = layer32->pipe_ndx; + layer->rect_num = layer32->rect_num; + layer->horz_deci = layer32->horz_deci; + layer->vert_deci = layer32->vert_deci; + layer->z_order = layer32->z_order; + layer->transp_mask = layer32->transp_mask; + layer->bg_color = layer32->bg_color; + layer->blend_op = layer32->blend_op; + layer->alpha = layer32->alpha; + layer->color_space = layer32->color_space; + layer->src_rect = layer32->src_rect; + layer->dst_rect = layer32->dst_rect; + layer->buffer = layer32->buffer; + memcpy(&layer->reserved, &layer32->reserved, + sizeof(layer->reserved)); + + layer->scale = NULL; + ret = __copy_scale_params(layer, layer32); + if (ret) + break; + + layer->pp_info = NULL; + ret = __copy_layer_pp_info_params(layer, layer32); + if (ret) + break; + } + + if (ret) { + for (i--; i >= 0; i--) { + kfree(layer_list[i].scale); + mdss_mdp_free_layer_pp_info(&layer_list[i]); + } + kfree(layer_list); + layer_list = ERR_PTR(ret); + } + +end: + return layer_list; +} + +static int __copy_to_user_atomic_commit(struct mdp_layer_commit *commit, + struct mdp_layer_commit32 *commit32, + struct mdp_input_layer32 *layer_list32, + unsigned long argp, u32 layer_count) +{ + int i, ret; + struct mdp_input_layer *layer_list; + + layer_list = commit->commit_v1.input_layers; + + for (i = 0; i < layer_count; i++) + layer_list32[i].error_code = layer_list[i].error_code; + + ret = copy_to_user(compat_ptr(commit32->commit_v1.input_layers), 
+ layer_list32, + sizeof(struct mdp_input_layer32) * layer_count); + if (ret) + goto end; + + ret = copy_to_user(compat_ptr(commit32->commit_v1.output_layer), + commit->commit_v1.output_layer, + sizeof(struct mdp_output_layer)); + if (ret) + goto end; + + commit32->commit_v1.release_fence = + commit->commit_v1.release_fence; + commit32->commit_v1.retire_fence = + commit->commit_v1.retire_fence; + + ret = copy_to_user((void __user *)argp, commit32, + sizeof(struct mdp_layer_commit32)); + +end: + return ret; +} + +static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd, + unsigned long argp, struct file *file) +{ + int ret, i; + struct mdp_layer_commit commit; + struct mdp_layer_commit32 commit32; + u32 layer_count; + struct mdp_input_layer *layer_list = NULL; + struct mdp_input_layer32 *layer_list32 = NULL; + struct mdp_output_layer *output_layer = NULL; + + /* copy top level memory from 32 bit structure to kernel memory */ + ret = copy_from_user(&commit32, (void __user *)argp, + sizeof(struct mdp_layer_commit32)); + if (ret) { + pr_err("%s:copy_from_user failed, ptr %pK\n", __func__, + (void __user *)argp); + ret = -EFAULT; + return ret; + } + + memset(&commit, 0, sizeof(struct mdp_layer_commit)); + __copy_atomic_commit_struct(&commit, &commit32); + + if (commit32.commit_v1.output_layer) { + int buffer_size = sizeof(struct mdp_output_layer); + + output_layer = kzalloc(buffer_size, GFP_KERNEL); + if (!output_layer) + return -ENOMEM; + ret = copy_from_user(output_layer, + compat_ptr(commit32.commit_v1.output_layer), + buffer_size); + if (ret) { + pr_err("fail to copy output layer from user, ptr %pK\n", + compat_ptr(commit32.commit_v1.output_layer)); + ret = -EFAULT; + goto layer_list_err; + } + + commit.commit_v1.output_layer = output_layer; + } + + layer_count = commit32.commit_v1.input_layer_cnt; + if (layer_count > MAX_LAYER_COUNT) { + ret = -EINVAL; + goto layer_list_err; + } else if (layer_count) { + /* + * allocate memory for layer list in 
32bit domain and copy it + * from user + */ + layer_list32 = __create_layer_list32(&commit32, layer_count); + if (IS_ERR_OR_NULL(layer_list32)) { + ret = PTR_ERR(layer_list32); + goto layer_list_err; + } + + /* + * allocate memory for layer list in kernel memory domain and + * copy layer info from 32bit structures to kernel memory + */ + layer_list = __create_layer_list(&commit, layer_list32, + layer_count); + if (IS_ERR_OR_NULL(layer_list)) { + ret = PTR_ERR(layer_list); + goto layer_list_err; + } + } + + ret = mdss_fb_atomic_commit(info, &commit, file); + if (ret) + pr_err("atomic commit failed ret:%d\n", ret); + + if (layer_count) + __copy_to_user_atomic_commit(&commit, &commit32, layer_list32, + argp, layer_count); + + for (i = 0; i < layer_count; i++) { + kfree(layer_list[i].scale); + mdss_mdp_free_layer_pp_info(&layer_list[i]); + } + kfree(layer_list); +layer_list_err: + kfree(layer_list32); + kfree(output_layer); + return ret; +} + +static int __copy_to_user_async_position_update( + struct mdp_position_update *update_pos, + struct mdp_position_update32 *update_pos32, + unsigned long argp, u32 layer_cnt) +{ + int ret; + + ret = copy_to_user(update_pos32->input_layers, + update_pos->input_layers, + sizeof(struct mdp_async_layer) * layer_cnt); + if (ret) + goto end; + + ret = copy_to_user((void __user *) argp, update_pos32, + sizeof(struct mdp_position_update32)); + +end: + return ret; +} + +static struct mdp_async_layer *__create_async_layer_list( + struct mdp_position_update32 *update_pos32, u32 layer_cnt) +{ + u32 buffer_size; + struct mdp_async_layer *layer_list; + int ret; + + buffer_size = sizeof(struct mdp_async_layer) * layer_cnt; + + layer_list = kmalloc(buffer_size, GFP_KERNEL); + if (!layer_list) { + layer_list = ERR_PTR(-ENOMEM); + goto end; + } + + ret = copy_from_user(layer_list, + update_pos32->input_layers, buffer_size); + if (ret) { + pr_err("layer list32 copy from user failed\n"); + kfree(layer_list); + layer_list = ERR_PTR(ret); + } + +end: + 
return layer_list; +} + +static int __compat_async_position_update(struct fb_info *info, + unsigned int cmd, unsigned long argp) +{ + struct mdp_position_update update_pos; + struct mdp_position_update32 update_pos32; + struct mdp_async_layer *layer_list = NULL; + u32 layer_cnt, ret; + + /* copy top level memory from 32 bit structure to kernel memory */ + ret = copy_from_user(&update_pos32, (void __user *)argp, + sizeof(struct mdp_position_update32)); + if (ret) { + pr_err("%s:copy_from_user failed\n", __func__); + return ret; + } + + update_pos.input_layer_cnt = update_pos32.input_layer_cnt; + layer_cnt = update_pos32.input_layer_cnt; + if ((!layer_cnt) || (layer_cnt > MAX_LAYER_COUNT)) { + pr_err("invalid async layers :%d to update\n", layer_cnt); + return -EINVAL; + } + + layer_list = __create_async_layer_list(&update_pos32, + layer_cnt); + if (IS_ERR_OR_NULL(layer_list)) + return PTR_ERR(layer_list); + + update_pos.input_layers = layer_list; + + ret = mdss_fb_async_position_update(info, &update_pos); + if (ret) + pr_err("async position update failed ret:%d\n", ret); + + ret = __copy_to_user_async_position_update(&update_pos, &update_pos32, + argp, layer_cnt); + if (ret) + pr_err("copy to user of async update position failed\n"); + + kfree(layer_list); + return ret; +} + +static int mdss_fb_compat_buf_sync(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + struct mdp_buf_sync32 __user *buf_sync32; + struct mdp_buf_sync __user *buf_sync; + u32 data; + int ret; + + buf_sync = compat_alloc_user_space(sizeof(*buf_sync)); + if (!buf_sync) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, sizeof(*buf_sync)); + return -EINVAL; + } + buf_sync32 = compat_ptr(arg); + + if (copy_in_user(&buf_sync->flags, &buf_sync32->flags, + 3 * sizeof(u32))) + return -EFAULT; + + if (get_user(data, &buf_sync32->acq_fen_fd) || + put_user(compat_ptr(data), &buf_sync->acq_fen_fd) || + get_user(data, &buf_sync32->rel_fen_fd) || + 
put_user(compat_ptr(data), &buf_sync->rel_fen_fd) || + get_user(data, &buf_sync32->retire_fen_fd) || + put_user(compat_ptr(data), &buf_sync->retire_fen_fd)) + return -EFAULT; + + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) buf_sync, file); + if (ret) { + pr_err("%s: failed %d\n", __func__, ret); + return ret; + } + + if (copy_in_user(compat_ptr(buf_sync32->rel_fen_fd), + buf_sync->rel_fen_fd, + sizeof(int))) + return -EFAULT; + if (copy_in_user(compat_ptr(buf_sync32->retire_fen_fd), + buf_sync->retire_fen_fd, + sizeof(int))) { + if (buf_sync->flags & MDP_BUF_SYNC_FLAG_RETIRE_FENCE) + return -EFAULT; + pr_debug("%s: no retire fence fd for wb\n", + __func__); + } + + return ret; +} + +static int __from_user_fb_cmap(struct fb_cmap __user *cmap, + struct fb_cmap32 __user *cmap32) +{ + __u32 data; + + if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32))) + return -EFAULT; + + if (get_user(data, &cmap32->red) || + put_user(compat_ptr(data), &cmap->red) || + get_user(data, &cmap32->green) || + put_user(compat_ptr(data), &cmap->green) || + get_user(data, &cmap32->blue) || + put_user(compat_ptr(data), &cmap->blue) || + get_user(data, &cmap32->transp) || + put_user(compat_ptr(data), &cmap->transp)) + return -EFAULT; + + return 0; +} + +static int __to_user_fb_cmap(struct fb_cmap __user *cmap, + struct fb_cmap32 __user *cmap32) +{ + unsigned long data; + + if (copy_in_user(&cmap32->start, &cmap->start, 2 * sizeof(__u32))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &cmap->red) || + put_user((compat_caddr_t) data, &cmap32->red) || + get_user(data, (unsigned long *) &cmap->green) || + put_user((compat_caddr_t) data, &cmap32->green) || + get_user(data, (unsigned long *) &cmap->blue) || + put_user((compat_caddr_t) data, &cmap32->blue) || + get_user(data, (unsigned long *) &cmap->transp) || + put_user((compat_caddr_t) data, &cmap32->transp)) + return -EFAULT; + + return 0; +} + +static int __from_user_fb_image(struct fb_image __user *image, + 
struct fb_image32 __user *image32) +{ + __u32 data; + + if (copy_in_user(&image->dx, &image32->dx, 6 * sizeof(u32)) || + copy_in_user(&image->depth, &image32->depth, sizeof(u8))) + return -EFAULT; + + if (get_user(data, &image32->data) || + put_user(compat_ptr(data), &image->data)) + return -EFAULT; + + if (__from_user_fb_cmap(&image->cmap, &image32->cmap)) + return -EFAULT; + + return 0; +} + +static int mdss_fb_compat_cursor(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + struct fb_cursor32 __user *cursor32; + struct fb_cursor __user *cursor; + __u32 data; + int ret; + + cursor = compat_alloc_user_space(sizeof(*cursor)); + if (!cursor) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, sizeof(*cursor)); + return -EINVAL; + } + cursor32 = compat_ptr(arg); + + if (copy_in_user(&cursor->set, &cursor32->set, 3 * sizeof(u16))) + return -EFAULT; + + if (get_user(data, &cursor32->mask) || + put_user(compat_ptr(data), &cursor->mask)) + return -EFAULT; + + if (copy_in_user(&cursor->hot, &cursor32->hot, sizeof(struct fbcurpos))) + return -EFAULT; + + if (__from_user_fb_image(&cursor->image, &cursor32->image)) + return -EFAULT; + + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) cursor, file); + return ret; +} + +static int mdss_fb_compat_set_lut(struct fb_info *info, unsigned long arg, + struct file *file) +{ + struct fb_cmap_user __user *cmap; + struct fb_cmap32 __user *cmap32; + __u32 data; + int ret; + + cmap = compat_alloc_user_space(sizeof(*cmap)); + cmap32 = compat_ptr(arg); + + if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32))) + return -EFAULT; + + if (get_user(data, &cmap32->red) || + put_user(compat_ptr(data), &cmap->red) || + get_user(data, &cmap32->green) || + put_user(compat_ptr(data), &cmap->green) || + get_user(data, &cmap32->blue) || + put_user(compat_ptr(data), &cmap->blue) || + get_user(data, &cmap32->transp) || + put_user(compat_ptr(data), &cmap->transp)) + return -EFAULT; + 
    /* Forward the staged 64-bit cmap to the real ioctl handler. */
    ret = mdss_fb_do_ioctl(info, MSMFB_SET_LUT, (unsigned long) cmap, file);
    if (!ret)
        pr_debug("%s: compat ioctl successful\n", __func__);

    return ret;
}

/*
 * Field-by-field copy of mdp_sharp_cfg from its 32-bit layout to the
 * 64-bit layout, both in user space. All five members are u32, so
 * only struct offsets differ between the two layouts.
 */
static int __from_user_sharp_cfg(
    struct mdp_sharp_cfg32 __user *sharp_cfg32,
    struct mdp_sharp_cfg __user *sharp_cfg)
{
    if (copy_in_user(&sharp_cfg->flags,
            &sharp_cfg32->flags,
            sizeof(uint32_t)) ||
        copy_in_user(&sharp_cfg->strength,
            &sharp_cfg32->strength,
            sizeof(uint32_t)) ||
        copy_in_user(&sharp_cfg->edge_thr,
            &sharp_cfg32->edge_thr,
            sizeof(uint32_t)) ||
        copy_in_user(&sharp_cfg->smooth_thr,
            &sharp_cfg32->smooth_thr,
            sizeof(uint32_t)) ||
        copy_in_user(&sharp_cfg->noise_thr,
            &sharp_cfg32->noise_thr,
            sizeof(uint32_t)))
        return -EFAULT;

    return 0;
}

/* Inverse of __from_user_sharp_cfg: 64-bit layout back to 32-bit. */
static int __to_user_sharp_cfg(
    struct mdp_sharp_cfg32 __user *sharp_cfg32,
    struct mdp_sharp_cfg __user *sharp_cfg)
{
    if (copy_in_user(&sharp_cfg32->flags,
            &sharp_cfg->flags,
            sizeof(uint32_t)) ||
        copy_in_user(&sharp_cfg32->strength,
            &sharp_cfg->strength,
            sizeof(uint32_t)) ||
        copy_in_user(&sharp_cfg32->edge_thr,
            &sharp_cfg->edge_thr,
            sizeof(uint32_t)) ||
        copy_in_user(&sharp_cfg32->smooth_thr,
            &sharp_cfg->smooth_thr,
            sizeof(uint32_t)) ||
        copy_in_user(&sharp_cfg32->noise_thr,
            &sharp_cfg->noise_thr,
            sizeof(uint32_t)))
        return -EFAULT;

    return 0;
}

/*
 * Copy mdp_histogram_cfg 32-bit -> 64-bit in user space. Note the
 * mixed member widths: ops/block are u32, frame_cnt/bit_mask are u8,
 * num_bins is u16 — each copied at its own size.
 */
static int __from_user_histogram_cfg(
    struct mdp_histogram_cfg32 __user *hist_cfg32,
    struct mdp_histogram_cfg __user *hist_cfg)
{
    if (copy_in_user(&hist_cfg->ops,
            &hist_cfg32->ops,
            sizeof(uint32_t)) ||
        copy_in_user(&hist_cfg->block,
            &hist_cfg32->block,
            sizeof(uint32_t)) ||
        copy_in_user(&hist_cfg->frame_cnt,
            &hist_cfg32->frame_cnt,
            sizeof(uint8_t)) ||
        copy_in_user(&hist_cfg->bit_mask,
            &hist_cfg32->bit_mask,
            sizeof(uint8_t)) ||
        copy_in_user(&hist_cfg->num_bins,
            &hist_cfg32->num_bins,
            sizeof(uint16_t)))
        return -EFAULT;

    return 0;
}

/* Inverse of __from_user_histogram_cfg (continues on the next line). */
static int __to_user_histogram_cfg(
    struct mdp_histogram_cfg32
__user *hist_cfg32, + struct mdp_histogram_cfg __user *hist_cfg) +{ + if (copy_in_user(&hist_cfg32->ops, + &hist_cfg->ops, + sizeof(uint32_t)) || + copy_in_user(&hist_cfg32->block, + &hist_cfg->block, + sizeof(uint32_t)) || + copy_in_user(&hist_cfg32->frame_cnt, + &hist_cfg->frame_cnt, + sizeof(uint8_t)) || + copy_in_user(&hist_cfg32->bit_mask, + &hist_cfg->bit_mask, + sizeof(uint8_t)) || + copy_in_user(&hist_cfg32->num_bins, + &hist_cfg->num_bins, + sizeof(uint16_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_pcc_coeff( + struct mdp_pcc_coeff32 __user *pcc_coeff32, + struct mdp_pcc_coeff __user *pcc_coeff) +{ + if (copy_in_user(&pcc_coeff->c, + &pcc_coeff32->c, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->r, + &pcc_coeff32->r, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->g, + &pcc_coeff32->g, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->b, + &pcc_coeff32->b, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->rr, + &pcc_coeff32->rr, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->gg, + &pcc_coeff32->gg, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->bb, + &pcc_coeff32->bb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->rg, + &pcc_coeff32->rg, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->gb, + &pcc_coeff32->gb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->rb, + &pcc_coeff32->rb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->rgb_0, + &pcc_coeff32->rgb_0, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff->rgb_1, + &pcc_coeff32->rgb_1, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_pcc_coeff( + struct mdp_pcc_coeff32 __user *pcc_coeff32, + struct mdp_pcc_coeff __user *pcc_coeff) +{ + if (copy_in_user(&pcc_coeff32->c, + &pcc_coeff->c, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->r, + &pcc_coeff->r, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->g, + &pcc_coeff->g, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->b, + &pcc_coeff->b, + sizeof(uint32_t)) || + 
copy_in_user(&pcc_coeff32->rr, + &pcc_coeff->rr, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->gg, + &pcc_coeff->gg, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->bb, + &pcc_coeff->bb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->rg, + &pcc_coeff->rg, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->gb, + &pcc_coeff->gb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->rb, + &pcc_coeff->rb, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->rgb_0, + &pcc_coeff->rgb_0, + sizeof(uint32_t)) || + copy_in_user(&pcc_coeff32->rgb_1, + &pcc_coeff->rgb_1, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_pcc_coeff_v17( + struct mdp_pcc_cfg_data32 __user *pcc_cfg32, + struct mdp_pcc_cfg_data __user *pcc_cfg) +{ + struct mdp_pcc_data_v1_7_32 pcc_cfg_payload32; + struct mdp_pcc_data_v1_7 pcc_cfg_payload; + + if (copy_from_user(&pcc_cfg_payload32, + compat_ptr(pcc_cfg32->cfg_payload), + sizeof(struct mdp_pcc_data_v1_7_32))) { + pr_err("failed to copy payload for pcc from user\n"); + return -EFAULT; + } + + memset(&pcc_cfg_payload, 0, sizeof(pcc_cfg_payload)); + pcc_cfg_payload.r.b = pcc_cfg_payload32.r.b; + pcc_cfg_payload.r.g = pcc_cfg_payload32.r.g; + pcc_cfg_payload.r.c = pcc_cfg_payload32.r.c; + pcc_cfg_payload.r.r = pcc_cfg_payload32.r.r; + pcc_cfg_payload.r.gb = pcc_cfg_payload32.r.gb; + pcc_cfg_payload.r.rb = pcc_cfg_payload32.r.rb; + pcc_cfg_payload.r.rg = pcc_cfg_payload32.r.rg; + pcc_cfg_payload.r.rgb = pcc_cfg_payload32.r.rgb; + + pcc_cfg_payload.g.b = pcc_cfg_payload32.g.b; + pcc_cfg_payload.g.g = pcc_cfg_payload32.g.g; + pcc_cfg_payload.g.c = pcc_cfg_payload32.g.c; + pcc_cfg_payload.g.r = pcc_cfg_payload32.g.r; + pcc_cfg_payload.g.gb = pcc_cfg_payload32.g.gb; + pcc_cfg_payload.g.rb = pcc_cfg_payload32.g.rb; + pcc_cfg_payload.g.rg = pcc_cfg_payload32.g.rg; + pcc_cfg_payload.g.rgb = pcc_cfg_payload32.g.rgb; + + pcc_cfg_payload.b.b = pcc_cfg_payload32.b.b; + pcc_cfg_payload.b.g = pcc_cfg_payload32.b.g; + 
pcc_cfg_payload.b.c = pcc_cfg_payload32.b.c; + pcc_cfg_payload.b.r = pcc_cfg_payload32.b.r; + pcc_cfg_payload.b.gb = pcc_cfg_payload32.b.gb; + pcc_cfg_payload.b.rb = pcc_cfg_payload32.b.rb; + pcc_cfg_payload.b.rg = pcc_cfg_payload32.b.rg; + pcc_cfg_payload.b.rgb = pcc_cfg_payload32.b.rgb; + + if (copy_to_user(pcc_cfg->cfg_payload, &pcc_cfg_payload, + sizeof(pcc_cfg_payload))) { + pr_err("failed to copy payload for pcc to user\n"); + return -EFAULT; + } + return 0; +} + +static int __from_user_pcc_cfg_data( + struct mdp_pcc_cfg_data32 __user *pcc_cfg32, + struct mdp_pcc_cfg_data __user *pcc_cfg) +{ + u32 version; + + if (copy_in_user(&pcc_cfg->block, + &pcc_cfg32->block, + sizeof(uint32_t)) || + copy_in_user(&pcc_cfg->ops, + &pcc_cfg32->ops, + sizeof(uint32_t)) || + copy_in_user(&pcc_cfg->version, + &pcc_cfg32->version, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_from_user(&version, &pcc_cfg32->version, sizeof(u32))) { + pr_err("failed to copy version for pcc\n"); + return -EFAULT; + } + + switch (version) { + case mdp_pcc_v1_7: + if (__from_user_pcc_coeff_v17(pcc_cfg32, pcc_cfg)) { + pr_err("failed to copy pcc v17 data\n"); + return -EFAULT; + } + break; + default: + pr_debug("pcc version %d not supported use legacy\n", version); + if (__from_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->r), + &pcc_cfg->r) || + __from_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->g), + &pcc_cfg->g) || + __from_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->b), + &pcc_cfg->b)) + return -EFAULT; + break; + } + return 0; +} + +static int __to_user_pcc_coeff_v1_7( + struct mdp_pcc_cfg_data32 __user *pcc_cfg32, + struct mdp_pcc_cfg_data __user *pcc_cfg) +{ + struct mdp_pcc_data_v1_7_32 pcc_cfg_payload32; + struct mdp_pcc_data_v1_7 pcc_cfg_payload; + + memset(&pcc_cfg_payload32, 0, sizeof(pcc_cfg_payload32)); + if (copy_from_user(&pcc_cfg_payload, + pcc_cfg->cfg_payload, + sizeof(struct mdp_pcc_data_v1_7))) { + pr_err("failed to copy payload for pcc from 
user\n"); + return -EFAULT; + } + + pcc_cfg_payload32.r.b = pcc_cfg_payload.r.b; + pcc_cfg_payload32.r.g = pcc_cfg_payload.r.g; + pcc_cfg_payload32.r.c = pcc_cfg_payload.r.c; + pcc_cfg_payload32.r.r = pcc_cfg_payload.r.r; + pcc_cfg_payload32.r.gb = pcc_cfg_payload.r.gb; + pcc_cfg_payload32.r.rb = pcc_cfg_payload.r.rb; + pcc_cfg_payload32.r.rg = pcc_cfg_payload.r.rg; + pcc_cfg_payload32.r.rgb = pcc_cfg_payload.r.rgb; + + pcc_cfg_payload32.g.b = pcc_cfg_payload.g.b; + pcc_cfg_payload32.g.g = pcc_cfg_payload.g.g; + pcc_cfg_payload32.g.c = pcc_cfg_payload.g.c; + pcc_cfg_payload32.g.r = pcc_cfg_payload.g.r; + pcc_cfg_payload32.g.gb = pcc_cfg_payload.g.gb; + pcc_cfg_payload32.g.rb = pcc_cfg_payload.g.rb; + pcc_cfg_payload32.g.rg = pcc_cfg_payload.g.rg; + pcc_cfg_payload32.g.rgb = pcc_cfg_payload.g.rgb; + + pcc_cfg_payload32.b.b = pcc_cfg_payload.b.b; + pcc_cfg_payload32.b.g = pcc_cfg_payload.b.g; + pcc_cfg_payload32.b.c = pcc_cfg_payload.b.c; + pcc_cfg_payload32.b.r = pcc_cfg_payload.b.r; + pcc_cfg_payload32.b.gb = pcc_cfg_payload.b.gb; + pcc_cfg_payload32.b.rb = pcc_cfg_payload.b.rb; + pcc_cfg_payload32.b.rg = pcc_cfg_payload.b.rg; + pcc_cfg_payload32.b.rgb = pcc_cfg_payload.b.rgb; + + if (copy_to_user(compat_ptr(pcc_cfg32->cfg_payload), + &pcc_cfg_payload32, + sizeof(pcc_cfg_payload32))) { + pr_err("failed to copy payload for pcc to user\n"); + return -EFAULT; + } + + return 0; +} + + +static int __to_user_pcc_cfg_data( + struct mdp_pcc_cfg_data32 __user *pcc_cfg32, + struct mdp_pcc_cfg_data __user *pcc_cfg) +{ + u32 version; + u32 ops; + + if (copy_from_user(&ops, &pcc_cfg->ops, sizeof(u32))) { + pr_err("failed to copy op for pcc\n"); + return -EFAULT; + } + + if (!(ops & MDP_PP_OPS_READ)) { + pr_debug("Read op is not set. 
Skipping compat copyback\n"); + return 0; + } + + if (copy_from_user(&version, &pcc_cfg->version, sizeof(u32))) { + pr_err("failed to copy version for pcc\n"); + return -EFAULT; + } + + switch (version) { + case mdp_pcc_v1_7: + if (__to_user_pcc_coeff_v1_7(pcc_cfg32, pcc_cfg)) { + pr_err("failed to copy pcc v1_7 data\n"); + return -EFAULT; + } + break; + default: + pr_debug("version invalid, fallback to legacy\n"); + + if (__to_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->r), + &pcc_cfg->r) || + __to_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->g), + &pcc_cfg->g) || + __to_user_pcc_coeff( + compat_ptr((uintptr_t)&pcc_cfg32->b), + &pcc_cfg->b)) + return -EFAULT; + break; + } + + return 0; +} + +static int __from_user_csc_cfg( + struct mdp_csc_cfg32 __user *csc_data32, + struct mdp_csc_cfg __user *csc_data) +{ + if (copy_in_user(&csc_data->flags, + &csc_data32->flags, + sizeof(uint32_t)) || + copy_in_user(&csc_data->csc_mv[0], + &csc_data32->csc_mv[0], + 9 * sizeof(uint32_t)) || + copy_in_user(&csc_data->csc_pre_bv[0], + &csc_data32->csc_pre_bv[0], + 3 * sizeof(uint32_t)) || + copy_in_user(&csc_data->csc_post_bv[0], + &csc_data32->csc_post_bv[0], + 3 * sizeof(uint32_t)) || + copy_in_user(&csc_data->csc_pre_lv[0], + &csc_data32->csc_pre_lv[0], + 6 * sizeof(uint32_t)) || + copy_in_user(&csc_data->csc_post_lv[0], + &csc_data32->csc_post_lv[0], + 6 * sizeof(uint32_t))) + return -EFAULT; + + return 0; +} +static int __to_user_csc_cfg( + struct mdp_csc_cfg32 __user *csc_data32, + struct mdp_csc_cfg __user *csc_data) +{ + if (copy_in_user(&csc_data32->flags, + &csc_data->flags, + sizeof(uint32_t)) || + copy_in_user(&csc_data32->csc_mv[0], + &csc_data->csc_mv[0], + 9 * sizeof(uint32_t)) || + copy_in_user(&csc_data32->csc_pre_bv[0], + &csc_data->csc_pre_bv[0], + 3 * sizeof(uint32_t)) || + copy_in_user(&csc_data32->csc_post_bv[0], + &csc_data->csc_post_bv[0], + 3 * sizeof(uint32_t)) || + copy_in_user(&csc_data32->csc_pre_lv[0], + &csc_data->csc_pre_lv[0], + 6 * 
sizeof(uint32_t)) || + copy_in_user(&csc_data32->csc_post_lv[0], + &csc_data->csc_post_lv[0], + 6 * sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_csc_cfg_data( + struct mdp_csc_cfg_data32 __user *csc_cfg32, + struct mdp_csc_cfg_data __user *csc_cfg) +{ + if (copy_in_user(&csc_cfg->block, + &csc_cfg32->block, + sizeof(uint32_t))) + return -EFAULT; + + if (__from_user_csc_cfg( + compat_ptr((uintptr_t)&csc_cfg32->csc_data), + &csc_cfg->csc_data)) + return -EFAULT; + + return 0; +} + +static int __to_user_csc_cfg_data( + struct mdp_csc_cfg_data32 __user *csc_cfg32, + struct mdp_csc_cfg_data __user *csc_cfg) +{ + if (copy_in_user(&csc_cfg32->block, + &csc_cfg->block, + sizeof(uint32_t))) + return -EFAULT; + + if (__to_user_csc_cfg( + compat_ptr((uintptr_t)&csc_cfg32->csc_data), + &csc_cfg->csc_data)) + return -EFAULT; + + return 0; +} + +static int __from_user_igc_lut_data_v17( + struct mdp_igc_lut_data32 __user *igc_lut32, + struct mdp_igc_lut_data __user *igc_lut) +{ + struct mdp_igc_lut_data_v1_7_32 igc_cfg_payload_32; + struct mdp_igc_lut_data_v1_7 igc_cfg_payload; + + if (copy_from_user(&igc_cfg_payload_32, + compat_ptr(igc_lut32->cfg_payload), + sizeof(igc_cfg_payload_32))) { + pr_err("failed to copy payload from user for igc\n"); + return -EFAULT; + } + + memset(&igc_cfg_payload, 0, sizeof(igc_cfg_payload)); + igc_cfg_payload.c0_c1_data = compat_ptr(igc_cfg_payload_32.c0_c1_data); + igc_cfg_payload.c2_data = compat_ptr(igc_cfg_payload_32.c2_data); + igc_cfg_payload.len = igc_cfg_payload_32.len; + igc_cfg_payload.table_fmt = igc_cfg_payload_32.table_fmt; + if (copy_to_user(igc_lut->cfg_payload, &igc_cfg_payload, + sizeof(igc_cfg_payload))) { + pr_err("failed to copy payload to user for igc\n"); + return -EFAULT; + } + return 0; +} + +static int __from_user_igc_lut_data( + struct mdp_igc_lut_data32 __user *igc_lut32, + struct mdp_igc_lut_data __user *igc_lut) +{ + uint32_t data; + uint32_t version = mdp_igc_vmax; + int ret = 0; + + 
if (copy_in_user(&igc_lut->block, + &igc_lut32->block, + sizeof(uint32_t)) || + copy_in_user(&igc_lut->len, + &igc_lut32->len, + sizeof(uint32_t)) || + copy_in_user(&igc_lut->ops, + &igc_lut32->ops, + sizeof(uint32_t)) || + copy_in_user(&igc_lut->version, + &igc_lut32->version, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(version, &igc_lut32->version)) { + pr_err("failed to copy the version for IGC\n"); + return -EFAULT; + } + + switch (version) { + case mdp_igc_v1_7: + ret = __from_user_igc_lut_data_v17(igc_lut32, igc_lut); + if (ret) + pr_err("failed to copy payload for igc version %d ret %d\n", + version, ret); + break; + default: + pr_debug("version not supported fallback to legacy %d\n", + version); + if (get_user(data, &igc_lut32->c0_c1_data) || + put_user(compat_ptr(data), &igc_lut->c0_c1_data) || + get_user(data, &igc_lut32->c2_data) || + put_user(compat_ptr(data), &igc_lut->c2_data)) + return -EFAULT; + break; + } + return ret; +} + +static int __to_user_igc_lut_data( + struct mdp_igc_lut_data32 __user *igc_lut32, + struct mdp_igc_lut_data __user *igc_lut) +{ + unsigned long data; + + if (copy_in_user(&igc_lut32->block, + &igc_lut->block, + sizeof(uint32_t)) || + copy_in_user(&igc_lut32->len, + &igc_lut->len, + sizeof(uint32_t)) || + copy_in_user(&igc_lut32->ops, + &igc_lut->ops, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &igc_lut->c0_c1_data) || + put_user((compat_caddr_t) data, &igc_lut32->c0_c1_data) || + get_user(data, (unsigned long *) &igc_lut->c2_data) || + put_user((compat_caddr_t) data, &igc_lut32->c2_data)) + return -EFAULT; + + return 0; +} + +static int __from_user_ar_gc_lut_data( + struct mdp_ar_gc_lut_data32 __user *ar_gc_data32, + struct mdp_ar_gc_lut_data __user *ar_gc_data) +{ + if (copy_in_user(&ar_gc_data->x_start, + &ar_gc_data32->x_start, + sizeof(uint32_t)) || + copy_in_user(&ar_gc_data->slope, + &ar_gc_data32->slope, + sizeof(uint32_t)) || + copy_in_user(&ar_gc_data->offset, + 
            &ar_gc_data32->offset,
            sizeof(uint32_t)))
        return -EFAULT;

    return 0;
}

/*
 * Copy one AR gamma-correction LUT stage (x_start/slope/offset, all
 * u32) from the 64-bit user layout back to the 32-bit one.
 */
static int __to_user_ar_gc_lut_data(
    struct mdp_ar_gc_lut_data32 __user *ar_gc_data32,
    struct mdp_ar_gc_lut_data __user *ar_gc_data)
{
    if (copy_in_user(&ar_gc_data32->x_start,
            &ar_gc_data->x_start,
            sizeof(uint32_t)) ||
        copy_in_user(&ar_gc_data32->slope,
            &ar_gc_data->slope,
            sizeof(uint32_t)) ||
        copy_in_user(&ar_gc_data32->offset,
            &ar_gc_data->offset,
            sizeof(uint32_t)))
        return -EFAULT;

    return 0;
}


/*
 * v1.7 PGC payload: pull the compat payload into kernel memory, widen
 * its c0/c1/c2 table pointers with compat_ptr(), and write the 64-bit
 * payload out to the (user-space) pgc_lut->cfg_payload staging area.
 */
static int __from_user_pgc_lut_data_v1_7(
    struct mdp_pgc_lut_data32 __user *pgc_lut32,
    struct mdp_pgc_lut_data __user *pgc_lut)
{
    struct mdp_pgc_lut_data_v1_7_32 pgc_cfg_payload_32;
    struct mdp_pgc_lut_data_v1_7 pgc_cfg_payload;

    if (copy_from_user(&pgc_cfg_payload_32,
            compat_ptr(pgc_lut32->cfg_payload),
            sizeof(pgc_cfg_payload_32))) {
        pr_err("failed to copy from user the pgc32 payload\n");
        return -EFAULT;
    }
    memset(&pgc_cfg_payload, 0, sizeof(pgc_cfg_payload));
    pgc_cfg_payload.c0_data = compat_ptr(pgc_cfg_payload_32.c0_data);
    pgc_cfg_payload.c1_data = compat_ptr(pgc_cfg_payload_32.c1_data);
    pgc_cfg_payload.c2_data = compat_ptr(pgc_cfg_payload_32.c2_data);
    pgc_cfg_payload.len = pgc_cfg_payload_32.len;
    if (copy_to_user(pgc_lut->cfg_payload, &pgc_cfg_payload,
            sizeof(pgc_cfg_payload))) {
        pr_err("failed to copy to user pgc payload\n");
        return -EFAULT;
    }
    return 0;
}

/*
 * Legacy (pre-v1.7) PGC path: translate the per-stage r/g/b LUT
 * arrays element by element. Stage counts are validated against
 * GC_LUT_SEGMENTS before use (continues on the next line).
 */
static int __from_user_pgc_lut_data_legacy(
    struct mdp_pgc_lut_data32 __user *pgc_lut32,
    struct mdp_pgc_lut_data __user *pgc_lut)
{
    struct mdp_ar_gc_lut_data32 __user *r_data_temp32;
    struct mdp_ar_gc_lut_data32 __user *g_data_temp32;
    struct mdp_ar_gc_lut_data32 __user *b_data_temp32;
    struct mdp_ar_gc_lut_data __user *r_data_temp;
    struct mdp_ar_gc_lut_data __user *g_data_temp;
    struct mdp_ar_gc_lut_data __user *b_data_temp;
    uint8_t num_r_stages, num_g_stages, num_b_stages;
    int i;

    if (copy_from_user(&num_r_stages,
&pgc_lut32->num_r_stages, + sizeof(uint8_t)) || + copy_from_user(&num_g_stages, + &pgc_lut32->num_g_stages, + sizeof(uint8_t)) || + copy_from_user(&num_b_stages, + &pgc_lut32->num_b_stages, + sizeof(uint8_t))) + return -EFAULT; + + if (num_r_stages > GC_LUT_SEGMENTS || num_b_stages > GC_LUT_SEGMENTS + || num_g_stages > GC_LUT_SEGMENTS || !num_r_stages || !num_b_stages + || !num_g_stages) { + pr_err("invalid number of stages r_stages %d b_stages %d g_stages %d\n", + num_r_stages, num_b_stages, num_g_stages); + return -EFAULT; + } + + r_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->r_data); + r_data_temp = pgc_lut->r_data; + + for (i = 0; i < num_r_stages; i++) { + if (__from_user_ar_gc_lut_data( + &r_data_temp32[i], + &r_data_temp[i])) + return -EFAULT; + } + + g_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->g_data); + g_data_temp = pgc_lut->g_data; + + for (i = 0; i < num_g_stages; i++) { + if (__from_user_ar_gc_lut_data( + &g_data_temp32[i], + &g_data_temp[i])) + return -EFAULT; + } + + b_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->b_data); + b_data_temp = pgc_lut->b_data; + + for (i = 0; i < num_b_stages; i++) { + if (__from_user_ar_gc_lut_data( + &b_data_temp32[i], + &b_data_temp[i])) + return -EFAULT; + } + return 0; +} + +static int __from_user_pgc_lut_data( + struct mdp_pgc_lut_data32 __user *pgc_lut32, + struct mdp_pgc_lut_data __user *pgc_lut) +{ + u32 version = mdp_pgc_vmax; + int ret = 0; + + if (copy_in_user(&pgc_lut->block, + &pgc_lut32->block, + sizeof(uint32_t)) || + copy_in_user(&pgc_lut->flags, + &pgc_lut32->flags, + sizeof(uint32_t)) || + copy_in_user(&pgc_lut->num_r_stages, + &pgc_lut32->num_r_stages, + sizeof(uint8_t)) || + copy_in_user(&pgc_lut->num_g_stages, + &pgc_lut32->num_g_stages, + sizeof(uint8_t)) || + copy_in_user(&pgc_lut->num_b_stages, + &pgc_lut32->num_b_stages, + sizeof(uint8_t)) || + copy_in_user(&pgc_lut->version, + &pgc_lut32->version, + sizeof(uint32_t))) + return -EFAULT; + if (copy_from_user(&version, &pgc_lut32->version, 
sizeof(u32))) { + pr_err("version copying failed\n"); + return -EFAULT; + } + switch (version) { + case mdp_pgc_v1_7: + ret = __from_user_pgc_lut_data_v1_7(pgc_lut32, pgc_lut); + if (ret) + pr_err("failed to copy pgc v17\n"); + break; + default: + pr_debug("version %d not supported fallback to legacy\n", + version); + ret = __from_user_pgc_lut_data_legacy(pgc_lut32, pgc_lut); + if (ret) + pr_err("copy from user pgc lut legacy failed ret %d\n", + ret); + break; + } + return ret; +} + +static int __to_user_pgc_lut_data( + struct mdp_pgc_lut_data32 __user *pgc_lut32, + struct mdp_pgc_lut_data __user *pgc_lut) +{ + struct mdp_ar_gc_lut_data32 __user *r_data_temp32; + struct mdp_ar_gc_lut_data32 __user *g_data_temp32; + struct mdp_ar_gc_lut_data32 __user *b_data_temp32; + struct mdp_ar_gc_lut_data __user *r_data_temp; + struct mdp_ar_gc_lut_data __user *g_data_temp; + struct mdp_ar_gc_lut_data __user *b_data_temp; + uint8_t num_r_stages, num_g_stages, num_b_stages; + int i; + + if (copy_in_user(&pgc_lut32->block, + &pgc_lut->block, + sizeof(uint32_t)) || + copy_in_user(&pgc_lut32->flags, + &pgc_lut->flags, + sizeof(uint32_t)) || + copy_in_user(&pgc_lut32->num_r_stages, + &pgc_lut->num_r_stages, + sizeof(uint8_t)) || + copy_in_user(&pgc_lut32->num_g_stages, + &pgc_lut->num_g_stages, + sizeof(uint8_t)) || + copy_in_user(&pgc_lut32->num_b_stages, + &pgc_lut->num_b_stages, + sizeof(uint8_t))) + return -EFAULT; + + if (copy_from_user(&num_r_stages, + &pgc_lut->num_r_stages, + sizeof(uint8_t)) || + copy_from_user(&num_g_stages, + &pgc_lut->num_g_stages, + sizeof(uint8_t)) || + copy_from_user(&num_b_stages, + &pgc_lut->num_b_stages, + sizeof(uint8_t))) + return -EFAULT; + + r_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->r_data); + r_data_temp = pgc_lut->r_data; + for (i = 0; i < num_r_stages; i++) { + if (__to_user_ar_gc_lut_data( + &r_data_temp32[i], + &r_data_temp[i])) + return -EFAULT; + } + + g_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->g_data); + g_data_temp = 
pgc_lut->g_data; + for (i = 0; i < num_g_stages; i++) { + if (__to_user_ar_gc_lut_data( + &g_data_temp32[i], + &g_data_temp[i])) + return -EFAULT; + } + + b_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->b_data); + b_data_temp = pgc_lut->b_data; + for (i = 0; i < num_b_stages; i++) { + if (__to_user_ar_gc_lut_data( + &b_data_temp32[i], + &b_data_temp[i])) + return -EFAULT; + } + + return 0; +} + +static int __from_user_hist_lut_data_v1_7( + struct mdp_hist_lut_data32 __user *hist_lut32, + struct mdp_hist_lut_data __user *hist_lut) +{ + struct mdp_hist_lut_data_v1_7_32 hist_lut_cfg_payload32; + struct mdp_hist_lut_data_v1_7 hist_lut_cfg_payload; + + if (copy_from_user(&hist_lut_cfg_payload32, + compat_ptr(hist_lut32->cfg_payload), + sizeof(hist_lut_cfg_payload32))) { + pr_err("failed to copy the Hist Lut payload from userspace\n"); + return -EFAULT; + } + + memset(&hist_lut_cfg_payload, 0, sizeof(hist_lut_cfg_payload)); + hist_lut_cfg_payload.len = hist_lut_cfg_payload32.len; + hist_lut_cfg_payload.data = compat_ptr(hist_lut_cfg_payload32.data); + + if (copy_to_user(hist_lut->cfg_payload, + &hist_lut_cfg_payload, + sizeof(hist_lut_cfg_payload))) { + pr_err("Failed to copy to user hist lut cfg payload\n"); + return -EFAULT; + } + + return 0; +} + +static int __from_user_hist_lut_data( + struct mdp_hist_lut_data32 __user *hist_lut32, + struct mdp_hist_lut_data __user *hist_lut) +{ + uint32_t version = 0; + uint32_t data; + + if (copy_in_user(&hist_lut->block, + &hist_lut32->block, + sizeof(uint32_t)) || + copy_in_user(&hist_lut->version, + &hist_lut32->version, + sizeof(uint32_t)) || + copy_in_user(&hist_lut->hist_lut_first, + &hist_lut32->hist_lut_first, + sizeof(uint32_t)) || + copy_in_user(&hist_lut->ops, + &hist_lut32->ops, + sizeof(uint32_t)) || + copy_in_user(&hist_lut->len, + &hist_lut32->len, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_from_user(&version, + &hist_lut32->version, + sizeof(uint32_t))) { + pr_err("failed to copy the version info\n"); + 
return -EFAULT; + } + + switch (version) { + case mdp_hist_lut_v1_7: + if (__from_user_hist_lut_data_v1_7(hist_lut32, hist_lut)) { + pr_err("failed to get hist lut data for version %d\n", + version); + return -EFAULT; + } + break; + default: + pr_debug("version invalid, fallback to legacy\n"); + if (get_user(data, &hist_lut32->data) || + put_user(compat_ptr(data), &hist_lut->data)) + return -EFAULT; + break; + } + + return 0; +} + +static int __to_user_hist_lut_data( + struct mdp_hist_lut_data32 __user *hist_lut32, + struct mdp_hist_lut_data __user *hist_lut) +{ + unsigned long data; + + if (copy_in_user(&hist_lut32->block, + &hist_lut->block, + sizeof(uint32_t)) || + copy_in_user(&hist_lut32->ops, + &hist_lut->ops, + sizeof(uint32_t)) || + copy_in_user(&hist_lut32->len, + &hist_lut->len, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &hist_lut->data) || + put_user((compat_caddr_t) data, &hist_lut32->data)) + return -EFAULT; + + return 0; +} + +static int __from_user_rgb_lut_data( + struct mdp_rgb_lut_data32 __user *rgb_lut32, + struct mdp_rgb_lut_data __user *rgb_lut) +{ + if (copy_in_user(&rgb_lut->flags, &rgb_lut32->flags, + sizeof(uint32_t)) || + copy_in_user(&rgb_lut->lut_type, &rgb_lut32->lut_type, + sizeof(uint32_t))) + return -EFAULT; + + return __from_user_fb_cmap(&rgb_lut->cmap, &rgb_lut32->cmap); +} + +static int __to_user_rgb_lut_data( + struct mdp_rgb_lut_data32 __user *rgb_lut32, + struct mdp_rgb_lut_data __user *rgb_lut) +{ + if (copy_in_user(&rgb_lut32->flags, &rgb_lut->flags, + sizeof(uint32_t)) || + copy_in_user(&rgb_lut32->lut_type, &rgb_lut->lut_type, + sizeof(uint32_t))) + return -EFAULT; + + return __to_user_fb_cmap(&rgb_lut->cmap, &rgb_lut32->cmap); +} + +static int __from_user_lut_cfg_data( + struct mdp_lut_cfg_data32 __user *lut_cfg32, + struct mdp_lut_cfg_data __user *lut_cfg) +{ + uint32_t lut_type; + int ret = 0; + + if (copy_from_user(&lut_type, &lut_cfg32->lut_type, + sizeof(uint32_t))) + return 
-EFAULT; + + if (copy_in_user(&lut_cfg->lut_type, + &lut_cfg32->lut_type, + sizeof(uint32_t))) + return -EFAULT; + + switch (lut_type) { + case mdp_lut_igc: + ret = __from_user_igc_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.igc_lut_data), + &lut_cfg->data.igc_lut_data); + break; + case mdp_lut_pgc: + ret = __from_user_pgc_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.pgc_lut_data), + &lut_cfg->data.pgc_lut_data); + break; + case mdp_lut_hist: + ret = __from_user_hist_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.hist_lut_data), + &lut_cfg->data.hist_lut_data); + break; + case mdp_lut_rgb: + ret = __from_user_rgb_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.rgb_lut_data), + &lut_cfg->data.rgb_lut_data); + break; + default: + break; + } + + return ret; +} + +static int __to_user_lut_cfg_data( + struct mdp_lut_cfg_data32 __user *lut_cfg32, + struct mdp_lut_cfg_data __user *lut_cfg) +{ + uint32_t lut_type; + int ret = 0; + + if (copy_from_user(&lut_type, &lut_cfg->lut_type, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_in_user(&lut_cfg32->lut_type, + &lut_cfg->lut_type, + sizeof(uint32_t))) + return -EFAULT; + + switch (lut_type) { + case mdp_lut_igc: + ret = __to_user_igc_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.igc_lut_data), + &lut_cfg->data.igc_lut_data); + break; + case mdp_lut_pgc: + ret = __to_user_pgc_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.pgc_lut_data), + &lut_cfg->data.pgc_lut_data); + break; + case mdp_lut_hist: + ret = __to_user_hist_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.hist_lut_data), + &lut_cfg->data.hist_lut_data); + break; + case mdp_lut_rgb: + ret = __to_user_rgb_lut_data( + compat_ptr((uintptr_t)&lut_cfg32->data.rgb_lut_data), + &lut_cfg->data.rgb_lut_data); + break; + default: + break; + } + + return ret; +} + +static int __from_user_qseed_cfg( + struct mdp_qseed_cfg32 __user *qseed_data32, + struct mdp_qseed_cfg __user *qseed_data) +{ + uint32_t data; + + if 
(copy_in_user(&qseed_data->table_num, + &qseed_data32->table_num, + sizeof(uint32_t)) || + copy_in_user(&qseed_data->ops, + &qseed_data32->ops, + sizeof(uint32_t)) || + copy_in_user(&qseed_data->len, + &qseed_data32->len, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, &qseed_data32->data) || + put_user(compat_ptr(data), &qseed_data->data)) + return -EFAULT; + + return 0; +} + +static int __to_user_qseed_cfg( + struct mdp_qseed_cfg32 __user *qseed_data32, + struct mdp_qseed_cfg __user *qseed_data) +{ + unsigned long data; + + if (copy_in_user(&qseed_data32->table_num, + &qseed_data->table_num, + sizeof(uint32_t)) || + copy_in_user(&qseed_data32->ops, + &qseed_data->ops, + sizeof(uint32_t)) || + copy_in_user(&qseed_data32->len, + &qseed_data->len, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &qseed_data->data) || + put_user((compat_caddr_t) data, &qseed_data32->data)) + return -EFAULT; + + return 0; +} + +static int __from_user_qseed_cfg_data( + struct mdp_qseed_cfg_data32 __user *qseed_cfg32, + struct mdp_qseed_cfg_data __user *qseed_cfg) +{ + if (copy_in_user(&qseed_cfg->block, + &qseed_cfg32->block, + sizeof(uint32_t))) + return -EFAULT; + + if (__from_user_qseed_cfg( + compat_ptr((uintptr_t)&qseed_cfg32->qseed_data), + &qseed_cfg->qseed_data)) + return -EFAULT; + + return 0; +} + +static int __to_user_qseed_cfg_data( + struct mdp_qseed_cfg_data32 __user *qseed_cfg32, + struct mdp_qseed_cfg_data __user *qseed_cfg) +{ + if (copy_in_user(&qseed_cfg32->block, + &qseed_cfg->block, + sizeof(uint32_t))) + return -EFAULT; + + if (__to_user_qseed_cfg( + compat_ptr((uintptr_t)&qseed_cfg32->qseed_data), + &qseed_cfg->qseed_data)) + return -EFAULT; + + return 0; +} + +static int __from_user_bl_scale_data( + struct mdp_bl_scale_data32 __user *bl_scale32, + struct mdp_bl_scale_data __user *bl_scale) +{ + if (copy_in_user(&bl_scale->min_lvl, + &bl_scale32->min_lvl, + sizeof(uint32_t)) || + copy_in_user(&bl_scale->scale, + 
&bl_scale32->scale, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_pa_cfg( + struct mdp_pa_cfg32 __user *pa_data32, + struct mdp_pa_cfg __user *pa_data) +{ + if (copy_in_user(&pa_data->flags, + &pa_data32->flags, + sizeof(uint32_t)) || + copy_in_user(&pa_data->hue_adj, + &pa_data32->hue_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data->sat_adj, + &pa_data32->sat_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data->val_adj, + &pa_data32->val_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data->cont_adj, + &pa_data32->cont_adj, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_pa_cfg( + struct mdp_pa_cfg32 __user *pa_data32, + struct mdp_pa_cfg __user *pa_data) +{ + if (copy_in_user(&pa_data32->flags, + &pa_data->flags, + sizeof(uint32_t)) || + copy_in_user(&pa_data32->hue_adj, + &pa_data->hue_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data32->sat_adj, + &pa_data->sat_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data32->val_adj, + &pa_data->val_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_data32->cont_adj, + &pa_data->cont_adj, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_pa_cfg_data( + struct mdp_pa_cfg_data32 __user *pa_cfg32, + struct mdp_pa_cfg_data __user *pa_cfg) +{ + if (copy_in_user(&pa_cfg->block, + &pa_cfg32->block, + sizeof(uint32_t))) + return -EFAULT; + if (__from_user_pa_cfg( + compat_ptr((uintptr_t)&pa_cfg32->pa_data), + &pa_cfg->pa_data)) + return -EFAULT; + + return 0; +} + +static int __to_user_pa_cfg_data( + struct mdp_pa_cfg_data32 __user *pa_cfg32, + struct mdp_pa_cfg_data __user *pa_cfg) +{ + if (copy_in_user(&pa_cfg32->block, + &pa_cfg->block, + sizeof(uint32_t))) + return -EFAULT; + if (__to_user_pa_cfg( + compat_ptr((uintptr_t)&pa_cfg32->pa_data), + &pa_cfg->pa_data)) + return -EFAULT; + + return 0; +} + +static int __from_user_mem_col_cfg( + struct mdp_pa_mem_col_cfg32 __user *mem_col_cfg32, + struct mdp_pa_mem_col_cfg __user 
*mem_col_cfg) +{ + if (copy_in_user(&mem_col_cfg->color_adjust_p0, + &mem_col_cfg32->color_adjust_p0, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg->color_adjust_p1, + &mem_col_cfg32->color_adjust_p1, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg->hue_region, + &mem_col_cfg32->hue_region, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg->sat_region, + &mem_col_cfg32->sat_region, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg->val_region, + &mem_col_cfg32->val_region, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_mem_col_cfg( + struct mdp_pa_mem_col_cfg32 __user *mem_col_cfg32, + struct mdp_pa_mem_col_cfg __user *mem_col_cfg) +{ + if (copy_in_user(&mem_col_cfg32->color_adjust_p0, + &mem_col_cfg->color_adjust_p0, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg32->color_adjust_p1, + &mem_col_cfg->color_adjust_p1, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg32->hue_region, + &mem_col_cfg->hue_region, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg32->sat_region, + &mem_col_cfg->sat_region, + sizeof(uint32_t)) || + copy_in_user(&mem_col_cfg32->val_region, + &mem_col_cfg->val_region, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_pa_v2_data( + struct mdp_pa_v2_data32 __user *pa_v2_data32, + struct mdp_pa_v2_data __user *pa_v2_data) +{ + uint32_t data; + + if (copy_in_user(&pa_v2_data->flags, + &pa_v2_data32->flags, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->global_hue_adj, + &pa_v2_data32->global_hue_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->global_sat_adj, + &pa_v2_data32->global_sat_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->global_val_adj, + &pa_v2_data32->global_val_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->global_cont_adj, + &pa_v2_data32->global_cont_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->six_zone_thresh, + &pa_v2_data32->six_zone_thresh, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data->six_zone_len, + 
&pa_v2_data32->six_zone_len, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, &pa_v2_data32->six_zone_curve_p0) || + put_user(compat_ptr(data), &pa_v2_data->six_zone_curve_p0) || + get_user(data, &pa_v2_data32->six_zone_curve_p1) || + put_user(compat_ptr(data), &pa_v2_data->six_zone_curve_p1)) + return -EFAULT; + + if (__from_user_mem_col_cfg( + compat_ptr((uintptr_t)&pa_v2_data32->skin_cfg), + &pa_v2_data->skin_cfg) || + __from_user_mem_col_cfg( + compat_ptr((uintptr_t)&pa_v2_data32->sky_cfg), + &pa_v2_data->sky_cfg) || + __from_user_mem_col_cfg( + compat_ptr((uintptr_t)&pa_v2_data32->fol_cfg), + &pa_v2_data->fol_cfg)) + return -EFAULT; + + return 0; +} + +static int __to_user_pa_v2_data( + struct mdp_pa_v2_data32 __user *pa_v2_data32, + struct mdp_pa_v2_data __user *pa_v2_data) +{ + unsigned long data; + + if (copy_in_user(&pa_v2_data32->flags, + &pa_v2_data->flags, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data32->global_hue_adj, + &pa_v2_data->global_hue_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data32->global_sat_adj, + &pa_v2_data->global_sat_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data32->global_val_adj, + &pa_v2_data->global_val_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data32->global_cont_adj, + &pa_v2_data->global_cont_adj, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data32->six_zone_thresh, + &pa_v2_data->six_zone_thresh, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_data32->six_zone_len, + &pa_v2_data->six_zone_len, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &pa_v2_data->six_zone_curve_p0) || + put_user((compat_caddr_t) data, &pa_v2_data32->six_zone_curve_p0) || + get_user(data, (unsigned long *) &pa_v2_data->six_zone_curve_p1) || + put_user((compat_caddr_t) data, &pa_v2_data32->six_zone_curve_p1)) + return -EFAULT; + + if (__to_user_mem_col_cfg( + compat_ptr((uintptr_t)&pa_v2_data32->skin_cfg), + &pa_v2_data->skin_cfg) || + __to_user_mem_col_cfg( + 
compat_ptr((uintptr_t)&pa_v2_data32->sky_cfg), + &pa_v2_data->sky_cfg) || + __to_user_mem_col_cfg( + compat_ptr((uintptr_t)&pa_v2_data32->fol_cfg), + &pa_v2_data->fol_cfg)) + return -EFAULT; + + return 0; +} + +static inline void __from_user_pa_mem_col_data_v1_7( + struct mdp_pa_mem_col_data_v1_7_32 *mem_col_data32, + struct mdp_pa_mem_col_data_v1_7 *mem_col_data) +{ + mem_col_data->color_adjust_p0 = mem_col_data32->color_adjust_p0; + mem_col_data->color_adjust_p1 = mem_col_data32->color_adjust_p1; + mem_col_data->color_adjust_p2 = mem_col_data32->color_adjust_p2; + mem_col_data->blend_gain = mem_col_data32->blend_gain; + mem_col_data->sat_hold = mem_col_data32->sat_hold; + mem_col_data->val_hold = mem_col_data32->val_hold; + mem_col_data->hue_region = mem_col_data32->hue_region; + mem_col_data->sat_region = mem_col_data32->sat_region; + mem_col_data->val_region = mem_col_data32->val_region; +} + + +static int __from_user_pa_data_v1_7( + struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32, + struct mdp_pa_v2_cfg_data __user *pa_v2_cfg) +{ + struct mdp_pa_data_v1_7_32 pa_cfg_payload32; + struct mdp_pa_data_v1_7 pa_cfg_payload; + + if (copy_from_user(&pa_cfg_payload32, + compat_ptr(pa_v2_cfg32->cfg_payload), + sizeof(pa_cfg_payload32))) { + pr_err("failed to copy the PA payload from userspace\n"); + return -EFAULT; + } + + memset(&pa_cfg_payload, 0, sizeof(pa_cfg_payload)); + pa_cfg_payload.mode = pa_cfg_payload32.mode; + pa_cfg_payload.global_hue_adj = pa_cfg_payload32.global_hue_adj; + pa_cfg_payload.global_sat_adj = pa_cfg_payload32.global_sat_adj; + pa_cfg_payload.global_val_adj = pa_cfg_payload32.global_val_adj; + pa_cfg_payload.global_cont_adj = pa_cfg_payload32.global_cont_adj; + + __from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.skin_cfg, + &pa_cfg_payload.skin_cfg); + __from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.sky_cfg, + &pa_cfg_payload.sky_cfg); + __from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.fol_cfg, + &pa_cfg_payload.fol_cfg); + + 
pa_cfg_payload.six_zone_thresh = pa_cfg_payload32.six_zone_thresh; + pa_cfg_payload.six_zone_adj_p0 = pa_cfg_payload32.six_zone_adj_p0; + pa_cfg_payload.six_zone_adj_p1 = pa_cfg_payload32.six_zone_adj_p1; + pa_cfg_payload.six_zone_sat_hold = pa_cfg_payload32.six_zone_sat_hold; + pa_cfg_payload.six_zone_val_hold = pa_cfg_payload32.six_zone_val_hold; + pa_cfg_payload.six_zone_len = pa_cfg_payload32.six_zone_len; + + pa_cfg_payload.six_zone_curve_p0 = + compat_ptr(pa_cfg_payload32.six_zone_curve_p0); + pa_cfg_payload.six_zone_curve_p1 = + compat_ptr(pa_cfg_payload32.six_zone_curve_p1); + + if (copy_to_user(pa_v2_cfg->cfg_payload, &pa_cfg_payload, + sizeof(pa_cfg_payload))) { + pr_err("Failed to copy to user pa cfg payload\n"); + return -EFAULT; + } + + return 0; +} + +static int __from_user_pa_v2_cfg_data( + struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32, + struct mdp_pa_v2_cfg_data __user *pa_v2_cfg) +{ + uint32_t version; + + if (copy_in_user(&pa_v2_cfg->block, + &pa_v2_cfg32->block, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_cfg->version, + &pa_v2_cfg32->version, + sizeof(uint32_t)) || + copy_in_user(&pa_v2_cfg->flags, + &pa_v2_cfg32->flags, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_from_user(&version, + &pa_v2_cfg32->version, + sizeof(uint32_t))) { + pr_err("failed to copy the version info\n"); + return -EFAULT; + } + + switch (version) { + case mdp_pa_v1_7: + if (__from_user_pa_data_v1_7(pa_v2_cfg32, pa_v2_cfg)) { + pr_err("failed to get pa data for version %d\n", + version); + return -EFAULT; + } + break; + default: + pr_debug("version invalid, fallback to legacy\n"); + if (__from_user_pa_v2_data( + compat_ptr((uintptr_t)&pa_v2_cfg32->pa_v2_data), + &pa_v2_cfg->pa_v2_data)) + return -EFAULT; + break; + } + + return 0; +} + +static inline void __to_user_pa_mem_col_data_v1_7( + struct mdp_pa_mem_col_data_v1_7_32 *mem_col_data32, + struct mdp_pa_mem_col_data_v1_7 *mem_col_data) +{ + mem_col_data32->color_adjust_p0 = mem_col_data->color_adjust_p0; + 
mem_col_data32->color_adjust_p1 = mem_col_data->color_adjust_p1; + mem_col_data32->color_adjust_p2 = mem_col_data->color_adjust_p2; + mem_col_data32->blend_gain = mem_col_data->blend_gain; + mem_col_data32->sat_hold = mem_col_data->sat_hold; + mem_col_data32->val_hold = mem_col_data->val_hold; + mem_col_data32->hue_region = mem_col_data->hue_region; + mem_col_data32->sat_region = mem_col_data->sat_region; + mem_col_data32->val_region = mem_col_data->val_region; +} + +static int __to_user_pa_data_v1_7( + struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32, + struct mdp_pa_v2_cfg_data __user *pa_v2_cfg) +{ + struct mdp_pa_data_v1_7_32 pa_cfg_payload32; + struct mdp_pa_data_v1_7 pa_cfg_payload; + + memset(&pa_cfg_payload32, 0, sizeof(pa_cfg_payload32)); + if (copy_from_user(&pa_cfg_payload, + pa_v2_cfg->cfg_payload, + sizeof(pa_cfg_payload))) { + pr_err("failed to copy the PA payload from userspace\n"); + return -EFAULT; + } + + pa_cfg_payload32.mode = pa_cfg_payload.mode; + pa_cfg_payload32.global_hue_adj = pa_cfg_payload.global_hue_adj; + pa_cfg_payload32.global_sat_adj = pa_cfg_payload.global_sat_adj; + pa_cfg_payload32.global_val_adj = pa_cfg_payload.global_val_adj; + pa_cfg_payload32.global_cont_adj = pa_cfg_payload.global_cont_adj; + + __to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.skin_cfg, + &pa_cfg_payload.skin_cfg); + __to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.sky_cfg, + &pa_cfg_payload.sky_cfg); + __to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.fol_cfg, + &pa_cfg_payload.fol_cfg); + + pa_cfg_payload32.six_zone_thresh = pa_cfg_payload.six_zone_thresh; + pa_cfg_payload32.six_zone_adj_p0 = pa_cfg_payload.six_zone_adj_p0; + pa_cfg_payload32.six_zone_adj_p1 = pa_cfg_payload.six_zone_adj_p1; + pa_cfg_payload32.six_zone_sat_hold = pa_cfg_payload.six_zone_sat_hold; + pa_cfg_payload32.six_zone_val_hold = pa_cfg_payload.six_zone_val_hold; + pa_cfg_payload32.six_zone_len = pa_cfg_payload.six_zone_len; + + if (copy_to_user(compat_ptr(pa_v2_cfg32->cfg_payload), + 
&pa_cfg_payload32, + sizeof(pa_cfg_payload32))) { + pr_err("Failed to copy to user pa cfg payload\n"); + return -EFAULT; + } + + return 0; +} + +static int __to_user_pa_v2_cfg_data( + struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32, + struct mdp_pa_v2_cfg_data __user *pa_v2_cfg) +{ + uint32_t version = 0; + uint32_t flags = 0; + + if (copy_from_user(&version, + &pa_v2_cfg32->version, + sizeof(uint32_t))) + return -EFAULT; + + switch (version) { + case mdp_pa_v1_7: + if (copy_from_user(&flags, + &pa_v2_cfg32->flags, + sizeof(uint32_t))) { + pr_err("failed to get PA v1_7 flags\n"); + return -EFAULT; + } + + if (!(flags & MDP_PP_OPS_READ)) { + pr_debug("Read op not set. Skipping compat copyback\n"); + return 0; + } + + if (__to_user_pa_data_v1_7(pa_v2_cfg32, pa_v2_cfg)) { + pr_err("failed to set pa data for version %d\n", + version); + return -EFAULT; + } + break; + default: + pr_debug("version invalid, fallback to legacy\n"); + + if (copy_from_user(&flags, + &pa_v2_cfg32->pa_v2_data.flags, + sizeof(uint32_t))) { + pr_err("failed to get PAv2 flags\n"); + return -EFAULT; + } + + if (!(flags & MDP_PP_OPS_READ)) { + pr_debug("Read op not set. 
Skipping compat copyback\n"); + return 0; + } + + if (__to_user_pa_v2_data( + compat_ptr((uintptr_t)&pa_v2_cfg32->pa_v2_data), + &pa_v2_cfg->pa_v2_data)) + return -EFAULT; + break; + } + + return 0; +} + +static int __from_user_dither_cfg_data( + struct mdp_dither_cfg_data32 __user *dither_cfg32, + struct mdp_dither_cfg_data __user *dither_cfg) +{ + if (copy_in_user(&dither_cfg->block, + &dither_cfg32->block, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg->flags, + &dither_cfg32->flags, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg->g_y_depth, + &dither_cfg32->g_y_depth, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg->r_cr_depth, + &dither_cfg32->r_cr_depth, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg->b_cb_depth, + &dither_cfg32->b_cb_depth, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_dither_cfg_data( + struct mdp_dither_cfg_data32 __user *dither_cfg32, + struct mdp_dither_cfg_data __user *dither_cfg) +{ + if (copy_in_user(&dither_cfg32->block, + &dither_cfg->block, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg32->flags, + &dither_cfg->flags, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg32->g_y_depth, + &dither_cfg->g_y_depth, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg32->r_cr_depth, + &dither_cfg->r_cr_depth, + sizeof(uint32_t)) || + copy_in_user(&dither_cfg32->b_cb_depth, + &dither_cfg->b_cb_depth, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_gamut_cfg_data_v17( + struct mdp_gamut_cfg_data32 __user *gamut_cfg32, + struct mdp_gamut_cfg_data __user *gamut_cfg) +{ + struct mdp_gamut_data_v1_7 gamut_cfg_payload; + struct mdp_gamut_data_v1_7_32 gamut_cfg_payload32; + u32 i = 0; + + if (copy_from_user(&gamut_cfg_payload32, + compat_ptr(gamut_cfg32->cfg_payload), + sizeof(gamut_cfg_payload32))) { + pr_err("failed to copy the gamut payload from userspace\n"); + return -EFAULT; + } + + memset(&gamut_cfg_payload, 0, sizeof(gamut_cfg_payload)); + gamut_cfg_payload.mode = 
gamut_cfg_payload32.mode; + for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) { + gamut_cfg_payload.tbl_size[i] = + gamut_cfg_payload32.tbl_size[i]; + gamut_cfg_payload.c0_data[i] = + compat_ptr(gamut_cfg_payload32.c0_data[i]); + gamut_cfg_payload.c1_c2_data[i] = + compat_ptr(gamut_cfg_payload32.c1_c2_data[i]); + } + for (i = 0; i < MDP_GAMUT_SCALE_OFF_TABLE_NUM; i++) { + gamut_cfg_payload.tbl_scale_off_sz[i] = + gamut_cfg_payload32.tbl_scale_off_sz[i]; + gamut_cfg_payload.scale_off_data[i] = + compat_ptr(gamut_cfg_payload32.scale_off_data[i]); + } + if (copy_to_user(gamut_cfg->cfg_payload, &gamut_cfg_payload, + sizeof(gamut_cfg_payload))) { + pr_err("failed to copy the gamut payload to userspace\n"); + return -EFAULT; + } + return 0; +} + +static int __from_user_gamut_cfg_data( + struct mdp_gamut_cfg_data32 __user *gamut_cfg32, + struct mdp_gamut_cfg_data __user *gamut_cfg) +{ + uint32_t data, version; + int i; + + if (copy_in_user(&gamut_cfg->block, + &gamut_cfg32->block, + sizeof(uint32_t)) || + copy_in_user(&gamut_cfg->flags, + &gamut_cfg32->flags, + sizeof(uint32_t)) || + copy_in_user(&gamut_cfg->gamut_first, + &gamut_cfg32->gamut_first, + sizeof(uint32_t)) || + copy_in_user(&gamut_cfg->tbl_size[0], + &gamut_cfg32->tbl_size[0], + MDP_GAMUT_TABLE_NUM * sizeof(uint32_t)) || + copy_in_user(&gamut_cfg->version, + &gamut_cfg32->version, + sizeof(uint32_t))) + return 0; + + if (copy_from_user(&version, &gamut_cfg32->version, sizeof(u32))) { + pr_err("failed to copy the version info\n"); + return -EFAULT; + } + + switch (version) { + case mdp_gamut_v1_7: + if (__from_user_gamut_cfg_data_v17(gamut_cfg32, gamut_cfg)) { + pr_err("failed to get the gamut data for version %d\n", + version); + return -EFAULT; + } + break; + default: + pr_debug("version invalid fallback to legacy\n"); + /* The Gamut LUT data contains 3 static arrays for R, G, and B + * gamut data. Each these arrays contains pointers dynamic arrays + * which hold the gamut LUTs for R, G, and B. 
Must copy the array of + * pointers from 32 bit to 64 bit addresses. + */ + for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) { + if (get_user(data, &gamut_cfg32->r_tbl[i]) || + put_user(compat_ptr(data), &gamut_cfg->r_tbl[i])) + return -EFAULT; + } + + for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) { + if (get_user(data, &gamut_cfg32->g_tbl[i]) || + put_user(compat_ptr(data), &gamut_cfg->g_tbl[i])) + return -EFAULT; + } + + for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) { + if (get_user(data, &gamut_cfg32->b_tbl[i]) || + put_user(compat_ptr(data), &gamut_cfg->b_tbl[i])) + return -EFAULT; + } + break; + } + return 0; +} + +static int __to_user_gamut_cfg_data( + struct mdp_gamut_cfg_data32 __user *gamut_cfg32, + struct mdp_gamut_cfg_data __user *gamut_cfg) +{ + unsigned long data; + int i; + + if (copy_in_user(&gamut_cfg32->block, + &gamut_cfg->block, + sizeof(uint32_t)) || + copy_in_user(&gamut_cfg32->flags, + &gamut_cfg->flags, + sizeof(uint32_t)) || + copy_in_user(&gamut_cfg32->gamut_first, + &gamut_cfg->gamut_first, + sizeof(uint32_t)) || + copy_in_user(&gamut_cfg32->tbl_size[0], + &gamut_cfg->tbl_size[0], + MDP_GAMUT_TABLE_NUM * sizeof(uint32_t))) + return 0; + + for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) { + if (get_user(data, (unsigned long *) &gamut_cfg->r_tbl[i]) || + put_user((compat_caddr_t)data, &gamut_cfg32->r_tbl[i])) + return -EFAULT; + } + + for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) { + if (get_user(data, (unsigned long *) &gamut_cfg->g_tbl[i]) || + put_user((compat_caddr_t)data, &gamut_cfg32->g_tbl[i])) + return -EFAULT; + } + + for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) { + if (get_user(data, (unsigned long *) &gamut_cfg->b_tbl[i]) || + put_user((compat_caddr_t)data, &gamut_cfg32->g_tbl[i])) + return -EFAULT; + } + + return 0; +} + +static int __from_user_calib_config_data( + struct mdp_calib_config_data32 __user *calib_cfg32, + struct mdp_calib_config_data __user *calib_cfg) +{ + if (copy_in_user(&calib_cfg->ops, + &calib_cfg32->ops, + sizeof(uint32_t)) || + 
copy_in_user(&calib_cfg->addr, + &calib_cfg32->addr, + sizeof(uint32_t)) || + copy_in_user(&calib_cfg->data, + &calib_cfg32->data, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __to_user_calib_config_data( + struct mdp_calib_config_data32 __user *calib_cfg32, + struct mdp_calib_config_data __user *calib_cfg) +{ + if (copy_in_user(&calib_cfg32->ops, + &calib_cfg->ops, + sizeof(uint32_t)) || + copy_in_user(&calib_cfg32->addr, + &calib_cfg->addr, + sizeof(uint32_t)) || + copy_in_user(&calib_cfg32->data, + &calib_cfg->data, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_ad_init( + struct mdss_ad_init32 __user *ad_init32, + struct mdss_ad_init __user *ad_init) +{ + uint32_t data; + + if (copy_in_user(&ad_init->asym_lut[0], + &ad_init32->asym_lut[0], + 33 * sizeof(uint32_t)) || + copy_in_user(&ad_init->color_corr_lut[0], + &ad_init32->color_corr_lut[0], + 33 * sizeof(uint32_t)) || + copy_in_user(&ad_init->i_control[0], + &ad_init32->i_control[0], + 2 * sizeof(uint8_t)) || + copy_in_user(&ad_init->black_lvl, + &ad_init32->black_lvl, + sizeof(uint16_t)) || + copy_in_user(&ad_init->white_lvl, + &ad_init32->white_lvl, + sizeof(uint16_t)) || + copy_in_user(&ad_init->var, + &ad_init32->var, + sizeof(uint8_t)) || + copy_in_user(&ad_init->limit_ampl, + &ad_init32->limit_ampl, + sizeof(uint8_t)) || + copy_in_user(&ad_init->i_dither, + &ad_init32->i_dither, + sizeof(uint8_t)) || + copy_in_user(&ad_init->slope_max, + &ad_init32->slope_max, + sizeof(uint8_t)) || + copy_in_user(&ad_init->slope_min, + &ad_init32->slope_min, + sizeof(uint8_t)) || + copy_in_user(&ad_init->dither_ctl, + &ad_init32->dither_ctl, + sizeof(uint8_t)) || + copy_in_user(&ad_init->format, + &ad_init32->format, + sizeof(uint8_t)) || + copy_in_user(&ad_init->auto_size, + &ad_init32->auto_size, + sizeof(uint8_t)) || + copy_in_user(&ad_init->frame_w, + &ad_init32->frame_w, + sizeof(uint16_t)) || + copy_in_user(&ad_init->frame_h, + &ad_init32->frame_h, + 
sizeof(uint16_t)) || + copy_in_user(&ad_init->logo_v, + &ad_init32->logo_v, + sizeof(uint8_t)) || + copy_in_user(&ad_init->logo_h, + &ad_init32->logo_h, + sizeof(uint8_t)) || + copy_in_user(&ad_init->alpha, + &ad_init32->alpha, + sizeof(uint32_t)) || + copy_in_user(&ad_init->alpha_base, + &ad_init32->alpha_base, + sizeof(uint32_t)) || + copy_in_user(&ad_init->bl_lin_len, + &ad_init32->bl_lin_len, + sizeof(uint32_t)) || + copy_in_user(&ad_init->bl_att_len, + &ad_init32->bl_att_len, + sizeof(uint32_t))) + return -EFAULT; + + + if (get_user(data, &ad_init32->bl_lin) || + put_user(compat_ptr(data), &ad_init->bl_lin) || + get_user(data, &ad_init32->bl_lin_inv) || + put_user(compat_ptr(data), &ad_init->bl_lin_inv) || + get_user(data, &ad_init32->bl_att_lut) || + put_user(compat_ptr(data), &ad_init->bl_att_lut)) + return -EFAULT; + + return 0; +} + +static int __from_user_ad_cfg( + struct mdss_ad_cfg32 __user *ad_cfg32, + struct mdss_ad_cfg __user *ad_cfg) +{ + if (copy_in_user(&ad_cfg->mode, + &ad_cfg32->mode, + sizeof(uint32_t)) || + copy_in_user(&ad_cfg->al_calib_lut[0], + &ad_cfg32->al_calib_lut[0], + 33 * sizeof(uint32_t)) || + copy_in_user(&ad_cfg->backlight_min, + &ad_cfg32->backlight_min, + sizeof(uint16_t)) || + copy_in_user(&ad_cfg->backlight_max, + &ad_cfg32->backlight_max, + sizeof(uint16_t)) || + copy_in_user(&ad_cfg->backlight_scale, + &ad_cfg32->backlight_scale, + sizeof(uint16_t)) || + copy_in_user(&ad_cfg->amb_light_min, + &ad_cfg32->amb_light_min, + sizeof(uint16_t)) || + copy_in_user(&ad_cfg->filter[0], + &ad_cfg32->filter[0], + 2 * sizeof(uint16_t)) || + copy_in_user(&ad_cfg->calib[0], + &ad_cfg32->calib[0], + 4 * sizeof(uint16_t)) || + copy_in_user(&ad_cfg->strength_limit, + &ad_cfg32->strength_limit, + sizeof(uint8_t)) || + copy_in_user(&ad_cfg->t_filter_recursion, + &ad_cfg32->t_filter_recursion, + sizeof(uint8_t)) || + copy_in_user(&ad_cfg->stab_itr, + &ad_cfg32->stab_itr, + sizeof(uint16_t)) || + copy_in_user(&ad_cfg->bl_ctrl_mode, + 
&ad_cfg32->bl_ctrl_mode, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_ad_init_cfg( + struct mdss_ad_init_cfg32 __user *ad_info32, + struct mdss_ad_init_cfg __user *ad_info) +{ + uint32_t op; + + if (copy_from_user(&op, &ad_info32->ops, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_in_user(&ad_info->ops, + &ad_info32->ops, + sizeof(uint32_t))) + return -EFAULT; + + if (op & MDP_PP_AD_INIT) { + if (__from_user_ad_init( + compat_ptr((uintptr_t)&ad_info32->params.init), + &ad_info->params.init)) + return -EFAULT; + } else if (op & MDP_PP_AD_CFG) { + if (__from_user_ad_cfg( + compat_ptr((uintptr_t)&ad_info32->params.cfg), + &ad_info->params.cfg)) + return -EFAULT; + } else { + pr_err("Invalid AD init/config operation\n"); + return -EINVAL; + } + + return 0; +} + +static int __from_user_ad_input( + struct mdss_ad_input32 __user *ad_input32, + struct mdss_ad_input __user *ad_input) +{ + int mode; + + if (copy_from_user(&mode, + &ad_input32->mode, + sizeof(uint32_t))) + return -EFAULT; + + if (copy_in_user(&ad_input->mode, + &ad_input32->mode, + sizeof(uint32_t)) || + copy_in_user(&ad_input->output, + &ad_input32->output, + sizeof(uint32_t))) + return -EFAULT; + + switch (mode) { + case MDSS_AD_MODE_AUTO_BL: + case MDSS_AD_MODE_AUTO_STR: + if (copy_in_user(&ad_input->in.amb_light, + &ad_input32->in.amb_light, + sizeof(uint32_t))) + return -EFAULT; + break; + case MDSS_AD_MODE_TARG_STR: + case MDSS_AD_MODE_MAN_STR: + if (copy_in_user(&ad_input->in.strength, + &ad_input32->in.strength, + sizeof(uint32_t))) + return -EFAULT; + break; + case MDSS_AD_MODE_CALIB: + if (copy_in_user(&ad_input->in.calib_bl, + &ad_input32->in.calib_bl, + sizeof(uint32_t))) + return -EFAULT; + break; + } + + return 0; +} + +static int __to_user_ad_input( + struct mdss_ad_input32 __user *ad_input32, + struct mdss_ad_input __user *ad_input) +{ + int mode; + + if (copy_from_user(&mode, + &ad_input->mode, + sizeof(uint32_t))) + return -EFAULT; + + if 
(copy_in_user(&ad_input32->mode, + &ad_input->mode, + sizeof(uint32_t)) || + copy_in_user(&ad_input32->output, + &ad_input->output, + sizeof(uint32_t))) + return -EFAULT; + + switch (mode) { + case MDSS_AD_MODE_AUTO_BL: + case MDSS_AD_MODE_AUTO_STR: + if (copy_in_user(&ad_input32->in.amb_light, + &ad_input->in.amb_light, + sizeof(uint32_t))) + return -EFAULT; + break; + case MDSS_AD_MODE_TARG_STR: + case MDSS_AD_MODE_MAN_STR: + if (copy_in_user(&ad_input32->in.strength, + &ad_input->in.strength, + sizeof(uint32_t))) + return -EFAULT; + break; + case MDSS_AD_MODE_CALIB: + if (copy_in_user(&ad_input32->in.calib_bl, + &ad_input->in.calib_bl, + sizeof(uint32_t))) + return -EFAULT; + break; + } + + return 0; +} + +static int __from_user_calib_cfg( + struct mdss_calib_cfg32 __user *calib_cfg32, + struct mdss_calib_cfg __user *calib_cfg) +{ + if (copy_in_user(&calib_cfg->ops, + &calib_cfg32->ops, + sizeof(uint32_t)) || + copy_in_user(&calib_cfg->calib_mask, + &calib_cfg32->calib_mask, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_calib_config_buffer( + struct mdp_calib_config_buffer32 __user *calib_buffer32, + struct mdp_calib_config_buffer __user *calib_buffer) +{ + uint32_t data; + + if (copy_in_user(&calib_buffer->ops, + &calib_buffer32->ops, + sizeof(uint32_t)) || + copy_in_user(&calib_buffer->size, + &calib_buffer32->size, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, &calib_buffer32->buffer) || + put_user(compat_ptr(data), &calib_buffer->buffer)) + return -EFAULT; + + return 0; +} + +static int __to_user_calib_config_buffer( + struct mdp_calib_config_buffer32 __user *calib_buffer32, + struct mdp_calib_config_buffer __user *calib_buffer) +{ + unsigned long data; + + if (copy_in_user(&calib_buffer32->ops, + &calib_buffer->ops, + sizeof(uint32_t)) || + copy_in_user(&calib_buffer32->size, + &calib_buffer->size, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &calib_buffer->buffer) || + 
put_user((compat_caddr_t) data, &calib_buffer32->buffer)) + return -EFAULT; + + return 0; +} + +static int __from_user_calib_dcm_state( + struct mdp_calib_dcm_state32 __user *calib_dcm32, + struct mdp_calib_dcm_state __user *calib_dcm) +{ + if (copy_in_user(&calib_dcm->ops, + &calib_dcm32->ops, + sizeof(uint32_t)) || + copy_in_user(&calib_dcm->dcm_state, + &calib_dcm32->dcm_state, + sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static u32 __pp_compat_size_igc(void) +{ + u32 alloc_size = 0; + /* When we have multiple versions pick largest struct size */ + alloc_size = sizeof(struct mdp_igc_lut_data_v1_7); + return alloc_size; +} + +static u32 __pp_compat_size_hist_lut(void) +{ + u32 alloc_size = 0; + /* When we have multiple versions pick largest struct size */ + alloc_size = sizeof(struct mdp_hist_lut_data_v1_7); + return alloc_size; +} + +static u32 __pp_compat_size_pgc(void) +{ + u32 tbl_sz_max = 0; + + tbl_sz_max = 3 * GC_LUT_SEGMENTS * sizeof(struct mdp_ar_gc_lut_data); + tbl_sz_max += sizeof(struct mdp_pgc_lut_data_v1_7); + return tbl_sz_max; +} + +static u32 __pp_compat_size_pcc(void) +{ + /* if new version of PCC is added return max struct size */ + return sizeof(struct mdp_pcc_data_v1_7); +} + +static u32 __pp_compat_size_pa(void) +{ + /* if new version of PA is added return max struct size */ + return sizeof(struct mdp_pa_data_v1_7); +} + +static u32 __pp_compat_size_gamut(void) +{ + return sizeof(struct mdp_gamut_data_v1_7); +} + +static int __pp_compat_alloc(struct msmfb_mdp_pp32 __user *pp32, + struct msmfb_mdp_pp __user **pp, + uint32_t op) +{ + uint32_t alloc_size = 0, lut_type, pgc_size = 0; + struct mdp_lut_cfg_data *lut_data; + + alloc_size = sizeof(struct msmfb_mdp_pp); + switch (op) { + case mdp_op_lut_cfg: + if (copy_from_user(&lut_type, + &pp32->data.lut_cfg_data.lut_type, + sizeof(uint32_t))) + return -EFAULT; + + switch (lut_type) { + case mdp_lut_pgc: + + pgc_size = GC_LUT_SEGMENTS * + sizeof(struct mdp_ar_gc_lut_data); + 
alloc_size += __pp_compat_size_pgc(); + + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) + return -ENOMEM; + if (clear_user(*pp, alloc_size)) + return -EFAULT; + lut_data = &(*pp)->data.lut_cfg_data; + if (put_user((struct mdp_ar_gc_lut_data *) + ((unsigned long) *pp + + sizeof(struct msmfb_mdp_pp)), + &(lut_data->data.pgc_lut_data.r_data)) || + put_user((struct mdp_ar_gc_lut_data *) + ((unsigned long) *pp + + sizeof(struct msmfb_mdp_pp) + + pgc_size), + &(lut_data->data.pgc_lut_data.g_data)) || + put_user((struct mdp_ar_gc_lut_data *) + ((unsigned long) *pp + + sizeof(struct msmfb_mdp_pp) + + (2 * pgc_size)), + &(lut_data->data.pgc_lut_data.b_data)) || + put_user((void *)((unsigned long) *pp + + sizeof(struct msmfb_mdp_pp) + + (3 * pgc_size)), + &(lut_data->data.pgc_lut_data.cfg_payload))) + return -EFAULT; + break; + case mdp_lut_igc: + alloc_size += __pp_compat_size_igc(); + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("failed to alloc from user size %d for igc\n", + alloc_size); + return -ENOMEM; + } + if (clear_user(*pp, alloc_size)) + return -EFAULT; + lut_data = &(*pp)->data.lut_cfg_data; + if (put_user((void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)), + &(lut_data->data.igc_lut_data.cfg_payload))) + return -EFAULT; + break; + case mdp_lut_hist: + alloc_size += __pp_compat_size_hist_lut(); + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("failed to alloc from user size %d for hist lut\n", + alloc_size); + return -ENOMEM; + } + if (clear_user(*pp, alloc_size)) + return -EFAULT; + lut_data = &(*pp)->data.lut_cfg_data; + if (put_user((void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)), + &(lut_data->data.hist_lut_data.cfg_payload))) + return -EFAULT; + break; + default: + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("failed to alloc from user size %d for lut_type %d\n", + alloc_size, lut_type); + return -ENOMEM; + } + if (clear_user(*pp, 
alloc_size)) + return -EFAULT; + break; + } + break; + case mdp_op_pcc_cfg: + alloc_size += __pp_compat_size_pcc(); + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("alloc from user size %d for pcc fail\n", + alloc_size); + return -ENOMEM; + } + if (clear_user(*pp, alloc_size)) + return -EFAULT; + if (put_user((void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)), + &(*pp)->data.pcc_cfg_data.cfg_payload)) + return -EFAULT; + break; + case mdp_op_gamut_cfg: + alloc_size += __pp_compat_size_gamut(); + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("alloc from user size %d for pcc fail\n", + alloc_size); + return -ENOMEM; + } + if (clear_user(*pp, alloc_size)) + return -EFAULT; + if (put_user((void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)), + &(*pp)->data.gamut_cfg_data.cfg_payload)) + return -EFAULT; + break; + case mdp_op_pa_v2_cfg: + alloc_size += __pp_compat_size_pa(); + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) { + pr_err("alloc from user size %d for pcc fail\n", + alloc_size); + return -ENOMEM; + } + if (clear_user(*pp, alloc_size)) + return -EFAULT; + if (put_user((void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)), + &(*pp)->data.pa_v2_cfg_data.cfg_payload)) + return -EFAULT; + break; + default: + *pp = compat_alloc_user_space(alloc_size); + if (*pp == NULL) + return -ENOMEM; + if (clear_user(*pp, alloc_size)) + return -EFAULT; + break; + } + return 0; +} + +static int mdss_compat_pp_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + uint32_t op; + int ret = 0; + struct msmfb_mdp_pp32 __user *pp32; + struct msmfb_mdp_pp __user *pp; + + pp32 = compat_ptr(arg); + if (copy_from_user(&op, &pp32->op, sizeof(uint32_t))) + return -EFAULT; + + ret = __pp_compat_alloc(pp32, &pp, op); + if (ret) + return ret; + + if (copy_in_user(&pp->op, &pp32->op, sizeof(uint32_t))) + return -EFAULT; + + switch (op) { + case mdp_op_pcc_cfg: 
+ ret = __from_user_pcc_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pcc_cfg_data), + &pp->data.pcc_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_pcc_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pcc_cfg_data), + &pp->data.pcc_cfg_data); + break; + case mdp_op_csc_cfg: + ret = __from_user_csc_cfg_data( + compat_ptr((uintptr_t)&pp32->data.csc_cfg_data), + &pp->data.csc_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_csc_cfg_data( + compat_ptr((uintptr_t)&pp32->data.csc_cfg_data), + &pp->data.csc_cfg_data); + break; + case mdp_op_lut_cfg: + ret = __from_user_lut_cfg_data( + compat_ptr((uintptr_t)&pp32->data.lut_cfg_data), + &pp->data.lut_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_lut_cfg_data( + compat_ptr((uintptr_t)&pp32->data.lut_cfg_data), + &pp->data.lut_cfg_data); + break; + case mdp_op_qseed_cfg: + ret = __from_user_qseed_cfg_data( + compat_ptr((uintptr_t)&pp32->data.qseed_cfg_data), + &pp->data.qseed_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_qseed_cfg_data( + compat_ptr((uintptr_t)&pp32->data.qseed_cfg_data), + &pp->data.qseed_cfg_data); + break; + case mdp_bl_scale_cfg: + ret = __from_user_bl_scale_data( + compat_ptr((uintptr_t)&pp32->data.bl_scale_data), + &pp->data.bl_scale_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + break; + case mdp_op_pa_cfg: + ret = __from_user_pa_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pa_cfg_data), + &pp->data.pa_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, 
file); + if (ret) + goto pp_compat_exit; + ret = __to_user_pa_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pa_cfg_data), + &pp->data.pa_cfg_data); + break; + case mdp_op_pa_v2_cfg: + ret = __from_user_pa_v2_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pa_v2_cfg_data), + &pp->data.pa_v2_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_pa_v2_cfg_data( + compat_ptr((uintptr_t)&pp32->data.pa_v2_cfg_data), + &pp->data.pa_v2_cfg_data); + break; + case mdp_op_dither_cfg: + ret = __from_user_dither_cfg_data( + compat_ptr((uintptr_t)&pp32->data.dither_cfg_data), + &pp->data.dither_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_dither_cfg_data( + compat_ptr((uintptr_t)&pp32->data.dither_cfg_data), + &pp->data.dither_cfg_data); + break; + case mdp_op_gamut_cfg: + ret = __from_user_gamut_cfg_data( + compat_ptr((uintptr_t)&pp32->data.gamut_cfg_data), + &pp->data.gamut_cfg_data); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_gamut_cfg_data( + compat_ptr((uintptr_t)&pp32->data.gamut_cfg_data), + &pp->data.gamut_cfg_data); + break; + case mdp_op_calib_cfg: + ret = __from_user_calib_config_data( + compat_ptr((uintptr_t)&pp32->data.calib_cfg), + &pp->data.calib_cfg); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_calib_config_data( + compat_ptr((uintptr_t)&pp32->data.calib_cfg), + &pp->data.calib_cfg); + break; + case mdp_op_ad_cfg: + ret = __from_user_ad_init_cfg( + compat_ptr((uintptr_t)&pp32->data.ad_init_cfg), + &pp->data.ad_init_cfg); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + break; + case 
mdp_op_ad_input: + ret = __from_user_ad_input( + compat_ptr((uintptr_t)&pp32->data.ad_input), + &pp->data.ad_input); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_ad_input( + compat_ptr((uintptr_t)&pp32->data.ad_input), + &pp->data.ad_input); + break; + case mdp_op_calib_mode: + ret = __from_user_calib_cfg( + compat_ptr((uintptr_t)&pp32->data.mdss_calib_cfg), + &pp->data.mdss_calib_cfg); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + break; + case mdp_op_calib_buffer: + ret = __from_user_calib_config_buffer( + compat_ptr((uintptr_t)&pp32->data.calib_buffer), + &pp->data.calib_buffer); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + if (ret) + goto pp_compat_exit; + ret = __to_user_calib_config_buffer( + compat_ptr((uintptr_t)&pp32->data.calib_buffer), + &pp->data.calib_buffer); + break; + case mdp_op_calib_dcm_state: + ret = __from_user_calib_dcm_state( + compat_ptr((uintptr_t)&pp32->data.calib_dcm), + &pp->data.calib_dcm); + if (ret) + goto pp_compat_exit; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file); + break; + default: + break; + } + +pp_compat_exit: + return ret; +} + +static int __from_user_pp_params(struct mdp_overlay_pp_params32 *ppp32, + struct mdp_overlay_pp_params *ppp) +{ + int ret = 0; + + if (copy_in_user(&ppp->config_ops, + &ppp32->config_ops, + sizeof(uint32_t))) + return -EFAULT; + + ret = __from_user_csc_cfg( + compat_ptr((uintptr_t)&ppp32->csc_cfg), + &ppp->csc_cfg); + if (ret) + return ret; + ret = __from_user_qseed_cfg( + compat_ptr((uintptr_t)&ppp32->qseed_cfg[0]), + &ppp->qseed_cfg[0]); + if (ret) + return ret; + ret = __from_user_qseed_cfg( + compat_ptr((uintptr_t)&ppp32->qseed_cfg[1]), + &ppp->qseed_cfg[1]); + if (ret) + return ret; + ret = __from_user_pa_cfg( + compat_ptr((uintptr_t)&ppp32->pa_cfg), + &ppp->pa_cfg); + if 
(ret) + return ret; + ret = __from_user_igc_lut_data( + compat_ptr((uintptr_t)&ppp32->igc_cfg), + &ppp->igc_cfg); + if (ret) + return ret; + ret = __from_user_sharp_cfg( + compat_ptr((uintptr_t)&ppp32->sharp_cfg), + &ppp->sharp_cfg); + if (ret) + return ret; + ret = __from_user_histogram_cfg( + compat_ptr((uintptr_t)&ppp32->hist_cfg), + &ppp->hist_cfg); + if (ret) + return ret; + ret = __from_user_hist_lut_data( + compat_ptr((uintptr_t)&ppp32->hist_lut_cfg), + &ppp->hist_lut_cfg); + if (ret) + return ret; + ret = __from_user_pa_v2_data( + compat_ptr((uintptr_t)&ppp32->pa_v2_cfg), + &ppp->pa_v2_cfg); + + return ret; +} + +static int __to_user_pp_params(struct mdp_overlay_pp_params *ppp, + struct mdp_overlay_pp_params32 *ppp32) +{ + int ret = 0; + + if (copy_in_user(&ppp32->config_ops, + &ppp->config_ops, + sizeof(uint32_t))) + return -EFAULT; + + ret = __to_user_csc_cfg( + compat_ptr((uintptr_t)&ppp32->csc_cfg), + &ppp->csc_cfg); + if (ret) + return ret; + ret = __to_user_qseed_cfg( + compat_ptr((uintptr_t)&ppp32->qseed_cfg[0]), + &ppp->qseed_cfg[0]); + if (ret) + return ret; + ret = __to_user_qseed_cfg( + compat_ptr((uintptr_t)&ppp32->qseed_cfg[1]), + &ppp->qseed_cfg[1]); + if (ret) + return ret; + ret = __to_user_pa_cfg( + compat_ptr((uintptr_t)&ppp32->pa_cfg), + &ppp->pa_cfg); + if (ret) + return ret; + ret = __to_user_igc_lut_data( + compat_ptr((uintptr_t)&ppp32->igc_cfg), + &ppp->igc_cfg); + if (ret) + return ret; + ret = __to_user_sharp_cfg( + compat_ptr((uintptr_t)&ppp32->sharp_cfg), + &ppp->sharp_cfg); + if (ret) + return ret; + ret = __to_user_histogram_cfg( + compat_ptr((uintptr_t)&ppp32->hist_cfg), + &ppp->hist_cfg); + if (ret) + return ret; + ret = __to_user_hist_lut_data( + compat_ptr((uintptr_t)&ppp32->hist_lut_cfg), + &ppp->hist_lut_cfg); + if (ret) + return ret; + ret = __to_user_pa_v2_data( + compat_ptr((uintptr_t)&ppp32->pa_v2_cfg), + &ppp->pa_v2_cfg); + + return ret; +} + +static int __from_user_hist_start_req( + struct mdp_histogram_start_req32 
__user *hist_req32, + struct mdp_histogram_start_req __user *hist_req) +{ + if (copy_in_user(&hist_req->block, + &hist_req32->block, + sizeof(uint32_t)) || + copy_in_user(&hist_req->frame_cnt, + &hist_req32->frame_cnt, + sizeof(uint8_t)) || + copy_in_user(&hist_req->bit_mask, + &hist_req32->bit_mask, + sizeof(uint8_t)) || + copy_in_user(&hist_req->num_bins, + &hist_req32->num_bins, + sizeof(uint16_t))) + return -EFAULT; + + return 0; +} + +static int __from_user_hist_data( + struct mdp_histogram_data32 __user *hist_data32, + struct mdp_histogram_data __user *hist_data) +{ + uint32_t data; + + if (copy_in_user(&hist_data->block, + &hist_data32->block, + sizeof(uint32_t)) || + copy_in_user(&hist_data->bin_cnt, + &hist_data32->bin_cnt, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, &hist_data32->c0) || + put_user(compat_ptr(data), &hist_data->c0) || + get_user(data, &hist_data32->c1) || + put_user(compat_ptr(data), &hist_data->c1) || + get_user(data, &hist_data32->c2) || + put_user(compat_ptr(data), &hist_data->c2) || + get_user(data, &hist_data32->extra_info) || + put_user(compat_ptr(data), &hist_data->extra_info)) + return -EFAULT; + + return 0; +} + +static int __to_user_hist_data( + struct mdp_histogram_data32 __user *hist_data32, + struct mdp_histogram_data __user *hist_data) +{ + unsigned long data; + + if (copy_in_user(&hist_data32->block, + &hist_data->block, + sizeof(uint32_t)) || + copy_in_user(&hist_data32->bin_cnt, + &hist_data->bin_cnt, + sizeof(uint32_t))) + return -EFAULT; + + if (get_user(data, (unsigned long *) &hist_data->c0) || + put_user((compat_caddr_t) data, &hist_data32->c0) || + get_user(data, (unsigned long *) &hist_data->c1) || + put_user((compat_caddr_t) data, &hist_data32->c1) || + get_user(data, (unsigned long *) &hist_data->c2) || + put_user((compat_caddr_t) data, &hist_data32->c2) || + get_user(data, (unsigned long *) &hist_data->extra_info) || + put_user((compat_caddr_t) data, &hist_data32->extra_info)) + return -EFAULT; 
+ + return 0; +} + +static int mdss_histo_compat_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + struct mdp_histogram_data __user *hist; + struct mdp_histogram_data32 __user *hist32; + struct mdp_histogram_start_req __user *hist_req; + struct mdp_histogram_start_req32 __user *hist_req32; + int ret = 0; + + switch (cmd) { + case MSMFB_HISTOGRAM_START: + hist_req32 = compat_ptr(arg); + hist_req = compat_alloc_user_space( + sizeof(struct mdp_histogram_start_req)); + if (!hist_req) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, + sizeof(struct mdp_histogram_start_req)); + return -EINVAL; + } + if (clear_user(hist_req, + sizeof(struct mdp_histogram_start_req))) + return -EFAULT; + ret = __from_user_hist_start_req(hist_req32, hist_req); + if (ret) + goto histo_compat_err; + ret = mdss_fb_do_ioctl(info, cmd, + (unsigned long) hist_req, file); + break; + case MSMFB_HISTOGRAM_STOP: + ret = mdss_fb_do_ioctl(info, cmd, arg, file); + break; + case MSMFB_HISTOGRAM: + hist32 = compat_ptr(arg); + hist = compat_alloc_user_space( + sizeof(struct mdp_histogram_data)); + if (!hist) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, + sizeof(struct mdp_histogram_data)); + return -EINVAL; + } + if (clear_user(hist, sizeof(struct mdp_histogram_data))) + return -EFAULT; + ret = __from_user_hist_data(hist32, hist); + if (ret) + goto histo_compat_err; + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) hist, file); + if (ret) + goto histo_compat_err; + ret = __to_user_hist_data(hist32, hist); + break; + default: + break; + } + +histo_compat_err: + return ret; +} + +static int __copy_layer_pp_info_qseed_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + pp_info->qseed_cfg[0].table_num = pp_info32->qseed_cfg[0].table_num; + pp_info->qseed_cfg[0].ops = pp_info32->qseed_cfg[0].ops; + pp_info->qseed_cfg[0].len = pp_info32->qseed_cfg[0].len; + 
pp_info->qseed_cfg[0].data = compat_ptr(pp_info32->qseed_cfg[0].data); + + pp_info->qseed_cfg[1].table_num = pp_info32->qseed_cfg[1].table_num; + pp_info->qseed_cfg[1].ops = pp_info32->qseed_cfg[1].ops; + pp_info->qseed_cfg[1].len = pp_info32->qseed_cfg[1].len; + pp_info->qseed_cfg[1].data = compat_ptr(pp_info32->qseed_cfg[1].data); + + return 0; +} + +static int __copy_layer_igc_lut_data_v1_7( + struct mdp_igc_lut_data_v1_7 *cfg_payload, + struct mdp_igc_lut_data_v1_7_32 __user *cfg_payload32) +{ + struct mdp_igc_lut_data_v1_7_32 local_cfg_payload32; + int ret = 0; + + ret = copy_from_user(&local_cfg_payload32, + cfg_payload32, + sizeof(struct mdp_igc_lut_data_v1_7_32)); + if (ret) { + pr_err("copy from user failed, IGC cfg payload = %pK\n", + cfg_payload32); + ret = -EFAULT; + goto exit; + } + + cfg_payload->table_fmt = local_cfg_payload32.table_fmt; + cfg_payload->len = local_cfg_payload32.len; + cfg_payload->c0_c1_data = compat_ptr(local_cfg_payload32.c0_c1_data); + cfg_payload->c2_data = compat_ptr(local_cfg_payload32.c2_data); + +exit: + return ret; +} + +static int __copy_layer_pp_info_igc_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + void *cfg_payload = NULL; + uint32_t payload_size = 0; + int ret = 0; + + pp_info->igc_cfg.block = pp_info32->igc_cfg.block; + pp_info->igc_cfg.version = pp_info32->igc_cfg.version; + pp_info->igc_cfg.ops = pp_info32->igc_cfg.ops; + + if (pp_info->igc_cfg.version != 0) { + payload_size = __pp_compat_size_igc(); + + cfg_payload = kmalloc(payload_size, GFP_KERNEL); + if (!cfg_payload) { + ret = -ENOMEM; + goto exit; + } + } + + switch (pp_info->igc_cfg.version) { + case mdp_igc_v1_7: + ret = __copy_layer_igc_lut_data_v1_7(cfg_payload, + compat_ptr(pp_info32->igc_cfg.cfg_payload)); + if (ret) { + pr_err("compat copy of IGC cfg payload failed, ret %d\n", + ret); + kfree(cfg_payload); + cfg_payload = NULL; + goto exit; + } + break; + default: + pr_debug("No version set, fallback 
to legacy IGC version\n"); + pp_info->igc_cfg.len = pp_info32->igc_cfg.len; + pp_info->igc_cfg.c0_c1_data = + compat_ptr(pp_info32->igc_cfg.c0_c1_data); + pp_info->igc_cfg.c2_data = + compat_ptr(pp_info32->igc_cfg.c2_data); + kfree(cfg_payload); + cfg_payload = NULL; + break; + } +exit: + pp_info->igc_cfg.cfg_payload = cfg_payload; + return ret; +} + +static int __copy_layer_hist_lut_data_v1_7( + struct mdp_hist_lut_data_v1_7 *cfg_payload, + struct mdp_hist_lut_data_v1_7_32 __user *cfg_payload32) +{ + struct mdp_hist_lut_data_v1_7_32 local_cfg_payload32; + int ret = 0; + + ret = copy_from_user(&local_cfg_payload32, + cfg_payload32, + sizeof(struct mdp_hist_lut_data_v1_7_32)); + if (ret) { + pr_err("copy from user failed, hist lut cfg_payload = %pK\n", + cfg_payload32); + ret = -EFAULT; + goto exit; + } + + cfg_payload->len = local_cfg_payload32.len; + cfg_payload->data = compat_ptr(local_cfg_payload32.data); +exit: + return ret; +} + +static int __copy_layer_pp_info_hist_lut_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + void *cfg_payload = NULL; + uint32_t payload_size = 0; + int ret = 0; + + pp_info->hist_lut_cfg.block = pp_info32->hist_lut_cfg.block; + pp_info->hist_lut_cfg.version = pp_info32->hist_lut_cfg.version; + pp_info->hist_lut_cfg.ops = pp_info32->hist_lut_cfg.ops; + pp_info->hist_lut_cfg.hist_lut_first = + pp_info32->hist_lut_cfg.hist_lut_first; + + if (pp_info->hist_lut_cfg.version != 0) { + payload_size = __pp_compat_size_hist_lut(); + + cfg_payload = kmalloc(payload_size, GFP_KERNEL); + if (!cfg_payload) { + ret = -ENOMEM; + goto exit; + } + } + + switch (pp_info->hist_lut_cfg.version) { + case mdp_hist_lut_v1_7: + ret = __copy_layer_hist_lut_data_v1_7(cfg_payload, + compat_ptr(pp_info32->hist_lut_cfg.cfg_payload)); + if (ret) { + pr_err("compat copy of Hist LUT cfg payload failed, ret %d\n", + ret); + kfree(cfg_payload); + cfg_payload = NULL; + goto exit; + } + break; + default: + 
pr_debug("version invalid, fallback to legacy\n"); + pp_info->hist_lut_cfg.len = pp_info32->hist_lut_cfg.len; + pp_info->hist_lut_cfg.data = + compat_ptr(pp_info32->hist_lut_cfg.data); + kfree(cfg_payload); + cfg_payload = NULL; + break; + } +exit: + pp_info->hist_lut_cfg.cfg_payload = cfg_payload; + return ret; +} + +static int __copy_layer_pa_data_v1_7( + struct mdp_pa_data_v1_7 *cfg_payload, + struct mdp_pa_data_v1_7_32 __user *cfg_payload32) +{ + struct mdp_pa_data_v1_7_32 local_cfg_payload32; + int ret = 0; + + ret = copy_from_user(&local_cfg_payload32, + cfg_payload32, + sizeof(struct mdp_pa_data_v1_7_32)); + if (ret) { + pr_err("copy from user failed, pa cfg_payload = %pK\n", + cfg_payload32); + ret = -EFAULT; + goto exit; + } + + cfg_payload->mode = local_cfg_payload32.mode; + cfg_payload->global_hue_adj = local_cfg_payload32.global_hue_adj; + cfg_payload->global_sat_adj = local_cfg_payload32.global_sat_adj; + cfg_payload->global_val_adj = local_cfg_payload32.global_val_adj; + cfg_payload->global_cont_adj = local_cfg_payload32.global_cont_adj; + + memcpy(&cfg_payload->skin_cfg, &local_cfg_payload32.skin_cfg, + sizeof(struct mdp_pa_mem_col_data_v1_7)); + memcpy(&cfg_payload->sky_cfg, &local_cfg_payload32.sky_cfg, + sizeof(struct mdp_pa_mem_col_data_v1_7)); + memcpy(&cfg_payload->fol_cfg, &local_cfg_payload32.fol_cfg, + sizeof(struct mdp_pa_mem_col_data_v1_7)); + + cfg_payload->six_zone_thresh = local_cfg_payload32.six_zone_thresh; + cfg_payload->six_zone_adj_p0 = local_cfg_payload32.six_zone_adj_p0; + cfg_payload->six_zone_adj_p1 = local_cfg_payload32.six_zone_adj_p1; + cfg_payload->six_zone_sat_hold = local_cfg_payload32.six_zone_sat_hold; + cfg_payload->six_zone_val_hold = local_cfg_payload32.six_zone_val_hold; + cfg_payload->six_zone_len = local_cfg_payload32.six_zone_len; + + cfg_payload->six_zone_curve_p0 = + compat_ptr(local_cfg_payload32.six_zone_curve_p0); + cfg_payload->six_zone_curve_p1 = + compat_ptr(local_cfg_payload32.six_zone_curve_p1); +exit: 
+ return ret; +} + +static int __copy_layer_pp_info_pa_v2_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + void *cfg_payload = NULL; + uint32_t payload_size = 0; + int ret = 0; + + pp_info->pa_v2_cfg_data.block = pp_info32->pa_v2_cfg_data.block; + pp_info->pa_v2_cfg_data.version = pp_info32->pa_v2_cfg_data.version; + pp_info->pa_v2_cfg_data.flags = pp_info32->pa_v2_cfg_data.flags; + + if (pp_info->pa_v2_cfg_data.version != 0) { + payload_size = __pp_compat_size_pa(); + + cfg_payload = kmalloc(payload_size, GFP_KERNEL); + if (!cfg_payload) { + ret = -ENOMEM; + goto exit; + } + } + + switch (pp_info->pa_v2_cfg_data.version) { + case mdp_pa_v1_7: + ret = __copy_layer_pa_data_v1_7(cfg_payload, + compat_ptr(pp_info32->pa_v2_cfg_data.cfg_payload)); + if (ret) { + pr_err("compat copy of PA cfg payload failed, ret %d\n", + ret); + kfree(cfg_payload); + cfg_payload = NULL; + goto exit; + } + break; + default: + pr_debug("version invalid\n"); + kfree(cfg_payload); + cfg_payload = NULL; + break; + } +exit: + pp_info->pa_v2_cfg_data.cfg_payload = cfg_payload; + return ret; +} + +static int __copy_layer_pp_info_legacy_pa_v2_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + pp_info->pa_v2_cfg.global_hue_adj = + pp_info32->pa_v2_cfg.global_hue_adj; + pp_info->pa_v2_cfg.global_sat_adj = + pp_info32->pa_v2_cfg.global_sat_adj; + pp_info->pa_v2_cfg.global_val_adj = + pp_info32->pa_v2_cfg.global_val_adj; + pp_info->pa_v2_cfg.global_cont_adj = + pp_info32->pa_v2_cfg.global_cont_adj; + + memcpy(&pp_info->pa_v2_cfg.skin_cfg, + &pp_info32->pa_v2_cfg.skin_cfg, + sizeof(struct mdp_pa_mem_col_cfg)); + memcpy(&pp_info->pa_v2_cfg.sky_cfg, + &pp_info32->pa_v2_cfg.sky_cfg, + sizeof(struct mdp_pa_mem_col_cfg)); + memcpy(&pp_info->pa_v2_cfg.fol_cfg, + &pp_info32->pa_v2_cfg.fol_cfg, + sizeof(struct mdp_pa_mem_col_cfg)); + + pp_info->pa_v2_cfg.six_zone_thresh = + pp_info32->pa_v2_cfg.six_zone_thresh; + 
pp_info->pa_v2_cfg.six_zone_len = + pp_info32->pa_v2_cfg.six_zone_len; + + pp_info->pa_v2_cfg.six_zone_curve_p0 = + compat_ptr(pp_info32->pa_v2_cfg.six_zone_curve_p0); + pp_info->pa_v2_cfg.six_zone_curve_p1 = + compat_ptr(pp_info32->pa_v2_cfg.six_zone_curve_p1); + + return 0; +} + +static int __copy_layer_pp_info_pcc_params( + struct mdp_overlay_pp_params *pp_info, + struct mdp_overlay_pp_params32 *pp_info32) +{ + void *cfg_payload = NULL; + uint32_t payload_size = 0; + int ret = 0; + + pp_info->pcc_cfg_data.block = pp_info32->pcc_cfg_data.block; + pp_info->pcc_cfg_data.version = pp_info32->pcc_cfg_data.version; + pp_info->pcc_cfg_data.ops = pp_info32->pcc_cfg_data.ops; + + if (pp_info->pcc_cfg_data.version != 0) { + payload_size = __pp_compat_size_pcc(); + + cfg_payload = kmalloc(payload_size, GFP_KERNEL); + if (!cfg_payload) { + ret = -ENOMEM; + goto exit; + } + } + + switch (pp_info->pcc_cfg_data.version) { + case mdp_pcc_v1_7: + ret = copy_from_user(cfg_payload, + compat_ptr(pp_info32->pcc_cfg_data.cfg_payload), + sizeof(struct mdp_pcc_data_v1_7)); + if (ret) { + pr_err("compat copy of PCC cfg payload failed, ptr %pK\n", + compat_ptr( + pp_info32->pcc_cfg_data.cfg_payload)); + ret = -EFAULT; + kfree(cfg_payload); + cfg_payload = NULL; + goto exit; + } + break; + default: + pr_debug("version invalid, fallback to legacy\n"); + kfree(cfg_payload); + cfg_payload = NULL; + break; + } +exit: + pp_info->pcc_cfg_data.cfg_payload = cfg_payload; + return ret; +} + + +static int __copy_layer_pp_info_params(struct mdp_input_layer *layer, + struct mdp_input_layer32 *layer32) +{ + struct mdp_overlay_pp_params *pp_info; + struct mdp_overlay_pp_params32 pp_info32; + int ret = 0; + + if (!(layer->flags & MDP_LAYER_PP)) + return 0; + + ret = copy_from_user(&pp_info32, + compat_ptr(layer32->pp_info), + sizeof(struct mdp_overlay_pp_params32)); + if (ret) { + pr_err("pp info copy from user failed, pp_info %pK\n", + compat_ptr(layer32->pp_info)); + ret = -EFAULT; + goto exit; + } + 
+ pp_info = kzalloc(sizeof(struct mdp_overlay_pp_params), GFP_KERNEL); + if (!pp_info) { + ret = -ENOMEM; + goto exit; + } + memset(pp_info, 0, sizeof(struct mdp_overlay_pp_params)); + + pp_info->config_ops = pp_info32.config_ops; + + memcpy(&pp_info->csc_cfg, &pp_info32.csc_cfg, + sizeof(struct mdp_csc_cfg)); + memcpy(&pp_info->sharp_cfg, &pp_info32.sharp_cfg, + sizeof(struct mdp_sharp_cfg)); + memcpy(&pp_info->hist_cfg, &pp_info32.hist_cfg, + sizeof(struct mdp_histogram_cfg)); + memcpy(&pp_info->pa_cfg, &pp_info32.pa_cfg, + sizeof(struct mdp_pa_cfg)); + + ret = __copy_layer_pp_info_qseed_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info QSEED params failed, ret %d\n", + ret); + goto exit_pp_info; + } + ret = __copy_layer_pp_info_legacy_pa_v2_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info Legacy PAv2 params failed, ret %d\n", + ret); + goto exit_pp_info; + } + ret = __copy_layer_pp_info_igc_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info IGC params failed, ret %d\n", + ret); + goto exit_pp_info; + } + ret = __copy_layer_pp_info_hist_lut_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info Hist LUT params failed, ret %d\n", + ret); + goto exit_igc; + } + ret = __copy_layer_pp_info_pa_v2_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info PAv2 params failed, ret %d\n", + ret); + goto exit_hist_lut; + } + ret = __copy_layer_pp_info_pcc_params(pp_info, &pp_info32); + if (ret) { + pr_err("compat copy pp_info PCC params failed, ret %d\n", + ret); + goto exit_pa; + } + + layer->pp_info = pp_info; + + return ret; + +exit_pa: + kfree(pp_info->pa_v2_cfg_data.cfg_payload); +exit_hist_lut: + kfree(pp_info->hist_lut_cfg.cfg_payload); +exit_igc: + kfree(pp_info->igc_cfg.cfg_payload); +exit_pp_info: + kfree(pp_info); +exit: + return ret; +} + + +static int __to_user_mdp_overlay(struct mdp_overlay32 __user *ov32, + struct mdp_overlay __user *ov) +{ + int ret = 0; + + ret 
= copy_in_user(&ov32->src, &ov->src, sizeof(ov32->src)) || + copy_in_user(&ov32->src_rect, + &ov->src_rect, sizeof(ov32->src_rect)) || + copy_in_user(&ov32->dst_rect, + &ov->dst_rect, sizeof(ov32->dst_rect)); + if (ret) + return -EFAULT; + + ret |= put_user(ov->z_order, &ov32->z_order); + ret |= put_user(ov->is_fg, &ov32->is_fg); + ret |= put_user(ov->alpha, &ov32->alpha); + ret |= put_user(ov->blend_op, &ov32->blend_op); + ret |= put_user(ov->transp_mask, &ov32->transp_mask); + ret |= put_user(ov->flags, &ov32->flags); + ret |= put_user(ov->id, &ov32->id); + ret |= put_user(ov->priority, &ov32->priority); + if (ret) + return -EFAULT; + + ret = copy_in_user(&ov32->user_data, &ov->user_data, + sizeof(ov32->user_data)); + if (ret) + return -EFAULT; + + ret |= put_user(ov->horz_deci, &ov32->horz_deci); + ret |= put_user(ov->vert_deci, &ov32->vert_deci); + if (ret) + return -EFAULT; + + ret = __to_user_pp_params( + &ov->overlay_pp_cfg, + compat_ptr((uintptr_t) &ov32->overlay_pp_cfg)); + if (ret) + return -EFAULT; + + ret = copy_in_user(&ov32->scale, &ov->scale, + sizeof(struct mdp_scale_data)); + if (ret) + return -EFAULT; + + ret = put_user(ov->frame_rate, &ov32->frame_rate); + if (ret) + return -EFAULT; + + return 0; +} + + +static int __from_user_mdp_overlay(struct mdp_overlay __user *ov, + struct mdp_overlay32 __user *ov32) +{ + __u32 data; + + if (copy_in_user(&ov->src, &ov32->src, + sizeof(ov32->src)) || + copy_in_user(&ov->src_rect, &ov32->src_rect, + sizeof(ov32->src_rect)) || + copy_in_user(&ov->dst_rect, &ov32->dst_rect, + sizeof(ov32->dst_rect))) + return -EFAULT; + + if (get_user(data, &ov32->z_order) || + put_user(data, &ov->z_order) || + get_user(data, &ov32->is_fg) || + put_user(data, &ov->is_fg) || + get_user(data, &ov32->alpha) || + put_user(data, &ov->alpha) || + get_user(data, &ov32->blend_op) || + put_user(data, &ov->blend_op) || + get_user(data, &ov32->transp_mask) || + put_user(data, &ov->transp_mask) || + get_user(data, &ov32->flags) || + 
put_user(data, &ov->flags) || + get_user(data, &ov32->pipe_type) || + put_user(data, &ov->pipe_type) || + get_user(data, &ov32->id) || + put_user(data, &ov->id) || + get_user(data, &ov32->priority) || + put_user(data, &ov->priority)) + return -EFAULT; + + if (copy_in_user(&ov->user_data, &ov32->user_data, + sizeof(ov32->user_data))) + return -EFAULT; + + if (get_user(data, &ov32->horz_deci) || + put_user(data, &ov->horz_deci) || + get_user(data, &ov32->vert_deci) || + put_user(data, &ov->vert_deci)) + return -EFAULT; + + if (__from_user_pp_params( + compat_ptr((uintptr_t) &ov32->overlay_pp_cfg), + &ov->overlay_pp_cfg)) + return -EFAULT; + + if (copy_in_user(&ov->scale, &ov32->scale, + sizeof(struct mdp_scale_data))) + return -EFAULT; + + if (get_user(data, &ov32->frame_rate) || + put_user(data, &ov->frame_rate)) + return -EFAULT; + + return 0; +} + +static int __from_user_mdp_overlaylist(struct mdp_overlay_list __user *ovlist, + struct mdp_overlay_list32 __user *ovlist32, + struct mdp_overlay **to_list_head) +{ + __u32 i, ret; + unsigned long data, from_list_head, num_overlays; + struct mdp_overlay32 *iter; + + if (!to_list_head || !ovlist32 || !ovlist) { + pr_err("%s:%u: null error\n", __func__, __LINE__); + return -EINVAL; + } + + if (copy_in_user(&ovlist->num_overlays, &ovlist32->num_overlays, + sizeof(ovlist32->num_overlays))) + return -EFAULT; + + if (copy_in_user(&ovlist->flags, &ovlist32->flags, + sizeof(ovlist32->flags))) + return -EFAULT; + + if (copy_in_user(&ovlist->processed_overlays, + &ovlist32->processed_overlays, + sizeof(ovlist32->processed_overlays))) + return -EFAULT; + + if (get_user(data, &ovlist32->overlay_list) || + get_user(num_overlays, &ovlist32->num_overlays)) { + ret = -EFAULT; + goto validate_exit; + } + + for (i = 0; i < num_overlays; i++) { + if (get_user(from_list_head, (__u32 *)data + i)) { + ret = -EFAULT; + goto validate_exit; + } + + iter = compat_ptr(from_list_head); + if (__from_user_mdp_overlay(to_list_head[i], + (struct 
mdp_overlay32 *)(iter))) { + ret = -EFAULT; + goto validate_exit; + } + } + if (put_user(to_list_head, &ovlist->overlay_list)) + return -EFAULT; + + return 0; + +validate_exit: + pr_err("%s: %u: copy error\n", __func__, __LINE__); + return -EFAULT; +} + +static int __to_user_mdp_overlaylist(struct mdp_overlay_list32 __user *ovlist32, + struct mdp_overlay_list __user *ovlist, + struct mdp_overlay **l_ptr) +{ + __u32 i, ret; + unsigned long data, data1; + struct mdp_overlay32 *temp; + struct mdp_overlay *l = l_ptr[0]; + + if (copy_in_user(&ovlist32->num_overlays, &ovlist->num_overlays, + sizeof(ovlist32->num_overlays))) + return -EFAULT; + + if (get_user(data, &ovlist32->overlay_list)) { + ret = -EFAULT; + pr_err("%s:%u: err\n", __func__, __LINE__); + goto validate_exit; + } + + for (i = 0; i < ovlist32->num_overlays; i++) { + if (get_user(data1, (__u32 *)data + i)) { + ret = -EFAULT; + goto validate_exit; + } + temp = compat_ptr(data1); + if (__to_user_mdp_overlay( + (struct mdp_overlay32 *) temp, + l + i)) { + ret = -EFAULT; + goto validate_exit; + } + } + + if (copy_in_user(&ovlist32->flags, &ovlist->flags, + sizeof(ovlist32->flags))) + return -EFAULT; + + if (copy_in_user(&ovlist32->processed_overlays, + &ovlist->processed_overlays, + sizeof(ovlist32->processed_overlays))) + return -EFAULT; + + return 0; + +validate_exit: + pr_err("%s: %u: copy error\n", __func__, __LINE__); + return -EFAULT; + +} + +void mdss_compat_align_list(void __user *total_mem_chunk, + struct mdp_overlay __user **list_ptr, u32 num_ov) +{ + int i = 0; + struct mdp_overlay __user *contig_overlays; + + contig_overlays = total_mem_chunk + sizeof(struct mdp_overlay_list) + + (num_ov * sizeof(struct mdp_overlay *)); + + for (i = 0; i < num_ov; i++) + list_ptr[i] = contig_overlays + i; +} + +static u32 __pp_sspp_size(void) +{ + u32 size = 0; + /* pick the largest of the revision when multiple revs are supported */ + size = sizeof(struct mdp_igc_lut_data_v1_7); + size += sizeof(struct 
mdp_pa_data_v1_7); + size += sizeof(struct mdp_pcc_data_v1_7); + size += sizeof(struct mdp_hist_lut_data_v1_7); + return size; +} + +static int __pp_sspp_set_offsets(struct mdp_overlay __user *ov) +{ + if (!ov) { + pr_err("invalid overlay pointer\n"); + return -EFAULT; + } + if (put_user((void *)((unsigned long)ov + sizeof(struct mdp_overlay)), + &(ov->overlay_pp_cfg.igc_cfg.cfg_payload)) || + put_user(ov->overlay_pp_cfg.igc_cfg.cfg_payload + + sizeof(struct mdp_igc_lut_data_v1_7), + &(ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload)) || + put_user(ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload + + sizeof(struct mdp_pa_data_v1_7), + &(ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload)) || + put_user(ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload + + sizeof(struct mdp_pcc_data_v1_7), + &(ov->overlay_pp_cfg.hist_lut_cfg.cfg_payload))) + return -EFAULT; + return 0; +} + +int mdss_compat_overlay_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + struct mdp_overlay **layers_head; + struct mdp_overlay __user *ov; + struct mdp_overlay32 __user *ov32; + struct mdp_overlay_list __user *ovlist; + struct mdp_overlay_list32 __user *ovlist32; + size_t layers_refs_sz, layers_sz, prepare_sz; + void __user *total_mem_chunk; + uint32_t num_overlays; + uint32_t alloc_size = 0; + int ret; + + if (!info || !info->par) + return -EINVAL; + + + switch (cmd) { + case MSMFB_MDP_PP: + ret = mdss_compat_pp_ioctl(info, cmd, arg, file); + break; + case MSMFB_HISTOGRAM_START: + case MSMFB_HISTOGRAM_STOP: + case MSMFB_HISTOGRAM: + ret = mdss_histo_compat_ioctl(info, cmd, arg, file); + break; + case MSMFB_OVERLAY_GET: + alloc_size += sizeof(*ov) + __pp_sspp_size(); + ov = compat_alloc_user_space(alloc_size); + if (!ov) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, sizeof(*ov)); + return -EINVAL; + } + ov32 = compat_ptr(arg); + ret = __pp_sspp_set_offsets(ov); + if (ret) { + pr_err("setting the pp offsets failed ret %d\n", ret); + return 
ret; + } + ret = __from_user_mdp_overlay(ov, ov32); + if (ret) + pr_err("%s: compat mdp overlay failed\n", __func__); + else + ret = mdss_fb_do_ioctl(info, cmd, + (unsigned long) ov, file); + ret = __to_user_mdp_overlay(ov32, ov); + break; + case MSMFB_OVERLAY_SET: + alloc_size += sizeof(*ov) + __pp_sspp_size(); + ov = compat_alloc_user_space(alloc_size); + if (!ov) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, sizeof(*ov)); + return -EINVAL; + } + ret = __pp_sspp_set_offsets(ov); + if (ret) { + pr_err("setting the pp offsets failed ret %d\n", ret); + return ret; + } + ov32 = compat_ptr(arg); + ret = __from_user_mdp_overlay(ov, ov32); + if (ret) { + pr_err("%s: compat mdp overlay failed\n", __func__); + } else { + ret = mdss_fb_do_ioctl(info, cmd, + (unsigned long) ov, file); + ret = __to_user_mdp_overlay(ov32, ov); + } + break; + case MSMFB_OVERLAY_PREPARE: + ovlist32 = compat_ptr(arg); + if (get_user(num_overlays, &ovlist32->num_overlays)) { + pr_err("compat mdp prepare failed: invalid arg\n"); + return -EFAULT; + } + + if (num_overlays >= OVERLAY_MAX) { + pr_err("%s: No: of overlays exceeds max\n", __func__); + return -EINVAL; + } + + layers_sz = num_overlays * sizeof(struct mdp_overlay); + prepare_sz = sizeof(struct mdp_overlay_list); + layers_refs_sz = num_overlays * sizeof(struct mdp_overlay *); + + total_mem_chunk = compat_alloc_user_space( + prepare_sz + layers_refs_sz + layers_sz); + if (!total_mem_chunk) { + pr_err("%s:%u: compat alloc error [%zu] bytes\n", + __func__, __LINE__, + layers_refs_sz + layers_sz + prepare_sz); + return -EINVAL; + } + + layers_head = total_mem_chunk + prepare_sz; + mdss_compat_align_list(total_mem_chunk, layers_head, + num_overlays); + ovlist = (struct mdp_overlay_list *)total_mem_chunk; + + ret = __from_user_mdp_overlaylist(ovlist, ovlist32, + layers_head); + if (ret) { + pr_err("compat mdp overlaylist failed\n"); + } else { + ret = mdss_fb_do_ioctl(info, cmd, + (unsigned long) ovlist, file); + if 
(!ret) + ret = __to_user_mdp_overlaylist(ovlist32, + ovlist, layers_head); + } + break; + case MSMFB_OVERLAY_UNSET: + case MSMFB_OVERLAY_PLAY: + case MSMFB_OVERLAY_VSYNC_CTRL: + case MSMFB_METADATA_SET: + case MSMFB_METADATA_GET: + default: + pr_debug("%s: overlay ioctl cmd=[%u]\n", __func__, cmd); + ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) arg, file); + break; + } + return ret; +} + +/* + * mdss_fb_compat_ioctl() - MDSS Framebuffer compat ioctl function + * @info: pointer to framebuffer info + * @cmd: ioctl command + * @arg: argument to ioctl + * + * This function adds the compat translation layer for framebuffer + * ioctls to allow 32-bit userspace call ioctls on the mdss + * framebuffer device driven in 64-bit kernel. + */ +int mdss_fb_compat_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg, struct file *file) +{ + int ret; + + if (!info || !info->par) + return -EINVAL; + + cmd = __do_compat_ioctl_nr(cmd); + switch (cmd) { + case MSMFB_CURSOR: + ret = mdss_fb_compat_cursor(info, cmd, arg, file); + break; + case MSMFB_SET_LUT: + ret = mdss_fb_compat_set_lut(info, arg, file); + break; + case MSMFB_BUFFER_SYNC: + ret = mdss_fb_compat_buf_sync(info, cmd, arg, file); + break; + case MSMFB_ATOMIC_COMMIT: + ret = __compat_atomic_commit(info, cmd, arg, file); + break; + case MSMFB_ASYNC_POSITION_UPDATE: + ret = __compat_async_position_update(info, cmd, arg); + break; + case MSMFB_MDP_PP: + case MSMFB_HISTOGRAM_START: + case MSMFB_HISTOGRAM_STOP: + case MSMFB_HISTOGRAM: + case MSMFB_OVERLAY_GET: + case MSMFB_OVERLAY_SET: + case MSMFB_OVERLAY_UNSET: + case MSMFB_OVERLAY_PLAY: + case MSMFB_OVERLAY_VSYNC_CTRL: + case MSMFB_METADATA_SET: + case MSMFB_METADATA_GET: + case MSMFB_OVERLAY_PREPARE: + ret = mdss_compat_overlay_ioctl(info, cmd, arg, file); + break; + case MSMFB_NOTIFY_UPDATE: + case MSMFB_DISPLAY_COMMIT: + default: + ret = mdss_fb_do_ioctl(info, cmd, arg, file); + break; + } + + if (ret == -ENOTSUPP) + pr_err("%s: unsupported ioctl\n", 
__func__); + else if (ret) + pr_debug("%s: ioctl err cmd=%u ret=%d\n", __func__, cmd, ret); + + return ret; +} +EXPORT_SYMBOL(mdss_fb_compat_ioctl); diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.h b/drivers/video/fbdev/msm/mdss_compat_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..c0b2a6565adc35228f928d403955797cf2d23b9d --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_compat_utils.h @@ -0,0 +1,549 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved. + * + */ + +#ifndef MDSS_COMPAT_UTILS_H +#define MDSS_COMPAT_UTILS_H + +/* + * To allow proper structure padding for 64bit/32bit target + */ +#ifdef __LP64 +#define MDP_LAYER_COMMIT_V1_PAD 2 +#else +#define MDP_LAYER_COMMIT_V1_PAD 3 +#endif + +struct mdp_buf_sync32 { + u32 flags; + u32 acq_fen_fd_cnt; + u32 session_id; + compat_caddr_t acq_fen_fd; + compat_caddr_t rel_fen_fd; + compat_caddr_t retire_fen_fd; +}; + +struct fb_cmap32 { + u32 start; + u32 len; + compat_caddr_t red; + compat_caddr_t green; + compat_caddr_t blue; + compat_caddr_t transp; +}; + +struct fb_image32 { + u32 dx; + u32 dy; + u32 width; + u32 height; + u32 fg_color; + u32 bg_color; + u8 depth; + compat_caddr_t data; + struct fb_cmap32 cmap; +}; + +struct fb_cursor32 { + u16 set; + u16 enable; + u16 rop; + compat_caddr_t mask; + struct fbcurpos hot; + struct fb_image32 image; +}; + +struct mdp_ccs32 { +}; + +struct msmfb_overlay_blt32 { +}; + +struct msmfb_overlay_3d32 { +}; + +struct msmfb_mixer_info_req32 { +}; + +struct msmfb_metadata32 { + uint32_t op; + uint32_t flags; + union { + struct mdp_misr misr_request; + struct mdp_blend_cfg blend_cfg; + struct mdp_mixer_cfg mixer_cfg; + uint32_t panel_frame_rate; + uint32_t video_info_code; + struct mdss_hw_caps caps; + uint8_t secure_en; + } data; +}; + +struct mdp_histogram_start_req32 { + uint32_t block; + uint8_t frame_cnt; + uint8_t bit_mask; + uint16_t num_bins; +}; + +struct 
mdp_histogram_data32 { + uint32_t block; + uint32_t bin_cnt; + compat_caddr_t c0; + compat_caddr_t c1; + compat_caddr_t c2; + compat_caddr_t extra_info; +}; + +struct mdp_pcc_coeff32 { + uint32_t c, r, g, b, rr, gg, bb, rg, gb, rb, rgb_0, rgb_1; +}; + +struct mdp_pcc_coeff_v1_7_32 { + uint32_t c, r, g, b, rg, gb, rb, rgb; +}; + +struct mdp_pcc_data_v1_7_32 { + struct mdp_pcc_coeff_v1_7_32 r, g, b; +}; +struct mdp_pcc_cfg_data32 { + uint32_t version; + uint32_t block; + uint32_t ops; + struct mdp_pcc_coeff32 r, g, b; + compat_caddr_t cfg_payload; +}; + +struct mdp_csc_cfg32 { + /* flags for enable CSC, toggling RGB,YUV input/output */ + uint32_t flags; + uint32_t csc_mv[9]; + uint32_t csc_pre_bv[3]; + uint32_t csc_post_bv[3]; + uint32_t csc_pre_lv[6]; + uint32_t csc_post_lv[6]; +}; + +struct mdp_csc_cfg_data32 { + uint32_t block; + struct mdp_csc_cfg32 csc_data; +}; + +struct mdp_bl_scale_data32 { + uint32_t min_lvl; + uint32_t scale; +}; + +struct mdp_pa_mem_col_cfg32 { + uint32_t color_adjust_p0; + uint32_t color_adjust_p1; + uint32_t hue_region; + uint32_t sat_region; + uint32_t val_region; +}; + +struct mdp_pa_v2_data32 { + /* Mask bits for PA features */ + uint32_t flags; + uint32_t global_hue_adj; + uint32_t global_sat_adj; + uint32_t global_val_adj; + uint32_t global_cont_adj; + struct mdp_pa_mem_col_cfg32 skin_cfg; + struct mdp_pa_mem_col_cfg32 sky_cfg; + struct mdp_pa_mem_col_cfg32 fol_cfg; + uint32_t six_zone_len; + uint32_t six_zone_thresh; + compat_caddr_t six_zone_curve_p0; + compat_caddr_t six_zone_curve_p1; +}; + +struct mdp_pa_mem_col_data_v1_7_32 { + uint32_t color_adjust_p0; + uint32_t color_adjust_p1; + uint32_t color_adjust_p2; + uint32_t blend_gain; + uint8_t sat_hold; + uint8_t val_hold; + uint32_t hue_region; + uint32_t sat_region; + uint32_t val_region; +}; + +struct mdp_pa_data_v1_7_32 { + uint32_t mode; + uint32_t global_hue_adj; + uint32_t global_sat_adj; + uint32_t global_val_adj; + uint32_t global_cont_adj; + struct 
mdp_pa_mem_col_data_v1_7_32 skin_cfg; + struct mdp_pa_mem_col_data_v1_7_32 sky_cfg; + struct mdp_pa_mem_col_data_v1_7_32 fol_cfg; + uint32_t six_zone_thresh; + uint32_t six_zone_adj_p0; + uint32_t six_zone_adj_p1; + uint8_t six_zone_sat_hold; + uint8_t six_zone_val_hold; + uint32_t six_zone_len; + compat_caddr_t six_zone_curve_p0; + compat_caddr_t six_zone_curve_p1; +}; + +struct mdp_pa_v2_cfg_data32 { + uint32_t version; + uint32_t block; + uint32_t flags; + struct mdp_pa_v2_data32 pa_v2_data; + compat_caddr_t cfg_payload; +}; + +struct mdp_pa_cfg32 { + uint32_t flags; + uint32_t hue_adj; + uint32_t sat_adj; + uint32_t val_adj; + uint32_t cont_adj; +}; + +struct mdp_pa_cfg_data32 { + uint32_t block; + struct mdp_pa_cfg32 pa_data; +}; + +struct mdp_igc_lut_data_v1_7_32 { + uint32_t table_fmt; + uint32_t len; + compat_caddr_t c0_c1_data; + compat_caddr_t c2_data; +}; + +struct mdp_rgb_lut_data32 { + uint32_t flags; + uint32_t lut_type; + struct fb_cmap32 cmap; +}; + +struct mdp_igc_lut_data32 { + uint32_t block; + uint32_t version; + uint32_t len, ops; + compat_caddr_t c0_c1_data; + compat_caddr_t c2_data; + compat_caddr_t cfg_payload; +}; + +struct mdp_hist_lut_data_v1_7_32 { + uint32_t len; + compat_caddr_t data; +}; + +struct mdp_hist_lut_data32 { + uint32_t block; + uint32_t version; + uint32_t hist_lut_first; + uint32_t ops; + uint32_t len; + compat_caddr_t data; + compat_caddr_t cfg_payload; +}; + +struct mdp_ar_gc_lut_data32 { + uint32_t x_start; + uint32_t slope; + uint32_t offset; +}; + +struct mdp_pgc_lut_data_v1_7_32 { + uint32_t len; + compat_caddr_t c0_data; + compat_caddr_t c1_data; + compat_caddr_t c2_data; +}; + +struct mdp_pgc_lut_data32 { + uint32_t version; + uint32_t block; + uint32_t flags; + uint8_t num_r_stages; + uint8_t num_g_stages; + uint8_t num_b_stages; + compat_caddr_t r_data; + compat_caddr_t g_data; + compat_caddr_t b_data; + compat_caddr_t cfg_payload; +}; + +struct mdp_lut_cfg_data32 { + uint32_t lut_type; + union { + struct 
mdp_igc_lut_data32 igc_lut_data; + struct mdp_pgc_lut_data32 pgc_lut_data; + struct mdp_hist_lut_data32 hist_lut_data; + struct mdp_rgb_lut_data32 rgb_lut_data; + } data; +}; + +struct mdp_qseed_cfg32 { + uint32_t table_num; + uint32_t ops; + uint32_t len; + compat_caddr_t data; +}; + +struct mdp_qseed_cfg_data32 { + uint32_t block; + struct mdp_qseed_cfg32 qseed_data; +}; + +struct mdp_dither_cfg_data32 { + uint32_t block; + uint32_t flags; + uint32_t g_y_depth; + uint32_t r_cr_depth; + uint32_t b_cb_depth; +}; + +struct mdp_gamut_data_v1_7_32 { + uint32_t mode; + uint32_t tbl_size[MDP_GAMUT_TABLE_NUM_V1_7]; + compat_caddr_t c0_data[MDP_GAMUT_TABLE_NUM_V1_7]; + compat_caddr_t c1_c2_data[MDP_GAMUT_TABLE_NUM_V1_7]; + uint32_t tbl_scale_off_sz[MDP_GAMUT_SCALE_OFF_TABLE_NUM]; + compat_caddr_t scale_off_data[MDP_GAMUT_SCALE_OFF_TABLE_NUM]; +}; + +struct mdp_gamut_cfg_data32 { + uint32_t block; + uint32_t flags; + uint32_t version; + uint32_t gamut_first; + uint32_t tbl_size[MDP_GAMUT_TABLE_NUM]; + compat_caddr_t r_tbl[MDP_GAMUT_TABLE_NUM]; + compat_caddr_t g_tbl[MDP_GAMUT_TABLE_NUM]; + compat_caddr_t b_tbl[MDP_GAMUT_TABLE_NUM]; + compat_caddr_t cfg_payload; +}; + +struct mdp_calib_config_data32 { + uint32_t ops; + uint32_t addr; + uint32_t data; +}; + +struct mdp_calib_config_buffer32 { + uint32_t ops; + uint32_t size; + compat_caddr_t buffer; +}; + +struct mdp_calib_dcm_state32 { + uint32_t ops; + uint32_t dcm_state; +}; + +struct mdss_ad_init32 { + uint32_t asym_lut[33]; + uint32_t color_corr_lut[33]; + uint8_t i_control[2]; + uint16_t black_lvl; + uint16_t white_lvl; + uint8_t var; + uint8_t limit_ampl; + uint8_t i_dither; + uint8_t slope_max; + uint8_t slope_min; + uint8_t dither_ctl; + uint8_t format; + uint8_t auto_size; + uint16_t frame_w; + uint16_t frame_h; + uint8_t logo_v; + uint8_t logo_h; + uint32_t alpha; + uint32_t alpha_base; + uint32_t bl_lin_len; + uint32_t bl_att_len; + compat_caddr_t bl_lin; + compat_caddr_t bl_lin_inv; + compat_caddr_t bl_att_lut; 
+}; + +struct mdss_ad_cfg32 { + uint32_t mode; + uint32_t al_calib_lut[33]; + uint16_t backlight_min; + uint16_t backlight_max; + uint16_t backlight_scale; + uint16_t amb_light_min; + uint16_t filter[2]; + uint16_t calib[4]; + uint8_t strength_limit; + uint8_t t_filter_recursion; + uint16_t stab_itr; + uint32_t bl_ctrl_mode; +}; + +/* ops uses standard MDP_PP_* flags */ +struct mdss_ad_init_cfg32 { + uint32_t ops; + union { + struct mdss_ad_init32 init; + struct mdss_ad_cfg32 cfg; + } params; +}; + +struct mdss_ad_input32 { + uint32_t mode; + union { + uint32_t amb_light; + uint32_t strength; + uint32_t calib_bl; + } in; + uint32_t output; +}; + +struct mdss_calib_cfg32 { + uint32_t ops; + uint32_t calib_mask; +}; + +struct mdp_histogram_cfg32 { + uint32_t ops; + uint32_t block; + uint8_t frame_cnt; + uint8_t bit_mask; + uint16_t num_bins; +}; + +struct mdp_sharp_cfg32 { + uint32_t flags; + uint32_t strength; + uint32_t edge_thr; + uint32_t smooth_thr; + uint32_t noise_thr; +}; + +struct mdp_overlay_pp_params32 { + uint32_t config_ops; + struct mdp_csc_cfg32 csc_cfg; + struct mdp_qseed_cfg32 qseed_cfg[2]; + struct mdp_pa_cfg32 pa_cfg; + struct mdp_pa_v2_data32 pa_v2_cfg; + struct mdp_igc_lut_data32 igc_cfg; + struct mdp_sharp_cfg32 sharp_cfg; + struct mdp_histogram_cfg32 hist_cfg; + struct mdp_hist_lut_data32 hist_lut_cfg; + struct mdp_pa_v2_cfg_data32 pa_v2_cfg_data; + struct mdp_pcc_cfg_data32 pcc_cfg_data; +}; + +struct msmfb_mdp_pp32 { + uint32_t op; + union { + struct mdp_pcc_cfg_data32 pcc_cfg_data; + struct mdp_csc_cfg_data32 csc_cfg_data; + struct mdp_lut_cfg_data32 lut_cfg_data; + struct mdp_qseed_cfg_data32 qseed_cfg_data; + struct mdp_bl_scale_data32 bl_scale_data; + struct mdp_pa_cfg_data32 pa_cfg_data; + struct mdp_pa_v2_cfg_data32 pa_v2_cfg_data; + struct mdp_dither_cfg_data32 dither_cfg_data; + struct mdp_gamut_cfg_data32 gamut_cfg_data; + struct mdp_calib_config_data32 calib_cfg; + struct mdss_ad_init_cfg32 ad_init_cfg; + struct mdss_calib_cfg32 
mdss_calib_cfg; + struct mdss_ad_input32 ad_input; + struct mdp_calib_config_buffer32 calib_buffer; + struct mdp_calib_dcm_state32 calib_dcm; + } data; +}; + +struct mdp_overlay32 { + struct msmfb_img src; + struct mdp_rect src_rect; + struct mdp_rect dst_rect; + uint32_t z_order; /* stage number */ + uint32_t is_fg; /* control alpha & transp */ + uint32_t alpha; + uint32_t blend_op; + uint32_t transp_mask; + uint32_t flags; + uint32_t pipe_type; + uint32_t id; + uint8_t priority; + uint32_t user_data[6]; + uint32_t bg_color; + uint8_t horz_deci; + uint8_t vert_deci; + struct mdp_overlay_pp_params32 overlay_pp_cfg; + struct mdp_scale_data scale; + uint8_t color_space; + uint32_t frame_rate; +}; + +struct mdp_overlay_list32 { + uint32_t num_overlays; + compat_caddr_t overlay_list; + uint32_t flags; + uint32_t processed_overlays; +}; + +struct mdp_input_layer32 { + uint32_t flags; + uint32_t pipe_ndx; + uint8_t horz_deci; + uint8_t vert_deci; + uint8_t alpha; + uint16_t z_order; + uint32_t transp_mask; + uint32_t bg_color; + enum mdss_mdp_blend_op blend_op; + enum mdp_color_space color_space; + struct mdp_rect src_rect; + struct mdp_rect dst_rect; + compat_caddr_t scale; + struct mdp_layer_buffer buffer; + compat_caddr_t pp_info; + int error_code; + uint32_t rect_num; + uint32_t reserved[5]; +}; + +struct mdp_output_layer32 { + uint32_t flags; + uint32_t writeback_ndx; + struct mdp_layer_buffer buffer; + enum mdp_color_space color_space; + uint32_t reserved[5]; +}; +struct mdp_layer_commit_v1_32 { + uint32_t flags; + int release_fence; + struct mdp_rect left_roi; + struct mdp_rect right_roi; + compat_caddr_t input_layers; + uint32_t input_layer_cnt; + compat_caddr_t output_layer; + int retire_fence; + compat_caddr_t dest_scaler; + uint32_t dest_scaler_cnt; + compat_caddr_t frc_info; + uint32_t bl_level; /* BL level to be updated in commit */ + uint32_t reserved[MDP_LAYER_COMMIT_V1_PAD]; +}; + +struct mdp_layer_commit32 { + uint32_t version; + union { + struct 
mdp_layer_commit_v1_32 commit_v1; + }; +}; + +struct mdp_position_update32 { + compat_caddr_t __user *input_layers; + uint32_t input_layer_cnt; +}; + +#endif diff --git a/drivers/video/fbdev/msm/mdss_dba_utils.c b/drivers/video/fbdev/msm/mdss_dba_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..233b01351a2ae09dbf1c19a162edd75963065e10 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_dba_utils.c @@ -0,0 +1,826 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, 2020, The Linux Foundation. All rights reserved. */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include