From 859783a138d2cf426f04f7c7b63be23885d0e3b5 Mon Sep 17 00:00:00 2001 From: Ghanim Fodi Date: Sun, 28 Oct 2018 18:26:33 +0200 Subject: [PATCH] msm: ipa: Add snapshot of IPA driver This is a snapshot of the IPA and GSI drivers from msm-4.14 to 4.19 kernel as of 'commit ("clk: qcom: npucc: Add CRC div source for npu_cc_cal_dp_clk_src")'. CRs-Fixed: 2341003 Change-Id: I498b2edb878e7d75311824dd217e75893efcfc73 Signed-off-by: Ghanim Fodi --- drivers/platform/msm/Kconfig | 100 + drivers/platform/msm/Makefile | 2 + drivers/platform/msm/gsi/Makefile | 6 + drivers/platform/msm/gsi/gsi.c | 3913 +++++++++ drivers/platform/msm/gsi/gsi.h | 314 + drivers/platform/msm/gsi/gsi_dbg.c | 764 ++ drivers/platform/msm/gsi/gsi_emulation.c | 227 + drivers/platform/msm/gsi/gsi_emulation.h | 186 + .../platform/msm/gsi/gsi_emulation_stubs.h | 13 + drivers/platform/msm/gsi/gsi_reg.h | 30 + drivers/platform/msm/gsi/gsi_reg_v1.h | 1058 +++ drivers/platform/msm/gsi/gsi_reg_v2.h | 1058 +++ drivers/platform/msm/ipa/Makefile | 6 + drivers/platform/msm/ipa/ipa_api.c | 3492 ++++++++ drivers/platform/msm/ipa/ipa_api.h | 450 + drivers/platform/msm/ipa/ipa_clients/Makefile | 6 + .../platform/msm/ipa/ipa_clients/ecm_ipa.c | 1630 ++++ .../platform/msm/ipa/ipa_clients/ipa_gsb.c | 1211 +++ .../msm/ipa/ipa_clients/ipa_mhi_client.c | 2852 +++++++ .../msm/ipa/ipa_clients/ipa_uc_offload.c | 818 ++ .../platform/msm/ipa/ipa_clients/ipa_usb.c | 3015 +++++++ .../platform/msm/ipa/ipa_clients/ipa_wdi3.c | 871 ++ .../platform/msm/ipa/ipa_clients/odu_bridge.c | 1256 +++ .../platform/msm/ipa/ipa_clients/rndis_ipa.c | 2704 ++++++ .../msm/ipa/ipa_clients/rndis_ipa_trace.h | 74 + drivers/platform/msm/ipa/ipa_common_i.h | 439 + drivers/platform/msm/ipa/ipa_rm.c | 1184 +++ .../msm/ipa/ipa_rm_dependency_graph.c | 240 + .../msm/ipa/ipa_rm_dependency_graph.h | 42 + drivers/platform/msm/ipa/ipa_rm_i.h | 150 + .../msm/ipa/ipa_rm_inactivity_timer.c | 277 + drivers/platform/msm/ipa/ipa_rm_peers_list.c | 270 + drivers/platform/msm/ipa/ipa_rm_peers_list.h | 55 + drivers/platform/msm/ipa/ipa_rm_resource.c | 1204 +++ drivers/platform/msm/ipa/ipa_rm_resource.h | 159 + .../msm/ipa/ipa_uc_offload_common_i.h | 22 + drivers/platform/msm/ipa/ipa_v3/Makefile | 15 + drivers/platform/msm/ipa/ipa_v3/ipa.c | 7118 ++++++++++++++++ drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 1878 +++++ drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 2372 ++++++ drivers/platform/msm/ipa/ipa_v3/ipa_dma.c | 1251 +++ drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 4232 ++++++++++ .../msm/ipa/ipa_v3/ipa_dt_replacement.c | 862 ++ .../msm/ipa/ipa_v3/ipa_emulation_stubs.h | 121 + drivers/platform/msm/ipa/ipa_v3/ipa_flt.c | 1733 ++++ drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c | 1328 +++ .../platform/msm/ipa/ipa_v3/ipa_hw_stats.c | 2073 +++++ drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 2652 ++++++ .../platform/msm/ipa/ipa_v3/ipa_interrupts.c | 629 ++ drivers/platform/msm/ipa/ipa_v3/ipa_intf.c | 807 ++ drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 748 ++ .../platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c | 1058 +++ .../platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h | 48 + drivers/platform/msm/ipa/ipa_v3/ipa_nat.c | 1592 ++++ drivers/platform/msm/ipa/ipa_v3/ipa_odl.c | 684 ++ drivers/platform/msm/ipa/ipa_v3/ipa_odl.h | 73 + drivers/platform/msm/ipa/ipa_v3/ipa_pm.c | 1397 +++ drivers/platform/msm/ipa/ipa_v3/ipa_pm.h | 180 + .../platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 1781 ++++ .../platform/msm/ipa/ipa_v3/ipa_qmi_service.h | 470 ++ .../msm/ipa/ipa_v3/ipa_qmi_service_v01.c | 4044 +++++++++ 
drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 2082 +++++ drivers/platform/msm/ipa/ipa_v3/ipa_trace.h | 183 + drivers/platform/msm/ipa/ipa_v3/ipa_uc.c | 982 +++ drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c | 956 +++ drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c | 625 ++ .../msm/ipa/ipa_v3/ipa_uc_offload_i.h | 625 ++ drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c | 2788 ++++++ drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 7478 +++++++++++++++++ drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c | 560 ++ .../platform/msm/ipa/ipa_v3/ipahal/Makefile | 5 + .../platform/msm/ipa/ipa_v3/ipahal/ipahal.c | 1583 ++++ .../platform/msm/ipa/ipa_v3/ipahal/ipahal.h | 653 ++ .../msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c | 4086 +++++++++ .../msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h | 284 + .../msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h | 174 + .../msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c | 540 ++ .../msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h | 241 + .../msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h | 48 + .../platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h | 724 ++ .../msm/ipa/ipa_v3/ipahal/ipahal_nat.c | 431 + .../msm/ipa/ipa_v3/ipahal/ipahal_nat.h | 72 + .../msm/ipa/ipa_v3/ipahal/ipahal_nat_i.h | 146 + .../msm/ipa/ipa_v3/ipahal/ipahal_reg.c | 3464 ++++++++ .../msm/ipa/ipa_v3/ipahal/ipahal_reg.h | 745 ++ .../msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h | 605 ++ drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 4278 ++++++++++ .../msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c | 553 ++ drivers/platform/msm/ipa/ipa_v3/teth_bridge.c | 276 + drivers/platform/msm/ipa/test/Makefile | 4 + drivers/platform/msm/ipa/test/ipa_pm_ut.c | 1792 ++++ drivers/platform/msm/ipa/test/ipa_test_dma.c | 1136 +++ .../platform/msm/ipa/test/ipa_test_example.c | 92 + .../platform/msm/ipa/test/ipa_test_hw_stats.c | 323 + drivers/platform/msm/ipa/test/ipa_test_mhi.c | 3326 ++++++++ .../platform/msm/ipa/test/ipa_ut_framework.c | 1115 +++ .../platform/msm/ipa/test/ipa_ut_framework.h | 233 + drivers/platform/msm/ipa/test/ipa_ut_i.h | 81 + .../platform/msm/ipa/test/ipa_ut_suite_list.h | 36 + include/linux/ecm_ipa.h | 88 + include/linux/ipa.h | 2399 ++++++ include/linux/ipa_mhi.h | 154 + include/linux/ipa_odu_bridge.h | 155 + include/linux/ipa_uc_offload.h | 314 + include/linux/ipa_usb.h | 329 + include/linux/ipa_wdi3.h | 407 + include/linux/msm_gsi.h | 1686 ++++ include/linux/rndis_ipa.h | 95 + 108 files changed, 118196 insertions(+) create mode 100644 drivers/platform/msm/gsi/Makefile create mode 100644 drivers/platform/msm/gsi/gsi.c create mode 100644 drivers/platform/msm/gsi/gsi.h create mode 100644 drivers/platform/msm/gsi/gsi_dbg.c create mode 100644 drivers/platform/msm/gsi/gsi_emulation.c create mode 100644 drivers/platform/msm/gsi/gsi_emulation.h create mode 100644 drivers/platform/msm/gsi/gsi_emulation_stubs.h create mode 100644 drivers/platform/msm/gsi/gsi_reg.h create mode 100644 drivers/platform/msm/gsi/gsi_reg_v1.h create mode 100644 drivers/platform/msm/gsi/gsi_reg_v2.h create mode 100644 drivers/platform/msm/ipa/Makefile create mode 100644 drivers/platform/msm/ipa/ipa_api.c create mode 100644 drivers/platform/msm/ipa/ipa_api.h create mode 100644 drivers/platform/msm/ipa/ipa_clients/Makefile create mode 100644 drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/ipa_usb.c create mode 100644 
drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/odu_bridge.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h create mode 100644 drivers/platform/msm/ipa/ipa_common_i.h create mode 100644 drivers/platform/msm/ipa/ipa_rm.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_dependency_graph.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_dependency_graph.h create mode 100644 drivers/platform/msm/ipa/ipa_rm_i.h create mode 100644 drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_peers_list.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_peers_list.h create mode 100644 drivers/platform/msm/ipa/ipa_rm_resource.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_resource.h create mode 100644 drivers/platform/msm/ipa/ipa_uc_offload_common_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/Makefile create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_client.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_dma.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_dp.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_flt.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_intf.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_nat.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_odl.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_odl.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_pm.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_pm.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_rt.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_trace.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_uc.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_utils.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h 
create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/teth_bridge.c create mode 100644 drivers/platform/msm/ipa/test/Makefile create mode 100644 drivers/platform/msm/ipa/test/ipa_pm_ut.c create mode 100644 drivers/platform/msm/ipa/test/ipa_test_dma.c create mode 100644 drivers/platform/msm/ipa/test/ipa_test_example.c create mode 100644 drivers/platform/msm/ipa/test/ipa_test_hw_stats.c create mode 100644 drivers/platform/msm/ipa/test/ipa_test_mhi.c create mode 100644 drivers/platform/msm/ipa/test/ipa_ut_framework.c create mode 100644 drivers/platform/msm/ipa/test/ipa_ut_framework.h create mode 100644 drivers/platform/msm/ipa/test/ipa_ut_i.h create mode 100644 drivers/platform/msm/ipa/test/ipa_ut_suite_list.h create mode 100644 include/linux/ecm_ipa.h create mode 100644 include/linux/ipa.h create mode 100644 include/linux/ipa_mhi.h create mode 100644 include/linux/ipa_odu_bridge.h create mode 100644 include/linux/ipa_uc_offload.h create mode 100644 include/linux/ipa_usb.h create mode 100644 include/linux/ipa_wdi3.h create mode 100644 include/linux/msm_gsi.h create mode 100644 include/linux/rndis_ipa.h diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index 53627979708f..45fc27abaab4 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -52,4 +52,104 @@ config SPS_SUPPORT_NDP_BAM to do the actual data transfer themselves, instead of the BAM. +config GSI + bool "GSI support" + help + This driver provides the transport needed to talk to the + IPA core. It replaces the BAM transport used previously. + + The GSI connects to a peripheral component via a uniform TLV + interface, and allows it to interface with other peripherals + and CPUs over various types of interfaces such as MHI, xDCI, + xHCI, GPI, WDI, Ethernet, etc. + +config GSI_REGISTER_VERSION_2 + bool "GSI core Version 2 Registers SWI Support" + depends on GSI + help + GSI core register software interface version 2 provides an updated + register interface for communicating with GSI. This includes + new register offsets, new register field structures and + new registers. + +config IPA3 + tristate "IPA3 support" + select GSI + depends on NET + help + This driver supports the Internet Packet Accelerator (IPA3) core. + IPA is a programmable protocol processor HW block. + It is designed to support generic HW processing of UL/DL IP packets + for various use cases independent of radio technology. + The driver supports client connection and configuration + for the IPA core. + Kernel and user-space processes can call the IPA driver + to configure the IPA core.
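+# Example (hypothetical defconfig fragment): setting CONFIG_IPA3=y pulls in
+# CONFIG_GSI through the "select GSI" above; optional clients such as
+# CONFIG_RNDIS_IPA, CONFIG_ECM_IPA and CONFIG_RMNET_IPA3 can be enabled on top.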
+ +config IPA_WDI_UNIFIED_API + bool "IPA WDI unified API support" + depends on IPA3 + help + This driver supports the IPA WDI unified API. + WDI is the interface between the IPA microcontroller and the WLAN chipset. + It is designed to support the IPA HW accelerated path for the WLAN use case. + The IPA WDI unified API supports all WDI versions through a unified + interface. + +config RMNET_IPA3 + tristate "IPA3 RMNET WWAN Network Device" + depends on IPA3 && QCOM_QMI_HELPERS + help + This WWAN network driver implements a network stack class device. + It supports embedded data transfer from A7 to Q6, configures the IPA HW + for the RmNet data driver, and handles the exchange of QMI messages between + the A7 and Q6 IPA drivers. + +config ECM_IPA + tristate "STD ECM LAN Driver support" + depends on IPA3 + help + Enables a LAN between the applications processor and a tethered + host using the STD ECM protocol. + This network interface allows the data path to go through the + IPA core while using the STD ECM protocol. + +config RNDIS_IPA + tristate "RNDIS_IPA Network Interface Driver support" + depends on IPA3 + help + Enables a LAN between the applications processor and a tethered + host using the RNDIS protocol. + This network interface allows the data path to go through the + IPA core while using the RNDIS protocol. + +config IPA3_MHI_PROXY + tristate "IPA3 MHI proxy driver" + depends on RMNET_IPA3 + help + This driver is used as a proxy between the modem and the MHI host driver. + Its main functionality is to set up MHI satellite channels on behalf of + the modem and to enable modem to MHI device communication. + Once the configuration is done the modem communicates directly with + the MHI device without AP involvement, with the exception of + power management. + +config IPA_UT + tristate "IPA Unit-Test Framework and Test Suites" + depends on IPA3 && DEBUG_FS + help + This module implements the IPA in-kernel test framework. + The framework supports defining and running tests, grouped + into suites according to the sub-unit of the IPA being tested. + The tests are run and controlled through the debugfs file + system. + +config IPA_EMULATION + bool "IPA on X86 Linux (IPA emulation support)" + depends on X86 && IPA3 + help + This option is used only when building the X86 version of + the IPA/GSI driver. In this mode, the IPA driver is probed + as a PCIe device (rather than a platform device), with the IPA emulation + hardware connected to the X86 machine over PCIe. endmenu diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile index a706f17a381a..16e086d9010d 100644 --- a/drivers/platform/msm/Makefile +++ b/drivers/platform/msm/Makefile @@ -7,3 +7,5 @@ obj-$(CONFIG_MSM_EXT_DISPLAY) += msm_ext_display.o obj-$(CONFIG_QPNP_REVID) += qpnp-revid.o obj-$(CONFIG_SPS) += sps/ +obj-$(CONFIG_GSI) += gsi/ +obj-$(CONFIG_IPA3) += ipa/ diff --git a/drivers/platform/msm/gsi/Makefile b/drivers/platform/msm/gsi/Makefile new file mode 100644 index 000000000000..3bccb99819fa --- /dev/null +++ b/drivers/platform/msm/gsi/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 + +gsidbg-$(CONFIG_DEBUG_FS) += gsi_dbg.o +obj-$(CONFIG_GSI) += gsi.o gsidbg.o + +obj-$(CONFIG_IPA_EMULATION) += gsi_emulation.o diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c new file mode 100644 index 000000000000..5b1971247922 --- /dev/null +++ b/drivers/platform/msm/gsi/gsi.c @@ -0,0 +1,3913 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "gsi.h" +#include "gsi_reg.h" +#include "gsi_emulation.h" + +#define GSI_CMD_TIMEOUT (5*HZ) +#define GSI_START_CMD_TIMEOUT_MS 1000 +#define GSI_CMD_POLL_CNT 5 +#define GSI_STOP_CMD_TIMEOUT_MS 10 +#define GSI_MAX_CH_LOW_WEIGHT 15 + +#define GSI_RESET_WA_MIN_SLEEP 1000 +#define GSI_RESET_WA_MAX_SLEEP 2000 +#define GSI_CHNL_STATE_MAX_RETRYCNT 10 + +#define GSI_STTS_REG_BITS 32 + +#ifndef CONFIG_DEBUG_FS +void gsi_debugfs_init(void) +{ +} +#endif + +static const struct of_device_id msm_gsi_match[] = { + { .compatible = "qcom,msm_gsi", }, + { }, +}; + + +#if defined(CONFIG_IPA_EMULATION) +static bool running_emulation = true; +#else +static bool running_emulation; +#endif + +struct gsi_ctx *gsi_ctx; + +static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee)); +} + +static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee)); +} + +static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee)); +} + +static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee)); +} + +static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee)); +} + +static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee)); +} + +static void gsi_channel_state_change_wait(unsigned long chan_hdl, + struct gsi_chan_ctx *ctx, + uint32_t tm) +{ + int poll_cnt; + int gsi_pending_intr; + int res; + uint32_t ch; + uint32_t val; + int ee = gsi_ctx->per.ee; + + /* + * Start polling the GSI channel for + * duration = tm * poll_cnt. + * We need to do polling of gsi state for improving debugability + * of gsi hw state. + */ + + for (poll_cnt = 0; + poll_cnt < GSI_CMD_POLL_CNT; + poll_cnt++) { + res = wait_for_completion_timeout(&ctx->compl, + msecs_to_jiffies(tm)); + + /* Interrupt received, return */ + if (res != 0) + return; + + /* + * Check channel state here in case the channel is + * already started but interrupt is not yet received. 
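+ * The channel-control interrupt source bits are also read below so
+ * the debug print can show whether the expected interrupt is still
+ * pending in the hardware.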
+ */ + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + ctx->state = (val & + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >> + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT; + + ch = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(gsi_ctx->per.ee)); + + gsi_pending_intr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee)); + + GSIDBG("GSI wait on chan_hld=%lu chan=%lu state=%u intr=%u\n", + chan_hdl, + ch, + ctx->state, + gsi_pending_intr); + } + +} + +static void gsi_handle_ch_ctrl(int ee) +{ + uint32_t ch; + int i; + uint32_t val; + struct gsi_chan_ctx *ctx; + + ch = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee)); + gsi_writel(ch, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee)); + GSIDBG("ch %x\n", ch); + for (i = 0; i < GSI_STTS_REG_BITS; i++) { + if ((1 << i) & ch) { + if (i >= gsi_ctx->max_ch || i >= GSI_CHAN_MAX) { + GSIERR("invalid channel %d\n", i); + break; + } + + ctx = &gsi_ctx->chan[i]; + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(i, ee)); + ctx->state = (val & + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >> + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT; + GSIDBG("ch %u state updated to %u\n", i, ctx->state); + complete(&ctx->compl); + gsi_ctx->ch_dbg[i].cmd_completed++; + } + } +} + +static void gsi_handle_ev_ctrl(int ee) +{ + uint32_t ch; + int i; + uint32_t val; + struct gsi_evt_ctx *ctx; + + ch = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(ee)); + gsi_writel(ch, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(ee)); + GSIDBG("ev %x\n", ch); + for (i = 0; i < GSI_STTS_REG_BITS; i++) { + if ((1 << i) & ch) { + if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) { + GSIERR("invalid event %d\n", i); + break; + } + + ctx = &gsi_ctx->evtr[i]; + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(i, ee)); + ctx->state = (val & + GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >> + GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT; + GSIDBG("evt %u state updated to %u\n", i, ctx->state); + complete(&ctx->compl); + } + } +} + +static void gsi_handle_glob_err(uint32_t err) +{ + struct gsi_log_err *log; + struct gsi_chan_ctx *ch; + struct gsi_evt_ctx *ev; + struct gsi_chan_err_notify chan_notify; + struct gsi_evt_err_notify evt_notify; + struct gsi_per_notify per_notify; + uint32_t val; + enum gsi_err_type err_type; + + log = (struct gsi_log_err *)&err; + GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee, + log->virt_idx); + GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1, + log->arg2, log->arg3); + + err_type = log->err_type; + /* + * These are errors thrown by hardware. We need + * BUG_ON() to capture the hardware state right + * when it is unexpected. 
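+ * The per-error checks below call GSI_ASSERT() when they detect such
+ * an unexpected hardware state.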
+ */ + switch (err_type) { + case GSI_ERR_TYPE_GLOB: + per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR; + per_notify.user_data = gsi_ctx->per.user_data; + per_notify.data.err_desc = err & 0xFFFF; + gsi_ctx->per.notify_cb(&per_notify); + break; + case GSI_ERR_TYPE_CHAN: + if (WARN_ON(log->virt_idx >= gsi_ctx->max_ch)) { + GSIERR("Unexpected ch %d\n", log->virt_idx); + return; + } + + ch = &gsi_ctx->chan[log->virt_idx]; + chan_notify.chan_user_data = ch->props.chan_user_data; + chan_notify.err_desc = err & 0xFFFF; + if (log->code == GSI_INVALID_TRE_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + GSI_ASSERT(); + } + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(log->virt_idx, + gsi_ctx->per.ee)); + ch->state = (val & + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >> + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT; + GSIDBG("ch %u state updated to %u\n", log->virt_idx, + ch->state); + ch->stats.invalid_tre_error++; + if (ch->state != GSI_CHAN_STATE_ERROR) { + GSIERR("Unexpected channel state %d\n", + ch->state); + GSI_ASSERT(); + } + chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR; + } else if (log->code == GSI_OUT_OF_BUFFERS_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + GSI_ASSERT(); + } + chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR; + } else if (log->code == GSI_OUT_OF_RESOURCES_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + GSI_ASSERT(); + } + chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR; + complete(&ch->compl); + } else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) { + chan_notify.evt_id = + GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR; + } else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + GSI_ASSERT(); + } + chan_notify.evt_id = + GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR; + } else if (log->code == GSI_HWO_1_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + GSI_ASSERT(); + } + chan_notify.evt_id = GSI_CHAN_HWO_1_ERR; + } else { + GSIERR("unexpected event log code %d\n", log->code); + GSI_ASSERT(); + } + ch->props.err_cb(&chan_notify); + break; + case GSI_ERR_TYPE_EVT: + if (WARN_ON(log->virt_idx >= gsi_ctx->max_ev)) { + GSIERR("Unexpected ev %d\n", log->virt_idx); + return; + } + + ev = &gsi_ctx->evtr[log->virt_idx]; + evt_notify.user_data = ev->props.user_data; + evt_notify.err_desc = err & 0xFFFF; + if (log->code == GSI_OUT_OF_BUFFERS_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + GSI_ASSERT(); + } + evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR; + } else if (log->code == GSI_OUT_OF_RESOURCES_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + GSI_ASSERT(); + } + evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR; + complete(&ev->compl); + } else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) { + evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR; + } else if (log->code == GSI_EVT_RING_EMPTY_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + GSI_ASSERT(); + } + evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR; + } else { + GSIERR("unexpected event log code %d\n", log->code); + GSI_ASSERT(); + } + ev->props.err_cb(&evt_notify); + break; + } +} + +static void gsi_handle_gp_int1(void) +{ + complete(&gsi_ctx->gen_ee_cmd_compl); +} + +static void gsi_handle_glob_ee(int 
ee) +{ + uint32_t val; + uint32_t err; + struct gsi_per_notify notify; + uint32_t clr = ~0; + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(ee)); + + notify.user_data = gsi_ctx->per.user_data; + + if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) { + err = gsi_readl(gsi_ctx->base + + GSI_EE_n_ERROR_LOG_OFFS(ee)); + if (gsi_ctx->per.ver >= GSI_VER_1_2) + gsi_writel(0, gsi_ctx->base + + GSI_EE_n_ERROR_LOG_OFFS(ee)); + gsi_writel(clr, gsi_ctx->base + + GSI_EE_n_ERROR_LOG_CLR_OFFS(ee)); + gsi_handle_glob_err(err); + } + + if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK) + gsi_handle_gp_int1(); + + if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) { + notify.evt_id = GSI_PER_EVT_GLOB_GP2; + gsi_ctx->per.notify_cb(¬ify); + } + + if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK) { + notify.evt_id = GSI_PER_EVT_GLOB_GP3; + gsi_ctx->per.notify_cb(¬ify); + } + + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(ee)); +} + +static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx) +{ + ctx->wp_local += ctx->elem_sz; + if (ctx->wp_local == ctx->end) + ctx->wp_local = ctx->base; +} + +static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx) +{ + ctx->rp_local += ctx->elem_sz; + if (ctx->rp_local == ctx->end) + ctx->rp_local = ctx->base; +} + +uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr) +{ + WARN_ON(addr < ctx->base || addr >= ctx->end); + return (uint32_t)(addr - ctx->base) / ctx->elem_sz; +} + +static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1, + uint64_t addr2) +{ + uint32_t addr_diff; + + GSIDBG_LOW("gsi base addr 0x%llx end addr 0x%llx\n", + ctx->base, ctx->end); + + if (addr1 < ctx->base || addr1 >= ctx->end) { + GSIERR("address = 0x%llx not in range\n", addr1); + GSI_ASSERT(); + } + + if (addr2 < ctx->base || addr2 >= ctx->end) { + GSIERR("address = 0x%llx not in range\n", addr2); + GSI_ASSERT(); + } + + addr_diff = (uint32_t)(addr2 - addr1); + if (addr1 < addr2) + return addr_diff / ctx->elem_sz; + else + return (addr_diff + ctx->len) / ctx->elem_sz; +} + +static void gsi_process_chan(struct gsi_xfer_compl_evt *evt, + struct gsi_chan_xfer_notify *notify, bool callback) +{ + uint32_t ch_id; + struct gsi_chan_ctx *ch_ctx; + uint16_t rp_idx; + uint64_t rp; + + ch_id = evt->chid; + if (WARN_ON(ch_id >= gsi_ctx->max_ch)) { + GSIERR("Unexpected ch %d\n", ch_id); + return; + } + + ch_ctx = &gsi_ctx->chan[ch_id]; + if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI)) + return; + + rp = evt->xfer_ptr; + + if (ch_ctx->ring.rp_local != rp) { + ch_ctx->stats.completed += + gsi_get_complete_num(&ch_ctx->ring, + ch_ctx->ring.rp_local, rp); + ch_ctx->ring.rp_local = rp; + } + + /* the element at RP is also processed */ + gsi_incr_ring_rp(&ch_ctx->ring); + ch_ctx->stats.completed++; + + ch_ctx->ring.rp = ch_ctx->ring.rp_local; + rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp); + notify->xfer_user_data = ch_ctx->user_data[rp_idx]; + notify->chan_user_data = ch_ctx->props.chan_user_data; + notify->evt_id = evt->code; + notify->bytes_xfered = evt->len; + if (callback) { + if (atomic_read(&ch_ctx->poll_mode)) { + GSIERR("Calling client callback in polling mode\n"); + WARN_ON(1); + } + ch_ctx->props.xfer_cb(notify); + } +} + +static void gsi_process_evt_re(struct gsi_evt_ctx *ctx, + struct gsi_chan_xfer_notify *notify, bool callback) +{ + struct gsi_xfer_compl_evt *evt; + + evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va + + ctx->ring.rp_local - ctx->ring.base); + gsi_process_chan(evt, notify, callback); + 
gsi_incr_ring_rp(&ctx->ring); + /* recycle this element */ + gsi_incr_ring_wp(&ctx->ring); + ctx->stats.completed++; +} + +static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx) +{ + uint32_t val; + + val = (ctx->ring.wp_local & + GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) << + GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(ctx->id, + gsi_ctx->per.ee)); +} + +static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx) +{ + uint32_t val; + + /* + * allocate new events for this channel first + * before submitting the new TREs. + * for TO_GSI channels the event ring doorbell is rang as part of + * interrupt handling. + */ + if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI) + gsi_ring_evt_doorbell(ctx->evtr); + ctx->ring.wp = ctx->ring.wp_local; + + val = (ctx->ring.wp_local & + GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) << + GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(ctx->props.ch_id, + gsi_ctx->per.ee)); +} + +static void gsi_handle_ieob(int ee) +{ + uint32_t ch; + int i; + uint64_t rp; + struct gsi_evt_ctx *ctx; + struct gsi_chan_xfer_notify notify; + unsigned long flags; + unsigned long cntr; + uint32_t msk; + + ch = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(ee)); + msk = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee)); + gsi_writel(ch & msk, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee)); + + for (i = 0; i < GSI_STTS_REG_BITS; i++) { + if ((1 << i) & ch & msk) { + if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) { + GSIERR("invalid event %d\n", i); + break; + } + ctx = &gsi_ctx->evtr[i]; + + /* + * Don't handle MSI interrupts, only handle IEOB + * IRQs + */ + if (ctx->props.intr == GSI_INTR_MSI) + continue; + + if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) { + GSIERR("Unexpected irq intf %d\n", + ctx->props.intf); + GSI_ASSERT(); + } + spin_lock_irqsave(&ctx->ring.slock, flags); +check_again: + cntr = 0; + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee)); + rp |= ctx->ring.rp & 0xFFFFFFFF00000000; + + ctx->ring.rp = rp; + while (ctx->ring.rp_local != rp) { + ++cntr; + if (ctx->props.exclusive && + atomic_read(&ctx->chan->poll_mode)) { + cntr = 0; + break; + } + gsi_process_evt_re(ctx, ¬ify, true); + } + gsi_ring_evt_doorbell(ctx); + if (cntr != 0) + goto check_again; + spin_unlock_irqrestore(&ctx->ring.slock, flags); + } + } +} + +static void gsi_handle_inter_ee_ch_ctrl(int ee) +{ + uint32_t ch; + int i; + + ch = gsi_readl(gsi_ctx->base + + GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(ee)); + gsi_writel(ch, gsi_ctx->base + + GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(ee)); + for (i = 0; i < GSI_STTS_REG_BITS; i++) { + if ((1 << i) & ch) { + /* not currently expected */ + GSIERR("ch %u was inter-EE changed\n", i); + } + } +} + +static void gsi_handle_inter_ee_ev_ctrl(int ee) +{ + uint32_t ch; + int i; + + ch = gsi_readl(gsi_ctx->base + + GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(ee)); + gsi_writel(ch, gsi_ctx->base + + GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(ee)); + for (i = 0; i < GSI_STTS_REG_BITS; i++) { + if ((1 << i) & ch) { + /* not currently expected */ + GSIERR("evt %u was inter-EE changed\n", i); + } + } +} + +static void gsi_handle_general(int ee) +{ + uint32_t val; + struct gsi_per_notify notify; + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(ee)); + + notify.user_data = gsi_ctx->per.user_data; + + if (val & 
GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK) + notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW; + + if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK) + notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW; + + if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK) + notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR; + + if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK) + notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT; + + if (gsi_ctx->per.notify_cb) + gsi_ctx->per.notify_cb(¬ify); + + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(ee)); +} + +#define GSI_ISR_MAX_ITER 50 + +static void gsi_handle_irq(void) +{ + uint32_t type; + int ee = gsi_ctx->per.ee; + unsigned long cnt = 0; + + while (1) { + type = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(ee)); + + if (!type) + break; + + GSIDBG_LOW("type 0x%x\n", type); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK) + gsi_handle_ch_ctrl(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK) + gsi_handle_ev_ctrl(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK) + gsi_handle_glob_ee(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK) + gsi_handle_ieob(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK) + gsi_handle_inter_ee_ch_ctrl(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK) + gsi_handle_inter_ee_ev_ctrl(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK) + gsi_handle_general(ee); + + if (++cnt > GSI_ISR_MAX_ITER) { + /* + * Max number of spurious interrupts from hardware. + * Unexpected hardware state. + */ + GSIERR("Too many spurious interrupt from GSI HW\n"); + GSI_ASSERT(); + } + + } +} + +static irqreturn_t gsi_isr(int irq, void *ctxt) +{ + if (gsi_ctx->per.req_clk_cb) { + bool granted = false; + + gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted); + if (granted) { + gsi_handle_irq(); + gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data); + } + } else { + gsi_handle_irq(); + } + return IRQ_HANDLED; + +} + +static uint32_t gsi_get_max_channels(enum gsi_ver ver) +{ + uint32_t reg = 0; + + switch (ver) { + case GSI_VER_ERR: + case GSI_VER_MAX: + GSIERR("GSI version is not supported %d\n", ver); + WARN_ON(1); + break; + case GSI_VER_1_0: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >> + GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT; + break; + case GSI_VER_1_2: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >> + GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT; + break; + case GSI_VER_1_3: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; + break; + case GSI_VER_2_0: + reg = gsi_readl(gsi_ctx->base + + GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> + GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; + break; + case GSI_VER_2_2: + reg = gsi_readl(gsi_ctx->base + + GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; + break; + case GSI_VER_2_5: + reg = gsi_readl(gsi_ctx->base + + GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + 
GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> + GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; + break; + } + + GSIDBG("max channels %d\n", reg); + + return reg; +} + +static uint32_t gsi_get_max_event_rings(enum gsi_ver ver) +{ + uint32_t reg = 0; + + switch (ver) { + case GSI_VER_ERR: + case GSI_VER_MAX: + GSIERR("GSI version is not supported %d\n", ver); + WARN_ON(1); + break; + case GSI_VER_1_0: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >> + GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT; + break; + case GSI_VER_1_2: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >> + GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT; + break; + case GSI_VER_1_3: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; + break; + case GSI_VER_2_0: + reg = gsi_readl(gsi_ctx->base + + GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> + GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; + break; + case GSI_VER_2_2: + reg = gsi_readl(gsi_ctx->base + + GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; + break; + case GSI_VER_2_5: + reg = gsi_readl(gsi_ctx->base + + GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> + GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; + break; + } + + GSIDBG("max event rings %d\n", reg); + + return reg; +} +int gsi_complete_clk_grant(unsigned long dev_hdl) +{ + unsigned long flags; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!gsi_ctx->per_registered) { + GSIERR("no client registered\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (dev_hdl != (uintptr_t)gsi_ctx) { + GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl, + gsi_ctx); + return -GSI_STATUS_INVALID_PARAMS; + } + + spin_lock_irqsave(&gsi_ctx->slock, flags); + gsi_handle_irq(); + gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data); + spin_unlock_irqrestore(&gsi_ctx->slock, flags); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_complete_clk_grant); + +int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size) +{ + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + gsi_ctx->base = devm_ioremap_nocache( + gsi_ctx->dev, gsi_base_addr, gsi_size); + + if (!gsi_ctx->base) { + GSIERR("failed to map access to GSI HW\n"); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + + GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%x)\n", + &gsi_base_addr, + gsi_ctx->base, + gsi_size); + + return 0; +} +EXPORT_SYMBOL(gsi_map_base); + +int gsi_unmap_base(void) +{ + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!gsi_ctx->base) { + GSIERR("access to GSI HW has not been mapped\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + devm_iounmap(gsi_ctx->dev, gsi_ctx->base); + + gsi_ctx->base = NULL; + + return 0; +} +EXPORT_SYMBOL(gsi_unmap_base); + +int 
gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl) +{ + int res; + uint32_t val; + int needed_reg_ver; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || !dev_hdl) { + GSIERR("bad params props=%pK dev_hdl=%pK\n", props, dev_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) { + GSIERR("bad params gsi_ver=%d\n", props->ver); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (!props->notify_cb) { + GSIERR("notify callback must be provided\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->req_clk_cb && !props->rel_clk_cb) { + GSIERR("rel callback must be provided\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_ctx->per_registered) { + GSIERR("per already registered\n"); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + switch (props->ver) { + case GSI_VER_1_0: + case GSI_VER_1_2: + case GSI_VER_1_3: + case GSI_VER_2_0: + case GSI_VER_2_2: + needed_reg_ver = GSI_REGISTER_VER_1; + break; + case GSI_VER_2_5: + needed_reg_ver = GSI_REGISTER_VER_2; + break; + case GSI_VER_ERR: + case GSI_VER_MAX: + default: + GSIERR("GSI version is not supported %d\n", props->ver); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (needed_reg_ver != GSI_REGISTER_VER_CURRENT) { + GSIERR("Invalid register version. current=%d, needed=%d\n", + GSI_REGISTER_VER_CURRENT, needed_reg_ver); + return -GSI_STATUS_UNSUPPORTED_OP; + } + GSIDBG("gsi ver %d register ver %d needed register ver %d\n", + props->ver, GSI_REGISTER_VER_CURRENT, needed_reg_ver); + + spin_lock_init(&gsi_ctx->slock); + if (props->intr == GSI_INTR_IRQ) { + if (!props->irq) { + GSIERR("bad irq specified %u\n", props->irq); + return -GSI_STATUS_INVALID_PARAMS; + } + /* + * On a real UE, there are two separate interrupt + * vectors that get directed toward the GSI/IPA + * drivers. They are handled by gsi_isr() and + * (ipa_isr() or ipa3_isr()) respectively. In the + * emulation environment, this is not the case; + * instead, interrupt vectors are routed to the + * emulation hardware's interrupt controller, which, + * in turn, forwards a single interrupt to the GSI/IPA + * driver. When the new interrupt vector is received, + * the driver needs to probe the interrupt + * controller's registers to see if one, the other, or + * both interrupts have occurred. Given the above, we + * now need to handle both situations, namely: the + * emulator's and the real UE. + */ + if (running_emulation) { + /* + * New scheme involving the emulator's + * interrupt controller. + */ + res = devm_request_threaded_irq( + gsi_ctx->dev, + props->irq, + /* top half handler to follow */ + emulator_hard_irq_isr, + /* threaded bottom half handler to follow */ + emulator_soft_irq_isr, + IRQF_SHARED, + "emulator_intcntrlr", + gsi_ctx); + } else { + /* + * Traditional scheme used on the real UE. + */ + res = devm_request_irq(gsi_ctx->dev, props->irq, + gsi_isr, + props->req_clk_cb ?
IRQF_TRIGGER_RISING : + IRQF_TRIGGER_HIGH, + "gsi", + gsi_ctx); + } + if (res) { + GSIERR( + "failed to register isr for %u\n", + props->irq); + return -GSI_STATUS_ERROR; + } + GSIDBG( + "succeeded to register isr for %u\n", + props->irq); + + res = enable_irq_wake(props->irq); + if (res) + GSIERR("failed to enable wake irq %u\n", props->irq); + else + GSIERR("GSI irq is wake enabled %u\n", props->irq); + + } else { + GSIERR("do not support interrupt type %u\n", props->intr); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + /* + * If base not previously mapped via gsi_map_base(), map it + * now... + */ + if (!gsi_ctx->base) { + res = gsi_map_base(props->phys_addr, props->size); + if (res) + return res; + } + + if (running_emulation) { + GSIDBG("GSI SW ver register value 0x%x\n", + gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_SW_VERSION_OFFS(0))); + gsi_ctx->intcntrlr_mem_size = + props->emulator_intcntrlr_size; + gsi_ctx->intcntrlr_base = + devm_ioremap_nocache( + gsi_ctx->dev, + props->emulator_intcntrlr_addr, + props->emulator_intcntrlr_size); + if (!gsi_ctx->intcntrlr_base) { + GSIERR( + "failed to remap emulator's interrupt controller HW\n"); + gsi_unmap_base(); + devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + + GSIDBG( + "Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n", + &(props->emulator_intcntrlr_addr), + gsi_ctx->intcntrlr_base, + props->emulator_intcntrlr_size); + + gsi_ctx->intcntrlr_gsi_isr = gsi_isr; + gsi_ctx->intcntrlr_client_isr = + props->emulator_intcntrlr_client_isr; + } + + gsi_ctx->per = *props; + gsi_ctx->per_registered = true; + mutex_init(&gsi_ctx->mlock); + atomic_set(&gsi_ctx->num_chan, 0); + atomic_set(&gsi_ctx->num_evt_ring, 0); + gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver); + if (gsi_ctx->max_ch == 0) { + gsi_unmap_base(); + if (running_emulation) + devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base); + gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL; + devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); + GSIERR("failed to get max channels\n"); + return -GSI_STATUS_ERROR; + } + gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver); + if (gsi_ctx->max_ev == 0) { + gsi_unmap_base(); + if (running_emulation) + devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base); + gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL; + devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); + GSIERR("failed to get max event rings\n"); + return -GSI_STATUS_ERROR; + } + + if (gsi_ctx->max_ev > GSI_EVT_RING_MAX) { + GSIERR("max event rings are beyond absolute maximum\n"); + return -GSI_STATUS_ERROR; + } + + if (props->mhi_er_id_limits_valid && + props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) { + gsi_unmap_base(); + if (running_emulation) + devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base); + gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL; + devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); + GSIERR("MHI event ring start id %u is beyond max %u\n", + props->mhi_er_id_limits[0], gsi_ctx->max_ev); + return -GSI_STATUS_ERROR; + } + + gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1); + + /* exclude reserved mhi events */ + if (props->mhi_er_id_limits_valid) + gsi_ctx->evt_bmap |= + ((1 << (props->mhi_er_id_limits[1] + 1)) - 1) ^ + ((1 << (props->mhi_er_id_limits[0])) - 1); + + /* + * enable all interrupts but GSI_BREAK_POINT. + * Inter EE commands / interrupt are no supported. 
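+ * Note that in the general IRQ enable written below, only the
+ * BREAK_POINT bit is left cleared; every other interrupt source
+ * is unmasked with a full mask.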
+ */ + __gsi_config_type_irq(props->ee, ~0, ~0); + __gsi_config_ch_irq(props->ee, ~0, ~0); + __gsi_config_evt_irq(props->ee, ~0, ~0); + __gsi_config_ieob_irq(props->ee, ~0, ~0); + __gsi_config_glob_irq(props->ee, ~0, ~0); + __gsi_config_gen_irq(props->ee, ~0, + ~GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK); + + gsi_writel(props->intr, gsi_ctx->base + + GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee)); + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee)); + if (val & GSI_EE_n_GSI_STATUS_ENABLED_BMSK) + gsi_ctx->enabled = true; + else + GSIERR("Manager EE has not enabled GSI, GSI un-usable\n"); + + if (gsi_ctx->per.ver >= GSI_VER_1_2) + gsi_writel(0, gsi_ctx->base + + GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee)); + + if (running_emulation) { + /* + * Set up the emulator's interrupt controller... + */ + res = setup_emulator_cntrlr( + gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size); + if (res != 0) { + gsi_unmap_base(); + devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base); + gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL; + devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); + GSIERR("setup_emulator_cntrlr() failed\n"); + return res; + } + } + + *dev_hdl = (uintptr_t)gsi_ctx; + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_register_device); + +int gsi_write_device_scratch(unsigned long dev_hdl, + struct gsi_device_scratch *val) +{ + unsigned int max_usb_pkt_size = 0; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!gsi_ctx->per_registered) { + GSIERR("no client registered\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (dev_hdl != (uintptr_t)gsi_ctx) { + GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl, + gsi_ctx); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (val->max_usb_pkt_size_valid && + val->max_usb_pkt_size != 1024 && + val->max_usb_pkt_size != 512 && + val->max_usb_pkt_size != 64) { + GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl, + val->max_usb_pkt_size); + return -GSI_STATUS_INVALID_PARAMS; + } + + mutex_lock(&gsi_ctx->mlock); + if (val->mhi_base_chan_idx_valid) + gsi_ctx->scratch.word0.s.mhi_base_chan_idx = + val->mhi_base_chan_idx; + + if (val->max_usb_pkt_size_valid) { + max_usb_pkt_size = 2; + if (val->max_usb_pkt_size > 64) + max_usb_pkt_size = + (val->max_usb_pkt_size == 1024) ? 
1 : 0; + gsi_ctx->scratch.word0.s.max_usb_pkt_size = max_usb_pkt_size; + } + + gsi_writel(gsi_ctx->scratch.word0.val, + gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + mutex_unlock(&gsi_ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_write_device_scratch); + +int gsi_deregister_device(unsigned long dev_hdl, bool force) +{ + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!gsi_ctx->per_registered) { + GSIERR("no client registered\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (dev_hdl != (uintptr_t)gsi_ctx) { + GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl, + gsi_ctx); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (!force && atomic_read(&gsi_ctx->num_chan)) { + GSIERR("cannot deregister %u channels are still connected\n", + atomic_read(&gsi_ctx->num_chan)); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (!force && atomic_read(&gsi_ctx->num_evt_ring)) { + GSIERR("cannot deregister %u events are still connected\n", + atomic_read(&gsi_ctx->num_evt_ring)); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + /* disable all interrupts */ + __gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0); + __gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0); + __gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0); + __gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0); + __gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0); + __gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0); + + devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx); + gsi_unmap_base(); + memset(gsi_ctx, 0, sizeof(*gsi_ctx)); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_deregister_device); + +static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props, + uint8_t evt_id, unsigned int ee) +{ + uint32_t val; + + GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr, + props->re_size); + + val = (((props->intf << GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT) & + GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK) | + ((props->intr << GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT) & + GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK) | + ((props->re_size << GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT) + & GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK)); + + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(evt_id, ee)); + + val = (props->ring_len & GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee)); + + val = (props->ring_base_addr & + GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(evt_id, ee)); + + val = ((props->ring_base_addr >> 32) & + GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(evt_id, ee)); + + val = (((props->int_modt << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT) & + GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK) | + ((props->int_modc << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT) & + GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(evt_id, ee)); + + val = (props->intvec & GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(evt_id, ee)); + + val = (props->msi_addr & GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT; + gsi_writel(val, 
gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_id, ee)); + + val = ((props->msi_addr >> 32) & + GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_id, ee)); + + val = (props->rp_update_addr & + GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_id, ee)); + + val = ((props->rp_update_addr >> 32) & + GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_id, ee)); +} + +static void gsi_init_evt_ring(struct gsi_evt_ring_props *props, + struct gsi_ring_ctx *ctx) +{ + ctx->base_va = (uintptr_t)props->ring_base_vaddr; + ctx->base = props->ring_base_addr; + ctx->wp = ctx->base; + ctx->rp = ctx->base; + ctx->wp_local = ctx->base; + ctx->rp_local = ctx->base; + ctx->len = props->ring_len; + ctx->elem_sz = props->re_size; + ctx->max_num_elem = ctx->len / ctx->elem_sz - 1; + ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz; +} + +static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx) +{ + unsigned long flags; + uint32_t val; + + spin_lock_irqsave(&ctx->ring.slock, flags); + memset((void *)ctx->ring.base_va, 0, ctx->ring.len); + ctx->ring.wp_local = ctx->ring.base + + ctx->ring.max_num_elem * ctx->ring.elem_sz; + + /* write order MUST be MSB followed by LSB */ + val = ((ctx->ring.wp_local >> 32) & + GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) << + GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(ctx->id, + gsi_ctx->per.ee)); + + gsi_ring_evt_doorbell(ctx); + spin_unlock_irqrestore(&ctx->ring.slock, flags); +} + +static void gsi_prime_evt_ring_wdi(struct gsi_evt_ctx *ctx) +{ + unsigned long flags; + + spin_lock_irqsave(&ctx->ring.slock, flags); + if (ctx->ring.base_va) + memset((void *)ctx->ring.base_va, 0, ctx->ring.len); + ctx->ring.wp_local = ctx->ring.base + + ((ctx->ring.max_num_elem + 2) * ctx->ring.elem_sz); + gsi_ring_evt_doorbell(ctx); + spin_unlock_irqrestore(&ctx->ring.slock, flags); +} + +static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props) +{ + uint64_t ra; + + if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B && + props->ring_len % 4) || + (props->re_size == GSI_EVT_RING_RE_SIZE_8B && + props->ring_len % 8) || + (props->re_size == GSI_EVT_RING_RE_SIZE_16B && + props->ring_len % 16)) { + GSIERR("bad params ring_len %u not a multiple of RE size %u\n", + props->ring_len, props->re_size); + return -GSI_STATUS_INVALID_PARAMS; + } + + ra = props->ring_base_addr; + do_div(ra, roundup_pow_of_two(props->ring_len)); + + if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) { + GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n", + props->ring_base_addr, + roundup_pow_of_two(props->ring_len)); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->intf == GSI_EVT_CHTYPE_GPI_EV && + !props->ring_base_vaddr) { + GSIERR("protocol %u requires ring base VA\n", props->intf); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->intf == GSI_EVT_CHTYPE_MHI_EV && + (!props->evchid_valid || + props->evchid > gsi_ctx->per.mhi_er_id_limits[1] || + props->evchid < gsi_ctx->per.mhi_er_id_limits[0])) { + GSIERR("MHI requires evchid valid=%d val=%u\n", + props->evchid_valid, props->evchid); + return 
-GSI_STATUS_INVALID_PARAMS; + } + + if (props->intf != GSI_EVT_CHTYPE_MHI_EV && + props->evchid_valid) { + GSIERR("protocol %u cannot specify evchid\n", props->intf); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (!props->err_cb) { + GSIERR("err callback must be provided\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + return GSI_STATUS_SUCCESS; +} + +int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl, + unsigned long *evt_ring_hdl) +{ + unsigned long evt_id; + enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE; + uint32_t val; + struct gsi_evt_ctx *ctx; + int res; + int ee; + unsigned long flags; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) { + GSIERR("bad params props=%pK dev_hdl=0x%lx evt_ring_hdl=%pK\n", + props, dev_hdl, evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_validate_evt_ring_props(props)) { + GSIERR("invalid params\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (!props->evchid_valid) { + mutex_lock(&gsi_ctx->mlock); + evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap, + sizeof(unsigned long) * BITS_PER_BYTE); + if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) { + GSIERR("failed to alloc event ID\n"); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + set_bit(evt_id, &gsi_ctx->evt_bmap); + mutex_unlock(&gsi_ctx->mlock); + } else { + evt_id = props->evchid; + } + GSIDBG("Using %lu as virt evt id\n", evt_id); + + ctx = &gsi_ctx->evtr[evt_id]; + memset(ctx, 0, sizeof(*ctx)); + mutex_init(&ctx->mlock); + init_completion(&ctx->compl); + atomic_set(&ctx->chan_ref_cnt, 0); + ctx->props = *props; + + mutex_lock(&gsi_ctx->mlock); + val = (((evt_id << GSI_EE_n_EV_CH_CMD_CHID_SHFT) & + GSI_EE_n_EV_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_EV_CH_CMD_OPCODE_BMSK)); + ee = gsi_ctx->per.ee; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_CMD_OFFS(ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("evt_id=%lu timed out\n", evt_id); + if (!props->evchid_valid) + clear_bit(evt_id, &gsi_ctx->evt_bmap); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("evt_id=%lu allocation failed state=%u\n", + evt_id, ctx->state); + if (!props->evchid_valid) + clear_bit(evt_id, &gsi_ctx->evt_bmap); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + + gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee); + + spin_lock_init(&ctx->ring.slock); + gsi_init_evt_ring(props, &ctx->ring); + + ctx->id = evt_id; + *evt_ring_hdl = evt_id; + atomic_inc(&gsi_ctx->num_evt_ring); + if (props->intf == GSI_EVT_CHTYPE_GPI_EV) + gsi_prime_evt_ring(ctx); + else if (props->intf == GSI_EVT_CHTYPE_WDI2_EV) + gsi_prime_evt_ring_wdi(ctx); + mutex_unlock(&gsi_ctx->mlock); + + spin_lock_irqsave(&gsi_ctx->slock, flags); + gsi_writel(1 << evt_id, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee)); + + /* enable ieob interrupts for GPI, enable MSI interrupts */ + if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) && + (props->intr != GSI_INTR_MSI)) + __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0); + else + __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0); + spin_unlock_irqrestore(&gsi_ctx->slock, flags); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_alloc_evt_ring); + +static void 
__gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl, + union __packed gsi_evt_scratch val) +{ + gsi_writel(val.data.word1, gsi_ctx->base + + GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(evt_ring_hdl, + gsi_ctx->per.ee)); + gsi_writel(val.data.word2, gsi_ctx->base + + GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(evt_ring_hdl, + gsi_ctx->per.ee)); +} + +int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl, + union __packed gsi_evt_scratch val) +{ + struct gsi_evt_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", + gsi_ctx->evtr[evt_ring_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&ctx->mlock); + ctx->scratch = val; + __gsi_write_evt_ring_scratch(evt_ring_hdl, val); + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_write_evt_ring_scratch); + +int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl) +{ + uint32_t val; + enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC; + struct gsi_evt_ctx *ctx; + int res; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev || + evt_ring_hdl >= GSI_EVT_RING_MAX) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (atomic_read(&ctx->chan_ref_cnt)) { + GSIERR("%d channels still using this event ring\n", + atomic_read(&ctx->chan_ref_cnt)); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) & + GSI_EE_n_EV_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_EV_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("evt_id=%lu timed out\n", evt_ring_hdl); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + + if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) { + GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl, + ctx->state); + /* + * IPA Hardware returned GSI RING not allocated, which is + * unexpected hardware state. 
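+	 * Reaching this point means the EV_CH de-alloc command completed
+	 * but the ring did not move to NOT_ALLOCATED, i.e. the driver and
+	 * hardware views of the ring state disagree, hence the assert below.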
+ */ + GSI_ASSERT(); + } + mutex_unlock(&gsi_ctx->mlock); + + if (!ctx->props.evchid_valid) { + mutex_lock(&gsi_ctx->mlock); + clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap); + mutex_unlock(&gsi_ctx->mlock); + } + atomic_dec(&gsi_ctx->num_evt_ring); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_dealloc_evt_ring); + +int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl, + uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb) +{ + struct gsi_evt_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!db_addr_wp_msb || !db_addr_wp_lsb) { + GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb, + db_addr_wp_lsb); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", + gsi_ctx->evtr[evt_ring_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + *db_addr_wp_lsb = gsi_ctx->per.phys_addr + + GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(evt_ring_hdl, gsi_ctx->per.ee); + *db_addr_wp_msb = gsi_ctx->per.phys_addr + + GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(evt_ring_hdl, gsi_ctx->per.ee); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_query_evt_ring_db_addr); + +int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value) +{ + struct gsi_evt_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", + gsi_ctx->evtr[evt_ring_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + ctx->ring.wp_local = value; + gsi_ring_evt_doorbell(ctx); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_ring_evt_ring_db); + +int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value) +{ + struct gsi_chan_ctx *ctx; + uint32_t val; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state != GSI_CHAN_STATE_STARTED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + ctx->ring.wp_local = value; + + /* write MSB first */ + val = ((ctx->ring.wp_local >> 32) & + GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) << + GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id, + gsi_ctx->per.ee)); + + gsi_ring_chan_doorbell(ctx); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_ring_ch_ring_db); + +int gsi_reset_evt_ring(unsigned long evt_ring_hdl) +{ + uint32_t val; + enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET; + struct gsi_evt_ctx *ctx; + int res; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad 
state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) & + GSI_EE_n_EV_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_EV_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("evt_id=%lu timed out\n", evt_ring_hdl); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl, + ctx->state); + /* + * IPA Hardware returned GSI RING not allocated, which is + * unexpected. Indicates hardware instability. + */ + GSI_ASSERT(); + } + + gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee); + gsi_init_evt_ring(&ctx->props, &ctx->ring); + + /* restore scratch */ + __gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch); + + if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV) + gsi_prime_evt_ring(ctx); + if (ctx->props.intf == GSI_EVT_CHTYPE_WDI2_EV) + gsi_prime_evt_ring_wdi(ctx); + mutex_unlock(&gsi_ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_reset_evt_ring); + +int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl, + struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr) +{ + struct gsi_evt_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || !scr) { + GSIERR("bad params props=%pK scr=%pK\n", props, scr); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&ctx->mlock); + *props = ctx->props; + *scr = ctx->scratch; + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_get_evt_ring_cfg); + +int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl, + struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr) +{ + struct gsi_evt_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || gsi_validate_evt_ring_props(props)) { + GSIERR("bad params props=%pK\n", props); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->props.exclusive != props->exclusive) { + GSIERR("changing immutable fields not supported\n"); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&ctx->mlock); + ctx->props = *props; + if (scr) + ctx->scratch = *scr; + mutex_unlock(&ctx->mlock); + + return gsi_reset_evt_ring(evt_ring_hdl); +} +EXPORT_SYMBOL(gsi_set_evt_ring_cfg); + +static void gsi_program_chan_ctx_qos(struct gsi_chan_props *props, + unsigned int ee) +{ + uint32_t val; + + val = + (((props->low_weight << + GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) & + GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) | + ((props->max_prefetch << + 
GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) & + GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) | + ((props->use_db_eng << + GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) & + GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK)); + if (gsi_ctx->per.ver >= GSI_VER_2_0) + val |= ((props->prefetch_mode << + GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT) + & GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK); + + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee)); +} + +static void gsi_program_chan_ctx_qos_v2_5(struct gsi_chan_props *props, + unsigned int ee) +{ + uint32_t val; + + val = + (((props->low_weight << + GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) | + ((props->max_prefetch << + GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) | + ((props->use_db_eng << + GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK) | + ((props->prefetch_mode << + GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK) | + ((props->empty_lvl_threshold << + GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK)); + + gsi_writel(val, gsi_ctx->base + + GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee)); +} + +static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee, + uint8_t erindex) +{ + uint32_t val; + uint32_t prot; + uint32_t prot_msb; + + switch (props->prot) { + case GSI_CHAN_PROT_MHI: + case GSI_CHAN_PROT_XHCI: + case GSI_CHAN_PROT_GPI: + case GSI_CHAN_PROT_XDCI: + case GSI_CHAN_PROT_WDI2: + case GSI_CHAN_PROT_WDI3: + case GSI_CHAN_PROT_GCI: + case GSI_CHAN_PROT_MHIP: + prot_msb = 0; + break; + case GSI_CHAN_PROT_AQC: + case GSI_CHAN_PROT_11AD: + prot_msb = 1; + break; + default: + GSIERR("Unsupported protocol %d\n", props->prot); + WARN_ON(1); + return; + } + prot = props->prot; + + val = ((prot << + GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT) & + GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK); + if (gsi_ctx->per.ver >= GSI_VER_2_5) { + val |= ((prot_msb << + GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK); + } + + val |= (((props->dir << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT) & + GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK) | + ((erindex << GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT) & + GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK) | + ((props->re_size << GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT) + & GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(props->ch_id, ee)); + + val = (props->ring_len & GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK) << + GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(props->ch_id, ee)); + + val = (props->ring_base_addr & + GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) << + GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(props->ch_id, ee)); + + val = ((props->ring_base_addr >> 32) & + GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) << + GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(props->ch_id, ee)); + + if (gsi_ctx->per.ver >= GSI_VER_2_5) + gsi_program_chan_ctx_qos_v2_5(props, ee); + else + gsi_program_chan_ctx_qos(props, ee); +} + +static void gsi_init_chan_ring(struct gsi_chan_props *props, + struct 
gsi_ring_ctx *ctx) +{ + ctx->base_va = (uintptr_t)props->ring_base_vaddr; + ctx->base = props->ring_base_addr; + ctx->wp = ctx->base; + ctx->rp = ctx->base; + ctx->wp_local = ctx->base; + ctx->rp_local = ctx->base; + ctx->len = props->ring_len; + ctx->elem_sz = props->re_size; + ctx->max_num_elem = ctx->len / ctx->elem_sz - 1; + ctx->end = ctx->base + (ctx->max_num_elem + 1) * + ctx->elem_sz; +} + +static int gsi_validate_channel_props(struct gsi_chan_props *props) +{ + uint64_t ra; + uint64_t last; + + if (props->ch_id >= gsi_ctx->max_ch) { + GSIERR("ch_id %u invalid\n", props->ch_id); + return -GSI_STATUS_INVALID_PARAMS; + } + + if ((props->re_size == GSI_CHAN_RE_SIZE_4B && + props->ring_len % 4) || + (props->re_size == GSI_CHAN_RE_SIZE_8B && + props->ring_len % 8) || + (props->re_size == GSI_CHAN_RE_SIZE_16B && + props->ring_len % 16) || + (props->re_size == GSI_CHAN_RE_SIZE_32B && + props->ring_len % 32)) { + GSIERR("bad params ring_len %u not a multiple of re size %u\n", + props->ring_len, props->re_size); + return -GSI_STATUS_INVALID_PARAMS; + } + + ra = props->ring_base_addr; + do_div(ra, roundup_pow_of_two(props->ring_len)); + + if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) { + GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n", + props->ring_base_addr, + roundup_pow_of_two(props->ring_len)); + return -GSI_STATUS_INVALID_PARAMS; + } + + last = props->ring_base_addr + props->ring_len - props->re_size; + + /* MSB should stay same within the ring */ + if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) != + (last & 0xFFFFFFFF00000000ULL)) { + GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n", + props->ring_base_addr, + props->ring_len); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->prot == GSI_CHAN_PROT_GPI && + !props->ring_base_vaddr) { + GSIERR("protocol %u requires ring base VA\n", props->prot); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) { + GSIERR("invalid channel low weight %u\n", props->low_weight); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) { + GSIERR("xfer callback must be provided\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (!props->err_cb) { + GSIERR("err callback must be provided\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + return GSI_STATUS_SUCCESS; +} + +int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl, + unsigned long *chan_hdl) +{ + struct gsi_chan_ctx *ctx; + uint32_t val; + int res; + int ee; + enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE; + uint8_t erindex; + void **user_data; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) { + GSIERR("bad params props=%pK dev_hdl=0x%lx chan_hdl=%pK\n", + props, dev_hdl, chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_validate_channel_props(props)) { + GSIERR("bad params\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->evt_ring_hdl != ~0) { + if (props->evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("invalid evt ring=%lu\n", props->evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (atomic_read( + &gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) && + gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive) { + GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n", + props->evt_ring_hdl, chan_hdl); + return -GSI_STATUS_UNSUPPORTED_OP; + } + } + + ctx = 
&gsi_ctx->chan[props->ch_id]; + if (ctx->allocated) { + GSIERR("chan %d already allocated\n", props->ch_id); + return -GSI_STATUS_NODEV; + } + + memset(ctx, 0, sizeof(*ctx)); + user_data = devm_kzalloc(gsi_ctx->dev, + (props->ring_len / props->re_size) * sizeof(void *), + GFP_KERNEL); + if (user_data == NULL) { + GSIERR("context not allocated\n"); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + + mutex_init(&ctx->mlock); + init_completion(&ctx->compl); + atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK); + ctx->props = *props; + + if (gsi_ctx->per.ver != GSI_VER_2_2) { + mutex_lock(&gsi_ctx->mlock); + ee = gsi_ctx->per.ee; + gsi_ctx->ch_dbg[props->ch_id].ch_allocate++; + val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("chan_hdl=%u timed out\n", props->ch_id); + mutex_unlock(&gsi_ctx->mlock); + devm_kfree(gsi_ctx->dev, user_data); + return -GSI_STATUS_TIMED_OUT; + } + if (ctx->state != GSI_CHAN_STATE_ALLOCATED) { + GSIERR("chan_hdl=%u allocation failed state=%d\n", + props->ch_id, ctx->state); + mutex_unlock(&gsi_ctx->mlock); + devm_kfree(gsi_ctx->dev, user_data); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + mutex_unlock(&gsi_ctx->mlock); + } else { + mutex_lock(&gsi_ctx->mlock); + ctx->state = GSI_CHAN_STATE_ALLOCATED; + mutex_unlock(&gsi_ctx->mlock); + } + erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl : + GSI_NO_EVT_ERINDEX; + if (erindex != GSI_NO_EVT_ERINDEX) { + ctx->evtr = &gsi_ctx->evtr[erindex]; + atomic_inc(&ctx->evtr->chan_ref_cnt); + if (ctx->evtr->props.exclusive) + ctx->evtr->chan = ctx; + } + + gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex); + + spin_lock_init(&ctx->ring.slock); + gsi_init_chan_ring(props, &ctx->ring); + if (!props->max_re_expected) + ctx->props.max_re_expected = ctx->ring.max_num_elem; + ctx->user_data = user_data; + *chan_hdl = props->ch_id; + ctx->allocated = true; + ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies); + atomic_inc(&gsi_ctx->num_chan); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_alloc_channel); + +static int gsi_alloc_ap_channel(unsigned int chan_hdl) +{ + struct gsi_chan_ctx *ctx; + uint32_t val; + int res; + int ee; + enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + if (ctx->allocated) { + GSIERR("chan %d already allocated\n", chan_hdl); + return -GSI_STATUS_NODEV; + } + + memset(ctx, 0, sizeof(*ctx)); + + mutex_init(&ctx->mlock); + init_completion(&ctx->compl); + atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK); + + mutex_lock(&gsi_ctx->mlock); + ee = gsi_ctx->per.ee; + gsi_ctx->ch_dbg[chan_hdl].ch_allocate++; + val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("chan_hdl=%u timed out\n", chan_hdl); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + if (ctx->state != GSI_CHAN_STATE_ALLOCATED) { + GSIERR("chan_hdl=%u allocation failed state=%d\n", + chan_hdl, ctx->state); + 
mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + mutex_unlock(&gsi_ctx->mlock); + + return GSI_STATUS_SUCCESS; +} + +static void __gsi_write_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch val) +{ + gsi_writel(val.data.word1, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl, + gsi_ctx->per.ee)); + gsi_writel(val.data.word2, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl, + gsi_ctx->per.ee)); + gsi_writel(val.data.word3, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + gsi_writel(val.data.word4, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl, + gsi_ctx->per.ee)); +} + +int gsi_write_channel_scratch3_reg(unsigned long chan_hdl, + union __packed gsi_wdi_channel_scratch3_reg val) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + mutex_lock(&ctx->mlock); + + ctx->scratch.wdi.endp_metadatareg_offset = + val.wdi.endp_metadatareg_offset; + ctx->scratch.wdi.qmap_id = val.wdi.qmap_id; + + gsi_writel(val.data.word1, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl, + gsi_ctx->per.ee)); + mutex_unlock(&ctx->mlock); + return GSI_STATUS_SUCCESS; +} + +static void __gsi_read_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch * val) +{ + val->data.word1 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + val->data.word2 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + val->data.word3 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + val->data.word4 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl, + gsi_ctx->per.ee)); +} + +static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch( + unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr) +{ + union __packed gsi_channel_scratch scr; + + /* below sequence is not atomic. 
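+	 * The four scratch words are read back, the MHI-owned fields are
+	 * overwritten (max_outstanding_tre/outstanding_threshold only on
+	 * GSI versions below 2.5), and all four words are written back.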
assumption is sequencer specific fields + * will remain unchanged across this sequence + */ + + /* READ */ + scr.data.word1 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + scr.data.word2 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + scr.data.word3 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + scr.data.word4 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + /* UPDATE */ + scr.mhi.mhi_host_wp_addr = mscr.mhi_host_wp_addr; + scr.mhi.assert_bit40 = mscr.assert_bit40; + scr.mhi.polling_configuration = mscr.polling_configuration; + scr.mhi.burst_mode_enabled = mscr.burst_mode_enabled; + scr.mhi.polling_mode = mscr.polling_mode; + scr.mhi.oob_mod_threshold = mscr.oob_mod_threshold; + + if (gsi_ctx->per.ver < GSI_VER_2_5) { + scr.mhi.max_outstanding_tre = mscr.max_outstanding_tre; + scr.mhi.outstanding_threshold = mscr.outstanding_threshold; + } + + /* WRITE */ + gsi_writel(scr.data.word1, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + gsi_writel(scr.data.word2, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + gsi_writel(scr.data.word3, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + gsi_writel(scr.data.word4, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + return scr; +} + +int gsi_write_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch val) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED && + gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) { + GSIERR("bad state %d\n", + gsi_ctx->chan[chan_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + mutex_lock(&ctx->mlock); + ctx->scratch = val; + __gsi_write_channel_scratch(chan_hdl, val); + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_write_channel_scratch); + +int gsi_read_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch *val) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED && + gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED && + gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) { + GSIERR("bad state %d\n", + gsi_ctx->chan[chan_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + mutex_lock(&ctx->mlock); + __gsi_read_channel_scratch(chan_hdl, val); + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_read_channel_scratch); + +int gsi_update_mhi_channel_scratch(unsigned long chan_hdl, + struct __packed gsi_mhi_channel_scratch mscr) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= 
gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED && + gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) { + GSIERR("bad state %d\n", + gsi_ctx->chan[chan_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + mutex_lock(&ctx->mlock); + ctx->scratch = __gsi_update_mhi_channel_scratch(chan_hdl, mscr); + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_update_mhi_channel_scratch); + +int gsi_query_channel_db_addr(unsigned long chan_hdl, + uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb) +{ + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!db_addr_wp_msb || !db_addr_wp_lsb) { + GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb, + db_addr_wp_lsb); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) { + GSIERR("bad state %d\n", + gsi_ctx->chan[chan_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + *db_addr_wp_lsb = gsi_ctx->per.phys_addr + + GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(chan_hdl, gsi_ctx->per.ee); + *db_addr_wp_msb = gsi_ctx->per.phys_addr + + GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(chan_hdl, gsi_ctx->per.ee); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_query_channel_db_addr); + +int gsi_start_channel(unsigned long chan_hdl) +{ + enum gsi_ch_cmd_opcode op = GSI_CH_START; + uint32_t val; + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state != GSI_CHAN_STATE_ALLOCATED && + ctx->state != GSI_CHAN_STATE_STOP_IN_PROC && + ctx->state != GSI_CHAN_STATE_STOPPED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + + gsi_ctx->ch_dbg[chan_hdl].ch_start++; + val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee)); + + GSIDBG("GSI Channel Start, waiting for completion\n"); + gsi_channel_state_change_wait(chan_hdl, + ctx, + GSI_START_CMD_TIMEOUT_MS); + + if (ctx->state != GSI_CHAN_STATE_STARTED) { + /* + * Hardware returned unexpected status, unexpected + * hardware state. 
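+	 * The START command did not move the channel to STARTED within
+	 * GSI_START_CMD_TIMEOUT_MS, so the inconsistency is flagged with
+	 * the assert below.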
+ */ + GSIERR("chan=%lu timed out, unexpected state=%u\n", + chan_hdl, ctx->state); + GSI_ASSERT(); + } + + GSIDBG("GSI Channel=%lu Start success\n", chan_hdl); + + /* write order MUST be MSB followed by LSB */ + val = ((ctx->ring.wp_local >> 32) & + GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) << + GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id, + gsi_ctx->per.ee)); + + mutex_unlock(&gsi_ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_start_channel); + +int gsi_stop_channel(unsigned long chan_hdl) +{ + enum gsi_ch_cmd_opcode op = GSI_CH_STOP; + int res; + uint32_t val; + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state == GSI_CHAN_STATE_STOPPED) { + GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl); + return GSI_STATUS_SUCCESS; + } + + if (ctx->state != GSI_CHAN_STATE_STARTED && + ctx->state != GSI_CHAN_STATE_STOP_IN_PROC && + ctx->state != GSI_CHAN_STATE_ERROR) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + + gsi_ctx->ch_dbg[chan_hdl].ch_stop++; + val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee)); + + GSIDBG("GSI Channel Stop, waiting for completion\n"); + gsi_channel_state_change_wait(chan_hdl, + ctx, + GSI_STOP_CMD_TIMEOUT_MS); + + if (ctx->state != GSI_CHAN_STATE_STOPPED && + ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) { + GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state); + res = -GSI_STATUS_BAD_STATE; + goto free_lock; + } + + if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) { + GSIERR("chan=%lu busy try again\n", chan_hdl); + res = -GSI_STATUS_AGAIN; + goto free_lock; + } + + res = GSI_STATUS_SUCCESS; + +free_lock: + mutex_unlock(&gsi_ctx->mlock); + return res; +} +EXPORT_SYMBOL(gsi_stop_channel); + +int gsi_stop_db_channel(unsigned long chan_hdl) +{ + enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP; + int res; + uint32_t val; + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state == GSI_CHAN_STATE_STOPPED) { + GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl); + return GSI_STATUS_SUCCESS; + } + + if (ctx->state != GSI_CHAN_STATE_STARTED && + ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + + gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++; + val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, + msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS)); + if 
(res == 0) { + GSIERR("chan_hdl=%lu timed out\n", chan_hdl); + res = -GSI_STATUS_TIMED_OUT; + goto free_lock; + } + + if (ctx->state != GSI_CHAN_STATE_STOPPED && + ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) { + GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state); + res = -GSI_STATUS_BAD_STATE; + goto free_lock; + } + + if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) { + GSIERR("chan=%lu busy try again\n", chan_hdl); + res = -GSI_STATUS_AGAIN; + goto free_lock; + } + + res = GSI_STATUS_SUCCESS; + +free_lock: + mutex_unlock(&gsi_ctx->mlock); + return res; +} +EXPORT_SYMBOL(gsi_stop_db_channel); + +int gsi_reset_channel(unsigned long chan_hdl) +{ + enum gsi_ch_cmd_opcode op = GSI_CH_RESET; + int res; + uint32_t val; + struct gsi_chan_ctx *ctx; + bool reset_done = false; + uint32_t retry_cnt = 0; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state != GSI_CHAN_STATE_STOPPED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + +reset: + reinit_completion(&ctx->compl); + gsi_ctx->ch_dbg[chan_hdl].ch_reset++; + val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("chan_hdl=%lu timed out\n", chan_hdl); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + +revrfy_chnlstate: + if (ctx->state != GSI_CHAN_STATE_ALLOCATED) { + GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl, + ctx->state); + /* GSI register update state not sync with gsi channel + * context state not sync, need to wait for 1ms to sync. + */ + retry_cnt++; + if (retry_cnt <= GSI_CHNL_STATE_MAX_RETRYCNT) { + usleep_range(GSI_RESET_WA_MIN_SLEEP, + GSI_RESET_WA_MAX_SLEEP); + goto revrfy_chnlstate; + } + /* + * Hardware returned incorrect state, unexpected + * hardware state. + */ + GSI_ASSERT(); + } + + /* Hardware issue fixed from GSI 2.0 and no need for the WA */ + if (gsi_ctx->per.ver >= GSI_VER_2_0) + reset_done = true; + + /* workaround: reset GSI producers again */ + if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) { + usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP); + reset_done = true; + goto reset; + } + + gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee, + ctx->evtr ? 
ctx->evtr->id : GSI_NO_EVT_ERINDEX); + gsi_init_chan_ring(&ctx->props, &ctx->ring); + + /* restore scratch */ + __gsi_write_channel_scratch(chan_hdl, ctx->scratch); + + mutex_unlock(&gsi_ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_reset_channel); + +int gsi_dealloc_channel(unsigned long chan_hdl) +{ + enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC; + int res; + uint32_t val; + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state != GSI_CHAN_STATE_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + /*In GSI_VER_2_2 version deallocation channel not supported*/ + if (gsi_ctx->per.ver != GSI_VER_2_2) { + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + + gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++; + val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("chan_hdl=%lu timed out\n", chan_hdl); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) { + GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl, + ctx->state); + /* Hardware returned incorrect value */ + GSI_ASSERT(); + } + + mutex_unlock(&gsi_ctx->mlock); + } else { + mutex_lock(&gsi_ctx->mlock); + GSIDBG("In GSI_VER_2_2 channel deallocation not supported\n"); + ctx->state = GSI_CHAN_STATE_NOT_ALLOCATED; + GSIDBG("chan_hdl=%lu Channel state = %u\n", chan_hdl, + ctx->state); + mutex_unlock(&gsi_ctx->mlock); + } + devm_kfree(gsi_ctx->dev, ctx->user_data); + ctx->allocated = false; + if (ctx->evtr) + atomic_dec(&ctx->evtr->chan_ref_cnt); + atomic_dec(&gsi_ctx->num_chan); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_dealloc_channel); + +void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used) +{ + unsigned long now = jiffies_to_msecs(jiffies); + unsigned long elapsed; + + if (used == 0) { + elapsed = now - ctx->stats.dp.last_timestamp; + if (ctx->stats.dp.empty_time < elapsed) + ctx->stats.dp.empty_time = elapsed; + } + + if (used <= ctx->props.max_re_expected / 3) + ++ctx->stats.dp.ch_below_lo; + else if (used <= 2 * ctx->props.max_re_expected / 3) + ++ctx->stats.dp.ch_below_hi; + else + ++ctx->stats.dp.ch_above_hi; + ctx->stats.dp.last_timestamp = now; +} + +static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx, + uint16_t *num_free_re) +{ + uint16_t start; + uint16_t end; + uint64_t rp; + int ee = gsi_ctx->per.ee; + uint16_t used; + + if (!ctx->evtr) { + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee)); + rp |= ctx->ring.rp & 0xFFFFFFFF00000000; + + ctx->ring.rp = rp; + } else { + rp = ctx->ring.rp_local; + } + + start = gsi_find_idx_from_addr(&ctx->ring, rp); + end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local); + + if (end >= start) + used = end - start; + else + used = ctx->ring.max_num_elem + 1 - (start - end); + + *num_free_re = ctx->ring.max_num_elem - used; +} + +int gsi_query_channel_info(unsigned long chan_hdl, + struct gsi_chan_info *info) +{ + struct 
gsi_chan_ctx *ctx; + spinlock_t *slock; + unsigned long flags; + uint64_t rp; + uint64_t wp; + int ee; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch || !info) { + GSIERR("bad params chan_hdl=%lu info=%pK\n", chan_hdl, info); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + if (ctx->evtr) { + slock = &ctx->evtr->ring.slock; + info->evt_valid = true; + } else { + slock = &ctx->ring.slock; + info->evt_valid = false; + } + + spin_lock_irqsave(slock, flags); + + ee = gsi_ctx->per.ee; + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee)); + rp |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32; + ctx->ring.rp = rp; + info->rp = rp; + + wp = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee)); + wp |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32; + ctx->ring.wp = wp; + info->wp = wp; + + if (info->evt_valid) { + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee)); + rp |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee))) + << 32; + info->evt_rp = rp; + + wp = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee)); + wp |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(ctx->evtr->id, ee))) + << 32; + info->evt_wp = wp; + } + + spin_unlock_irqrestore(slock, flags); + + GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n", + chan_hdl, info->rp, info->wp, + info->evt_valid, info->evt_rp, info->evt_wp); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_query_channel_info); + +int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty) +{ + struct gsi_chan_ctx *ctx; + spinlock_t *slock; + unsigned long flags; + uint64_t rp; + uint64_t wp; + int ee; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch || !is_empty) { + GSIERR("bad params chan_hdl=%lu is_empty=%pK\n", + chan_hdl, is_empty); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + ee = gsi_ctx->per.ee; + + if (ctx->props.prot != GSI_CHAN_PROT_GPI) { + GSIERR("op not supported for protocol %u\n", ctx->props.prot); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->evtr) + slock = &ctx->evtr->ring.slock; + else + slock = &ctx->ring.slock; + + spin_lock_irqsave(slock, flags); + + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee)); + rp |= ctx->ring.rp & 0xFFFFFFFF00000000; + ctx->ring.rp = rp; + + wp = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee)); + wp |= ctx->ring.wp & 0xFFFFFFFF00000000; + ctx->ring.wp = wp; + + if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI) + *is_empty = (ctx->ring.rp_local == rp) ? true : false; + else + *is_empty = (wp == rp) ? 
true : false; + + spin_unlock_irqrestore(slock, flags); + + GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n", + chan_hdl, rp, wp, ctx->ring.rp_local); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_is_channel_empty); + +int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers, + struct gsi_xfer_elem *xfer, bool ring_db) +{ + struct gsi_chan_ctx *ctx; + uint16_t free; + struct gsi_tre tre; + struct gsi_tre *tre_ptr; + uint16_t idx; + uint64_t wp_rollback; + int i; + spinlock_t *slock; + unsigned long flags; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) { + GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n", + chan_hdl, num_xfers, xfer); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->props.prot != GSI_CHAN_PROT_GPI) { + GSIERR("op not supported for protocol %u\n", ctx->props.prot); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->evtr) + slock = &ctx->evtr->ring.slock; + else + slock = &ctx->ring.slock; + + spin_lock_irqsave(slock, flags); + __gsi_query_channel_free_re(ctx, &free); + + if (num_xfers > free) { + GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n", + chan_hdl, num_xfers, free); + spin_unlock_irqrestore(slock, flags); + return -GSI_STATUS_RING_INSUFFICIENT_SPACE; + } + + wp_rollback = ctx->ring.wp_local; + for (i = 0; i < num_xfers; i++) { + memset(&tre, 0, sizeof(tre)); + tre.buffer_ptr = xfer[i].addr; + tre.buf_len = xfer[i].len; + if (xfer[i].type == GSI_XFER_ELEM_DATA) { + tre.re_type = GSI_RE_XFER; + } else if (xfer[i].type == GSI_XFER_ELEM_IMME_CMD) { + tre.re_type = GSI_RE_IMMD_CMD; + } else if (xfer[i].type == GSI_XFER_ELEM_NOP) { + tre.re_type = GSI_RE_NOP; + } else { + GSIERR("chan_hdl=%lu bad RE type=%u\n", chan_hdl, + xfer[i].type); + break; + } + tre.bei = (xfer[i].flags & GSI_XFER_FLAG_BEI) ? 1 : 0; + tre.ieot = (xfer[i].flags & GSI_XFER_FLAG_EOT) ? 1 : 0; + tre.ieob = (xfer[i].flags & GSI_XFER_FLAG_EOB) ? 1 : 0; + tre.chain = (xfer[i].flags & GSI_XFER_FLAG_CHAIN) ? 
1 : 0; + + idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local); + tre_ptr = (struct gsi_tre *)(ctx->ring.base_va + + idx * ctx->ring.elem_sz); + + /* write the TRE to ring */ + *tre_ptr = tre; + ctx->user_data[idx] = xfer[i].xfer_user_data; + gsi_incr_ring_wp(&ctx->ring); + } + + if (i != num_xfers) { + /* reject all the xfers */ + ctx->ring.wp_local = wp_rollback; + spin_unlock_irqrestore(slock, flags); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx->stats.queued += num_xfers; + + /* ensure TRE is set before ringing doorbell */ + wmb(); + + if (ring_db) + gsi_ring_chan_doorbell(ctx); + + spin_unlock_irqrestore(slock, flags); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_queue_xfer); + +int gsi_start_xfer(unsigned long chan_hdl) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->props.prot != GSI_CHAN_PROT_GPI) { + GSIERR("op not supported for protocol %u\n", ctx->props.prot); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->ring.wp == ctx->ring.wp_local) + return GSI_STATUS_SUCCESS; + + gsi_ring_chan_doorbell(ctx); + + return GSI_STATUS_SUCCESS; +}; +EXPORT_SYMBOL(gsi_start_xfer); + +int gsi_poll_channel(unsigned long chan_hdl, + struct gsi_chan_xfer_notify *notify) +{ + int unused_var; + + return gsi_poll_n_channel(chan_hdl, notify, 1, &unused_var); +} +EXPORT_SYMBOL(gsi_poll_channel); + +int gsi_poll_n_channel(unsigned long chan_hdl, + struct gsi_chan_xfer_notify *notify, + int expected_num, int *actual_num) +{ + struct gsi_chan_ctx *ctx; + uint64_t rp; + int ee; + int i; + unsigned long flags; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch || !notify || + !actual_num || expected_num <= 0) { + GSIERR("bad params chan_hdl=%lu notify=%pK\n", + chan_hdl, notify); + GSIERR("actual_num=%pK expected_num=%d\n", + actual_num, expected_num); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + ee = gsi_ctx->per.ee; + + if (ctx->props.prot != GSI_CHAN_PROT_GPI) { + GSIERR("op not supported for protocol %u\n", ctx->props.prot); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (!ctx->evtr) { + GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + spin_lock_irqsave(&ctx->evtr->ring.slock, flags); + if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) { + /* update rp to see of we have anything new to process */ + gsi_writel(1 << ctx->evtr->id, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee)); + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee)); + rp |= ctx->ring.rp & 0xFFFFFFFF00000000; + + ctx->evtr->ring.rp = rp; + } + + if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) { + spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags); + ctx->stats.poll_empty++; + return GSI_STATUS_POLL_EMPTY; + } + + *actual_num = gsi_get_complete_num(&ctx->evtr->ring, + ctx->evtr->ring.rp_local, ctx->evtr->ring.rp); + + if (*actual_num > expected_num) + *actual_num = expected_num; + + for (i = 0; i < *actual_num; i++) + gsi_process_evt_re(ctx->evtr, notify + i, 
false); + + spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags); + ctx->stats.poll_ok++; + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_poll_n_channel); + +int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode) +{ + struct gsi_chan_ctx *ctx; + enum gsi_chan_mode curr; + unsigned long flags; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->props.prot != GSI_CHAN_PROT_GPI) { + GSIERR("op not supported for protocol %u\n", ctx->props.prot); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (!ctx->evtr || !ctx->evtr->props.exclusive) { + GSIERR("cannot configure mode on chan_hdl=%lu\n", + chan_hdl); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (atomic_read(&ctx->poll_mode)) + curr = GSI_CHAN_MODE_POLL; + else + curr = GSI_CHAN_MODE_CALLBACK; + + if (mode == curr) { + GSIERR("already in requested mode %u chan_hdl=%lu\n", + curr, chan_hdl); + return -GSI_STATUS_UNSUPPORTED_OP; + } + spin_lock_irqsave(&gsi_ctx->slock, flags); + if (curr == GSI_CHAN_MODE_CALLBACK && + mode == GSI_CHAN_MODE_POLL) { + __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0); + gsi_writel(1 << ctx->evtr->id, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee)); + atomic_set(&ctx->poll_mode, mode); + ctx->stats.callback_to_poll++; + } + + if (curr == GSI_CHAN_MODE_POLL && + mode == GSI_CHAN_MODE_CALLBACK) { + atomic_set(&ctx->poll_mode, mode); + __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0); + + /* + * In GSI 2.2 and 2.5 there is a limitation that can lead + * to losing an interrupt. 
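+	 * If the source register shows the IEOB already pending once the
+	 * interrupt is re-enabled, the code below reverts to polling mode
+	 * and returns -GSI_STATUS_PENDING_IRQ so the caller drains the
+	 * event ring first.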
For these versions an + * explicit check is needed after enabling the interrupt + */ + if (gsi_ctx->per.ver == GSI_VER_2_2 || + gsi_ctx->per.ver == GSI_VER_2_5) { + u32 src = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS( + gsi_ctx->per.ee)); + if (src & (1 << ctx->evtr->id)) { + __gsi_config_ieob_irq( + gsi_ctx->per.ee, 1 << ctx->evtr->id, 0); + gsi_writel(1 << ctx->evtr->id, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS( + gsi_ctx->per.ee)); + spin_unlock_irqrestore(&gsi_ctx->slock, flags); + spin_lock_irqsave(&ctx->ring.slock, flags); + atomic_set( + &ctx->poll_mode, GSI_CHAN_MODE_POLL); + spin_unlock_irqrestore( + &ctx->ring.slock, flags); + ctx->stats.poll_pending_irq++; + return -GSI_STATUS_PENDING_IRQ; + } + } + ctx->stats.poll_to_callback++; + } + spin_unlock_irqrestore(&gsi_ctx->slock, flags); + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_config_channel_mode); + +int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props, + union gsi_channel_scratch *scr) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || !scr) { + GSIERR("bad params props=%pK scr=%pK\n", props, scr); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&ctx->mlock); + *props = ctx->props; + *scr = ctx->scratch; + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_get_channel_cfg); + +int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props, + union gsi_channel_scratch *scr) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || gsi_validate_channel_props(props)) { + GSIERR("bad params props=%pK\n", props); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state != GSI_CHAN_STATE_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->props.ch_id != props->ch_id || + ctx->props.evt_ring_hdl != props->evt_ring_hdl) { + GSIERR("changing immutable fields not supported\n"); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&ctx->mlock); + ctx->props = *props; + if (scr) + ctx->scratch = *scr; + gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee, + ctx->evtr ? 
ctx->evtr->id : GSI_NO_EVT_ERINDEX); + gsi_init_chan_ring(&ctx->props, &ctx->ring); + + /* restore scratch */ + __gsi_write_channel_scratch(chan_hdl, ctx->scratch); + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_set_channel_cfg); + +static void gsi_configure_ieps(void *base, enum gsi_ver ver) +{ + void __iomem *gsi_base = base; + + gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS); + gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS); + gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS); + gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS); + gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS); + gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS); + gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS); + gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS); + gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS); + gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS); + gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS); + gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS); + gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS); + gsi_writel(14, gsi_base + GSI_GSI_IRAM_PTR_EV_DB_OFFS); + gsi_writel(15, gsi_base + GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS); + gsi_writel(16, gsi_base + GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS); + + if (ver >= GSI_VER_2_5) + gsi_writel(17, + gsi_base + GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS); +} + +static void gsi_configure_bck_prs_matrix(void *base) +{ + void __iomem *gsi_base = (void __iomem *) base; + + /* + * For now, these are default values. In the future, GSI FW image will + * produce optimized back-pressure values based on the FW image. + */ + gsi_writel(0xfffffffe, + gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, + gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffffbf, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffefff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffefff, + gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, + gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS); + gsi_writel(0x00000000, + gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS); + gsi_writel(0x00000000, + gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS); + gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS); + gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS); + gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_READ_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffefff, gsi_base + GSI_IC_READ_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_WRITE_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffdfff, gsi_base + GSI_IC_WRITE_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffffff, + gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS); + gsi_writel(0xff03ffff, + gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS); +} + +int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver) +{ + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return 
-GSI_STATUS_NODEV; + } + + if (!gsi_ctx->base) { + GSIERR("access to GSI HW has not been mapped\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) { + GSIERR("Incorrect version %d\n", ver); + return -GSI_STATUS_ERROR; + } + + gsi_writel(0, gsi_ctx->base + GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS); + gsi_writel(per_base_addr, + gsi_ctx->base + GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS); + gsi_configure_bck_prs_matrix((void *)gsi_ctx->base); + gsi_configure_ieps(gsi_ctx->base, ver); + + return 0; +} +EXPORT_SYMBOL(gsi_configure_regs); + +int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver) +{ + void __iomem *gsi_base; + uint32_t value; + + if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) { + GSIERR("Incorrect version %d\n", ver); + return -GSI_STATUS_ERROR; + } + + gsi_base = ioremap_nocache(gsi_base_addr, gsi_size); + if (!gsi_base) { + GSIERR("ioremap failed\n"); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + + /* Enable the MCS and set to x2 clocks */ + if (ver >= GSI_VER_1_2) { + value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) & + GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK); + gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS); + + value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) & + GSI_GSI_CFG_GSI_ENABLE_BMSK) | + ((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) & + GSI_GSI_CFG_MCS_ENABLE_BMSK) | + ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) & + GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) | + ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) & + GSI_GSI_CFG_UC_IS_MCS_BMSK) | + ((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) & + GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) | + ((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) & + GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK)); + } else { + value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) & + GSI_GSI_CFG_GSI_ENABLE_BMSK) | + ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) & + GSI_GSI_CFG_MCS_ENABLE_BMSK) | + ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) & + GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) | + ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) & + GSI_GSI_CFG_UC_IS_MCS_BMSK)); + } + + /* GSI frequency is peripheral frequency divided by 3 (2+1) */ + if (ver >= GSI_VER_2_5) + value |= ((2 << GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_SHFT) & + GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_BMSK); + gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS); + iounmap(gsi_base); + + return 0; + +} +EXPORT_SYMBOL(gsi_enable_fw); + +void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset, + unsigned long *size, enum gsi_ver ver) +{ + unsigned long maxn; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return; + } + + switch (ver) { + case GSI_VER_1_0: + case GSI_VER_1_2: + case GSI_VER_1_3: + maxn = GSI_GSI_INST_RAM_n_MAXn; + break; + case GSI_VER_2_0: + maxn = GSI_V2_0_GSI_INST_RAM_n_MAXn; + break; + case GSI_VER_2_2: + maxn = GSI_V2_2_GSI_INST_RAM_n_MAXn; + break; + case GSI_VER_2_5: + maxn = GSI_V2_5_GSI_INST_RAM_n_MAXn; + break; + case GSI_VER_ERR: + case GSI_VER_MAX: + default: + GSIERR("GSI version is not supported %d\n", ver); + WARN_ON(1); + return; + } + if (size) + *size = GSI_GSI_INST_RAM_n_WORD_SZ * (maxn + 1); + + if (base_offset) { + if (ver < GSI_VER_2_5) + *base_offset = GSI_GSI_INST_RAM_n_OFFS(0); + else + *base_offset = GSI_V2_5_GSI_INST_RAM_n_OFFS(0); + } +} +EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size); + +int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code) +{ + enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL; + uint32_t val; + int res; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return 
-GSI_STATUS_NODEV; + } + + if (chan_idx >= gsi_ctx->max_ch || !code) { + GSIERR("bad params chan_idx=%d\n", chan_idx); + return -GSI_STATUS_INVALID_PARAMS; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&gsi_ctx->gen_ee_cmd_compl); + + /* invalidate the response */ + gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0; + gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + + gsi_ctx->gen_ee_cmd_dbg.halt_channel++; + val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) | + ((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) & + GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) | + ((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) & + GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee)); + + res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl, + msecs_to_jiffies(GSI_CMD_TIMEOUT)); + if (res == 0) { + GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee); + res = -GSI_STATUS_TIMED_OUT; + goto free_lock; + } + + gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == + GSI_GEN_EE_CMD_RETURN_CODE_RETRY) { + GSIDBG("chan_idx=%u ee=%u busy try again\n", chan_idx, ee); + *code = GSI_GEN_EE_CMD_RETURN_CODE_RETRY; + res = -GSI_STATUS_AGAIN; + goto free_lock; + } + if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) { + GSIERR("No response received\n"); + res = -GSI_STATUS_ERROR; + goto free_lock; + } + + res = GSI_STATUS_SUCCESS; + *code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code; +free_lock: + mutex_unlock(&gsi_ctx->mlock); + + return res; +} +EXPORT_SYMBOL(gsi_halt_channel_ee); + +int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code) +{ + enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ALLOC_CHANNEL; + struct gsi_chan_ctx *ctx; + uint32_t val; + int res; + + if (chan_idx >= gsi_ctx->max_ch || !code) { + GSIERR("bad params chan_idx=%d\n", chan_idx); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (ee == 0) + return gsi_alloc_ap_channel(chan_idx); + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&gsi_ctx->gen_ee_cmd_compl); + + /* invalidate the response */ + gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0; + gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + + val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) | + ((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) & + GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) | + ((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) & + GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee)); + + res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl, + msecs_to_jiffies(GSI_CMD_TIMEOUT)); + if (res == 0) { + GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee); + res = -GSI_STATUS_TIMED_OUT; + goto free_lock; + } + + gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == + 
GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES) { + GSIDBG("chan_idx=%u ee=%u out of resources\n", chan_idx, ee); + *code = GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES; + res = -GSI_STATUS_RES_ALLOC_FAILURE; + goto free_lock; + } + if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) { + GSIERR("No response received\n"); + res = -GSI_STATUS_ERROR; + goto free_lock; + } + if (ee == 0) { + ctx = &gsi_ctx->chan[chan_idx]; + gsi_ctx->ch_dbg[chan_idx].ch_allocate++; + } + res = GSI_STATUS_SUCCESS; + *code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code; +free_lock: + mutex_unlock(&gsi_ctx->mlock); + + return res; +} +EXPORT_SYMBOL(gsi_alloc_channel_ee); + +int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index) +{ + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!gsi_ctx->base) { + GSIERR("access to GSI HW has not been mapped\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + gsi_writel(per_ep_index, + gsi_ctx->base + + GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(chan_num, ee)); + + return 0; +} +EXPORT_SYMBOL(gsi_map_virtual_ch_to_per_ep); + +static int msm_gsi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + pr_debug("gsi_probe\n"); + gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL); + if (!gsi_ctx) { + dev_err(dev, "failed to allocated gsi context\n"); + return -ENOMEM; + } + + gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES, + "gsi", 0); + if (gsi_ctx->ipc_logbuf == NULL) + GSIERR("failed to create IPC log, continue...\n"); + + gsi_ctx->dev = dev; + init_completion(&gsi_ctx->gen_ee_cmd_compl); + gsi_debugfs_init(); + + return 0; +} + +static struct platform_driver msm_gsi_driver = { + .probe = msm_gsi_probe, + .driver = { + .name = "gsi", + .of_match_table = msm_gsi_match, + }, +}; + +static struct platform_device *pdev; + +/** + * Module Init. + */ +static int __init gsi_init(void) +{ + int ret; + + pr_debug("%s\n", __func__); + + ret = platform_driver_register(&msm_gsi_driver); + if (ret < 0) + goto out; + + if (running_emulation) { + pdev = platform_device_register_simple("gsi", -1, NULL, 0); + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); + platform_driver_unregister(&msm_gsi_driver); + goto out; + } + } + +out: + return ret; +} +arch_initcall(gsi_init); + +/* + * Module exit. + */ +static void __exit gsi_exit(void) +{ + if (running_emulation && pdev) + platform_device_unregister(pdev); + platform_driver_unregister(&msm_gsi_driver); +} +module_exit(gsi_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Generic Software Interface (GSI)"); diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h new file mode 100644 index 000000000000..0f862b07ab0a --- /dev/null +++ b/drivers/platform/msm/gsi/gsi.h @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef GSI_H +#define GSI_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * The following for adding code (ie. for EMULATION) not found on x86. + */ +#if defined(CONFIG_IPA_EMULATION) +# include "gsi_emulation_stubs.h" +#endif + +#define GSI_ASSERT() \ + BUG() + +#define GSI_CHAN_MAX 31 +#define GSI_EVT_RING_MAX 24 +#define GSI_NO_EVT_ERINDEX 31 + +#define gsi_readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +#define gsi_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); }) + +#define GSI_IPC_LOGGING(buf, fmt, args...) 
\ + do { \ + if (buf) \ + ipc_log_string((buf), fmt, __func__, __LINE__, \ + ## args); \ + } while (0) + +#define GSIDBG(fmt, args...) \ + do { \ + dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \ + ## args);\ + if (gsi_ctx) { \ + GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \ + "%s:%d " fmt, ## args); \ + GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \ + "%s:%d " fmt, ## args); \ + } \ + } while (0) + +#define GSIDBG_LOW(fmt, args...) \ + do { \ + dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \ + ## args);\ + if (gsi_ctx) { \ + GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \ + "%s:%d " fmt, ## args); \ + } \ + } while (0) + +#define GSIERR(fmt, args...) \ + do { \ + dev_err(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \ + ## args);\ + if (gsi_ctx) { \ + GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \ + "%s:%d " fmt, ## args); \ + GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \ + "%s:%d " fmt, ## args); \ + } \ + } while (0) + +#define GSI_IPC_LOG_PAGES 50 + +enum gsi_evt_ring_state { + GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0, + GSI_EVT_RING_STATE_ALLOCATED = 0x1, + GSI_EVT_RING_STATE_ERROR = 0xf +}; + +enum gsi_chan_state { + GSI_CHAN_STATE_NOT_ALLOCATED = 0x0, + GSI_CHAN_STATE_ALLOCATED = 0x1, + GSI_CHAN_STATE_STARTED = 0x2, + GSI_CHAN_STATE_STOPPED = 0x3, + GSI_CHAN_STATE_STOP_IN_PROC = 0x4, + GSI_CHAN_STATE_ERROR = 0xf +}; + +struct gsi_ring_ctx { + spinlock_t slock; + unsigned long base_va; + uint64_t base; + uint64_t wp; + uint64_t rp; + uint64_t wp_local; + uint64_t rp_local; + uint16_t len; + uint8_t elem_sz; + uint16_t max_num_elem; + uint64_t end; +}; + +struct gsi_chan_dp_stats { + unsigned long ch_below_lo; + unsigned long ch_below_hi; + unsigned long ch_above_hi; + unsigned long empty_time; + unsigned long last_timestamp; +}; + +struct gsi_chan_stats { + unsigned long queued; + unsigned long completed; + unsigned long callback_to_poll; + unsigned long poll_to_callback; + unsigned long poll_pending_irq; + unsigned long invalid_tre_error; + unsigned long poll_ok; + unsigned long poll_empty; + struct gsi_chan_dp_stats dp; +}; + +struct gsi_chan_ctx { + struct gsi_chan_props props; + enum gsi_chan_state state; + struct gsi_ring_ctx ring; + void **user_data; + struct gsi_evt_ctx *evtr; + struct mutex mlock; + struct completion compl; + bool allocated; + atomic_t poll_mode; + union __packed gsi_channel_scratch scratch; + struct gsi_chan_stats stats; + bool enable_dp_stats; + bool print_dp_stats; +}; + +struct gsi_evt_stats { + unsigned long completed; +}; + +struct gsi_evt_ctx { + struct gsi_evt_ring_props props; + enum gsi_evt_ring_state state; + uint8_t id; + struct gsi_ring_ctx ring; + struct mutex mlock; + struct completion compl; + struct gsi_chan_ctx *chan; + atomic_t chan_ref_cnt; + union __packed gsi_evt_scratch scratch; + struct gsi_evt_stats stats; +}; + +struct gsi_ee_scratch { + union __packed { + struct { + uint32_t inter_ee_cmd_return_code:3; + uint32_t resvd1:2; + uint32_t generic_ee_cmd_return_code:3; + uint32_t resvd2:7; + uint32_t max_usb_pkt_size:1; + uint32_t resvd3:8; + uint32_t mhi_base_chan_idx:8; + } s; + uint32_t val; + } word0; + uint32_t word1; +}; + +struct ch_debug_stats { + unsigned long ch_allocate; + unsigned long ch_start; + unsigned long ch_stop; + unsigned long ch_reset; + unsigned long ch_de_alloc; + unsigned long ch_db_stop; + unsigned long cmd_completed; +}; + +struct gsi_generic_ee_cmd_debug_stats { + unsigned long halt_channel; +}; + +struct gsi_ctx { + void __iomem *base; + struct device *dev; + struct gsi_per_props per; + bool per_registered; + struct 
gsi_chan_ctx chan[GSI_CHAN_MAX]; + struct ch_debug_stats ch_dbg[GSI_CHAN_MAX]; + struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX]; + struct gsi_generic_ee_cmd_debug_stats gen_ee_cmd_dbg; + struct mutex mlock; + spinlock_t slock; + unsigned long evt_bmap; + bool enabled; + atomic_t num_chan; + atomic_t num_evt_ring; + struct gsi_ee_scratch scratch; + int num_ch_dp_stats; + struct workqueue_struct *dp_stat_wq; + u32 max_ch; + u32 max_ev; + struct completion gen_ee_cmd_compl; + void *ipc_logbuf; + void *ipc_logbuf_low; + /* + * The following used only on emulation systems. + */ + void __iomem *intcntrlr_base; + u32 intcntrlr_mem_size; + irq_handler_t intcntrlr_gsi_isr; + irq_handler_t intcntrlr_client_isr; +}; + +enum gsi_re_type { + GSI_RE_XFER = 0x2, + GSI_RE_IMMD_CMD = 0x3, + GSI_RE_NOP = 0x4, +}; + +struct __packed gsi_tre { + uint64_t buffer_ptr; + uint16_t buf_len; + uint16_t resvd1; + uint16_t chain:1; + uint16_t resvd4:7; + uint16_t ieob:1; + uint16_t ieot:1; + uint16_t bei:1; + uint16_t resvd3:5; + uint8_t re_type; + uint8_t resvd2; +}; + +struct __packed gsi_xfer_compl_evt { + uint64_t xfer_ptr; + uint16_t len; + uint8_t resvd1; + uint8_t code; /* see gsi_chan_evt */ + uint16_t resvd; + uint8_t type; + uint8_t chid; +}; + +enum gsi_err_type { + GSI_ERR_TYPE_GLOB = 0x1, + GSI_ERR_TYPE_CHAN = 0x2, + GSI_ERR_TYPE_EVT = 0x3, +}; + +enum gsi_err_code { + GSI_INVALID_TRE_ERR = 0x1, + GSI_OUT_OF_BUFFERS_ERR = 0x2, + GSI_OUT_OF_RESOURCES_ERR = 0x3, + GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4, + GSI_EVT_RING_EMPTY_ERR = 0x5, + GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6, + GSI_HWO_1_ERR = 0x8 +}; + +struct __packed gsi_log_err { + uint32_t arg3:4; + uint32_t arg2:4; + uint32_t arg1:4; + uint32_t code:4; + uint32_t resvd:3; + uint32_t virt_idx:5; + uint32_t err_type:4; + uint32_t ee:4; +}; + +enum gsi_ch_cmd_opcode { + GSI_CH_ALLOCATE = 0x0, + GSI_CH_START = 0x1, + GSI_CH_STOP = 0x2, + GSI_CH_RESET = 0x9, + GSI_CH_DE_ALLOC = 0xa, + GSI_CH_DB_STOP = 0xb, +}; + +enum gsi_evt_ch_cmd_opcode { + GSI_EVT_ALLOCATE = 0x0, + GSI_EVT_RESET = 0x9, + GSI_EVT_DE_ALLOC = 0xa, +}; + +enum gsi_generic_ee_cmd_opcode { + GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1, + GSI_GEN_EE_CMD_ALLOC_CHANNEL = 0x2, +}; + +enum gsi_generic_ee_cmd_return_code { + GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS = 0x1, + GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING = 0x2, + GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_DIRECTION = 0x3, + GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4, + GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5, + GSI_GEN_EE_CMD_RETURN_CODE_RETRY = 0x6, + GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES = 0x7, +}; + +extern struct gsi_ctx *gsi_ctx; +void gsi_debugfs_init(void); +uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr); +void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used); + +#endif diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c new file mode 100644 index 000000000000..1b0af668b9de --- /dev/null +++ b/drivers/platform/msm/gsi/gsi_dbg.c @@ -0,0 +1,764 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include "gsi_reg.h" +#include "gsi.h" + +#define TERR(fmt, args...) \ + pr_err("%s:%d " fmt, __func__, __LINE__, ## args) +#define TDBG(fmt, args...) \ + pr_debug("%s:%d " fmt, __func__, __LINE__, ## args) +#define PRT_STAT(fmt, args...) 
\ + pr_err(fmt, ## args) + +static struct dentry *dent; +static char dbg_buff[4096]; +static void *gsi_ipc_logbuf_low; + +static void gsi_wq_print_dp_stats(struct work_struct *work); +static DECLARE_DELAYED_WORK(gsi_print_dp_stats_work, gsi_wq_print_dp_stats); +static void gsi_wq_update_dp_stats(struct work_struct *work); +static DECLARE_DELAYED_WORK(gsi_update_dp_stats_work, gsi_wq_update_dp_stats); + +static ssize_t gsi_dump_evt(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + u32 arg1; + u32 arg2; + unsigned long missing; + char *sptr, *token; + uint32_t val; + struct gsi_evt_ctx *ctx; + uint16_t i; + + if (sizeof(dbg_buff) < count + 1) + return -EINVAL; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + + sptr = dbg_buff; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &arg1)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &arg2)) + return -EINVAL; + + TDBG("arg1=%u arg2=%u\n", arg1, arg2); + + if (arg1 >= gsi_ctx->max_ev) { + TERR("invalid evt ring id %u\n", arg1); + return -EINVAL; + } + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX0 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX1 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX2 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX3 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX4 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX5 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX6 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX7 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX8 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX9 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX10 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX11 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX12 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX13 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d SCR0 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d SCR1 0x%x\n", arg1, val); + + if (arg2) { + ctx = &gsi_ctx->evtr[arg1]; + + if (ctx->props.ring_base_vaddr) { + for (i = 0; i < ctx->props.ring_len / 16; i++) + TERR("EV%2d (0x%08llx) %08x %08x %08x %08x\n", + arg1, ctx->props.ring_base_addr + i * 16, + *(u32 *)((u8 
*)ctx->props.ring_base_vaddr + + i * 16 + 0), + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 4), + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 8), + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 12)); + } else { + TERR("No VA supplied for event ring id %u\n", arg1); + } + } + + return count; +} + +static ssize_t gsi_dump_ch(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + u32 arg1; + u32 arg2; + unsigned long missing; + char *sptr, *token; + uint32_t val; + struct gsi_chan_ctx *ctx; + uint16_t i; + + if (sizeof(dbg_buff) < count + 1) + return -EINVAL; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + + sptr = dbg_buff; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &arg1)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &arg2)) + return -EINVAL; + + TDBG("arg1=%u arg2=%u\n", arg1, arg2); + + if (arg1 >= gsi_ctx->max_ch) { + TERR("invalid chan id %u\n", arg1); + return -EINVAL; + } + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX0 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX1 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX2 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX3 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX4 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX5 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX6 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX7 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(arg1, + gsi_ctx->per.ee)); + TERR("CH%2d REFRP 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(arg1, + gsi_ctx->per.ee)); + TERR("CH%2d REFWP 0x%x\n", arg1, val); + if (gsi_ctx->per.ver >= GSI_VER_2_5) { + val = gsi_readl(gsi_ctx->base + + GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(arg1, gsi_ctx->per.ee)); + } else { + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_QOS_OFFS(arg1, gsi_ctx->per.ee)); + } + TERR("CH%2d QOS 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d SCR0 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d SCR1 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d SCR2 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d SCR3 0x%x\n", arg1, val); + + if (arg2) { + ctx = &gsi_ctx->chan[arg1]; + + if (ctx->props.ring_base_vaddr) { + for (i = 0; i < ctx->props.ring_len / 16; i++) + TERR("CH%2d (0x%08llx) %08x %08x %08x %08x\n", + arg1, ctx->props.ring_base_addr + i * 16, + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + 
+ i * 16 + 0), + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 4), + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 8), + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 12)); + } else { + TERR("No VA supplied for chan id %u\n", arg1); + } + } + + return count; +} + +static void gsi_dump_ch_stats(struct gsi_chan_ctx *ctx) +{ + if (!ctx->allocated) + return; + + PRT_STAT("CH%2d:\n", ctx->props.ch_id); + PRT_STAT("queued=%lu compl=%lu\n", + ctx->stats.queued, + ctx->stats.completed); + PRT_STAT("cb->poll=%lu poll->cb=%lu poll_pend_irq=%lu\n", + ctx->stats.callback_to_poll, + ctx->stats.poll_to_callback, + ctx->stats.poll_pending_irq); + PRT_STAT("invalid_tre_error=%lu\n", + ctx->stats.invalid_tre_error); + PRT_STAT("poll_ok=%lu poll_empty=%lu\n", + ctx->stats.poll_ok, ctx->stats.poll_empty); + if (ctx->evtr) + PRT_STAT("compl_evt=%lu\n", + ctx->evtr->stats.completed); + + PRT_STAT("ch_below_lo=%lu\n", ctx->stats.dp.ch_below_lo); + PRT_STAT("ch_below_hi=%lu\n", ctx->stats.dp.ch_below_hi); + PRT_STAT("ch_above_hi=%lu\n", ctx->stats.dp.ch_above_hi); + PRT_STAT("time_empty=%lums\n", ctx->stats.dp.empty_time); + PRT_STAT("\n"); +} + +static ssize_t gsi_dump_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + int ch_id; + int min, max; + + if (sizeof(dbg_buff) < count + 1) + goto error; + + if (copy_from_user(dbg_buff, buf, count)) + goto error; + + dbg_buff[count] = '\0'; + + if (kstrtos32(dbg_buff, 0, &ch_id)) + goto error; + + if (ch_id == -1) { + min = 0; + max = gsi_ctx->max_ch; + } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || + !gsi_ctx->chan[ch_id].allocated) { + goto error; + } else { + min = ch_id; + max = ch_id + 1; + } + + for (ch_id = min; ch_id < max; ch_id++) + gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]); + + return count; +error: + TERR("Usage: echo ch_id > stats. 
Use -1 for all\n"); + return -EINVAL; +} + +static int gsi_dbg_create_stats_wq(void) +{ + gsi_ctx->dp_stat_wq = + create_singlethread_workqueue("gsi_stat"); + if (!gsi_ctx->dp_stat_wq) { + TERR("failed create workqueue\n"); + return -ENOMEM; + } + + return 0; +} + +static void gsi_dbg_destroy_stats_wq(void) +{ + cancel_delayed_work_sync(&gsi_update_dp_stats_work); + cancel_delayed_work_sync(&gsi_print_dp_stats_work); + flush_workqueue(gsi_ctx->dp_stat_wq); + destroy_workqueue(gsi_ctx->dp_stat_wq); + gsi_ctx->dp_stat_wq = NULL; +} + +static ssize_t gsi_enable_dp_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + int ch_id; + bool enable; + int ret; + + if (sizeof(dbg_buff) < count + 1) + goto error; + + if (copy_from_user(dbg_buff, buf, count)) + goto error; + + dbg_buff[count] = '\0'; + + if (dbg_buff[0] != '+' && dbg_buff[0] != '-') + goto error; + + enable = (dbg_buff[0] == '+'); + + if (kstrtos32(dbg_buff + 1, 0, &ch_id)) + goto error; + + if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || + !gsi_ctx->chan[ch_id].allocated) { + goto error; + } + + if (gsi_ctx->chan[ch_id].enable_dp_stats == enable) { + TERR("ch_%d: already enabled/disabled\n", ch_id); + return -EINVAL; + } + gsi_ctx->chan[ch_id].enable_dp_stats = enable; + + if (enable) + gsi_ctx->num_ch_dp_stats++; + else + gsi_ctx->num_ch_dp_stats--; + + if (enable) { + if (gsi_ctx->num_ch_dp_stats == 1) { + ret = gsi_dbg_create_stats_wq(); + if (ret) + return ret; + } + cancel_delayed_work_sync(&gsi_update_dp_stats_work); + queue_delayed_work(gsi_ctx->dp_stat_wq, + &gsi_update_dp_stats_work, msecs_to_jiffies(10)); + } else if (!enable && gsi_ctx->num_ch_dp_stats == 0) { + gsi_dbg_destroy_stats_wq(); + } + + return count; +error: + TERR("Usage: echo [+-]ch_id > enable_dp_stats\n"); + return -EINVAL; +} + +static ssize_t gsi_set_max_elem_dp_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + u32 ch_id; + u32 max_elem; + unsigned long missing; + char *sptr, *token; + + + if (sizeof(dbg_buff) < count + 1) + goto error; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + goto error; + + dbg_buff[count] = '\0'; + + sptr = dbg_buff; + + token = strsep(&sptr, " "); + if (!token) { + TERR("\n"); + goto error; + } + + if (kstrtou32(token, 0, &ch_id)) { + TERR("\n"); + goto error; + } + + token = strsep(&sptr, " "); + if (!token) { + /* get */ + if (kstrtou32(dbg_buff, 0, &ch_id)) + goto error; + if (ch_id >= gsi_ctx->max_ch) + goto error; + PRT_STAT("ch %d: max_re_expected=%d\n", ch_id, + gsi_ctx->chan[ch_id].props.max_re_expected); + return count; + } + if (kstrtou32(token, 0, &max_elem)) { + TERR("\n"); + goto error; + } + + TDBG("ch_id=%u max_elem=%u\n", ch_id, max_elem); + + if (ch_id >= gsi_ctx->max_ch) { + TERR("invalid chan id %u\n", ch_id); + goto error; + } + + gsi_ctx->chan[ch_id].props.max_re_expected = max_elem; + + return count; + +error: + TERR("Usage: (set) echo > max_elem_dp_stats\n"); + TERR("Usage: (get) echo > max_elem_dp_stats\n"); + return -EINVAL; +} + +static void gsi_wq_print_dp_stats(struct work_struct *work) +{ + int ch_id; + + for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) { + if (gsi_ctx->chan[ch_id].print_dp_stats) + gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]); + } + + queue_delayed_work(gsi_ctx->dp_stat_wq, &gsi_print_dp_stats_work, + msecs_to_jiffies(1000)); +} + +static void gsi_dbg_update_ch_dp_stats(struct gsi_chan_ctx *ctx) +{ + uint16_t start_hw; + uint16_t end_hw; + uint64_t rp_hw; + uint64_t wp_hw; + int ee = gsi_ctx->per.ee; + 
uint16_t used_hw; + + rp_hw = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee)); + rp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) + << 32; + + wp_hw = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee)); + wp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) + << 32; + + start_hw = gsi_find_idx_from_addr(&ctx->ring, rp_hw); + end_hw = gsi_find_idx_from_addr(&ctx->ring, wp_hw); + + if (end_hw >= start_hw) + used_hw = end_hw - start_hw; + else + used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end_hw); + + TDBG("ch %d used %d\n", ctx->props.ch_id, used_hw); + gsi_update_ch_dp_stats(ctx, used_hw); +} + +static void gsi_wq_update_dp_stats(struct work_struct *work) +{ + int ch_id; + + for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) { + if (gsi_ctx->chan[ch_id].allocated && + gsi_ctx->chan[ch_id].enable_dp_stats) + gsi_dbg_update_ch_dp_stats(&gsi_ctx->chan[ch_id]); + } + + queue_delayed_work(gsi_ctx->dp_stat_wq, &gsi_update_dp_stats_work, + msecs_to_jiffies(10)); +} + + +static ssize_t gsi_rst_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + int ch_id; + int min, max; + + if (sizeof(dbg_buff) < count + 1) + goto error; + + if (copy_from_user(dbg_buff, buf, count)) + goto error; + + dbg_buff[count] = '\0'; + + if (kstrtos32(dbg_buff, 0, &ch_id)) + goto error; + + if (ch_id == -1) { + min = 0; + max = gsi_ctx->max_ch; + } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || + !gsi_ctx->chan[ch_id].allocated) { + goto error; + } else { + min = ch_id; + max = ch_id + 1; + } + + for (ch_id = min; ch_id < max; ch_id++) + memset(&gsi_ctx->chan[ch_id].stats, 0, + sizeof(gsi_ctx->chan[ch_id].stats)); + + return count; +error: + TERR("Usage: echo ch_id > rst_stats. 
Use -1 for all\n"); + return -EINVAL; +} + +static ssize_t gsi_print_dp_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + int ch_id; + bool enable; + int ret; + + if (sizeof(dbg_buff) < count + 1) + goto error; + + if (copy_from_user(dbg_buff, buf, count)) + goto error; + + dbg_buff[count] = '\0'; + + if (dbg_buff[0] != '+' && dbg_buff[0] != '-') + goto error; + + enable = (dbg_buff[0] == '+'); + + if (kstrtos32(dbg_buff + 1, 0, &ch_id)) + goto error; + + if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || + !gsi_ctx->chan[ch_id].allocated) { + goto error; + } + + if (gsi_ctx->chan[ch_id].print_dp_stats == enable) { + TERR("ch_%d: already enabled/disabled\n", ch_id); + return -EINVAL; + } + gsi_ctx->chan[ch_id].print_dp_stats = enable; + + if (enable) + gsi_ctx->num_ch_dp_stats++; + else + gsi_ctx->num_ch_dp_stats--; + + if (enable) { + if (gsi_ctx->num_ch_dp_stats == 1) { + ret = gsi_dbg_create_stats_wq(); + if (ret) + return ret; + } + cancel_delayed_work_sync(&gsi_print_dp_stats_work); + queue_delayed_work(gsi_ctx->dp_stat_wq, + &gsi_print_dp_stats_work, msecs_to_jiffies(10)); + } else if (!enable && gsi_ctx->num_ch_dp_stats == 0) { + gsi_dbg_destroy_stats_wq(); + } + + return count; +error: + TERR("Usage: echo [+-]ch_id > print_dp_stats\n"); + return -EINVAL; +} + +static ssize_t gsi_enable_ipc_low(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EINVAL; + + mutex_lock(&gsi_ctx->mlock); + if (option) { + if (!gsi_ipc_logbuf_low) { + gsi_ipc_logbuf_low = + ipc_log_context_create(GSI_IPC_LOG_PAGES, + "gsi_low", 0); + if (gsi_ipc_logbuf_low == NULL) + TERR("failed to get ipc_logbuf_low\n"); + } + gsi_ctx->ipc_logbuf_low = gsi_ipc_logbuf_low; + } else { + gsi_ctx->ipc_logbuf_low = NULL; + } + mutex_unlock(&gsi_ctx->mlock); + + return count; +} + +const struct file_operations gsi_ev_dump_ops = { + .write = gsi_dump_evt, +}; + +const struct file_operations gsi_ch_dump_ops = { + .write = gsi_dump_ch, +}; + +const struct file_operations gsi_stats_ops = { + .write = gsi_dump_stats, +}; + +const struct file_operations gsi_enable_dp_stats_ops = { + .write = gsi_enable_dp_stats, +}; + +const struct file_operations gsi_max_elem_dp_stats_ops = { + .write = gsi_set_max_elem_dp_stats, +}; + +const struct file_operations gsi_rst_stats_ops = { + .write = gsi_rst_stats, +}; + +const struct file_operations gsi_print_dp_stats_ops = { + .write = gsi_print_dp_stats, +}; + +const struct file_operations gsi_ipc_low_ops = { + .write = gsi_enable_ipc_low, +}; + +void gsi_debugfs_init(void) +{ + static struct dentry *dfile; + const mode_t write_only_mode = 0220; + + dent = debugfs_create_dir("gsi", 0); + if (IS_ERR(dent)) { + TERR("fail to create dir\n"); + return; + } + + dfile = debugfs_create_file("ev_dump", write_only_mode, + dent, 0, &gsi_ev_dump_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create ev_dump file\n"); + goto fail; + } + + dfile = debugfs_create_file("ch_dump", write_only_mode, + dent, 0, &gsi_ch_dump_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create ch_dump file\n"); + goto fail; + } + + dfile = debugfs_create_file("stats", write_only_mode, dent, + 0, &gsi_stats_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create stats file\n"); + goto fail; + } + + 
dfile = debugfs_create_file("enable_dp_stats", write_only_mode, dent, + 0, &gsi_enable_dp_stats_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create stats file\n"); + goto fail; + } + + dfile = debugfs_create_file("max_elem_dp_stats", write_only_mode, + dent, 0, &gsi_max_elem_dp_stats_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create stats file\n"); + goto fail; + } + + dfile = debugfs_create_file("rst_stats", write_only_mode, + dent, 0, &gsi_rst_stats_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create stats file\n"); + goto fail; + } + + dfile = debugfs_create_file("print_dp_stats", + write_only_mode, dent, 0, &gsi_print_dp_stats_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create stats file\n"); + goto fail; + } + + dfile = debugfs_create_file("ipc_low", write_only_mode, + dent, 0, &gsi_ipc_low_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("could not create ipc_low\n"); + goto fail; + } + + return; +fail: + debugfs_remove_recursive(dent); +} + diff --git a/drivers/platform/msm/gsi/gsi_emulation.c b/drivers/platform/msm/gsi/gsi_emulation.c new file mode 100644 index 000000000000..8a1d6db0022f --- /dev/null +++ b/drivers/platform/msm/gsi/gsi_emulation.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include "gsi_emulation.h" + +/* + * ***************************************************************************** + * The following used to set up the EMULATION interrupt controller... + * ***************************************************************************** + */ +int setup_emulator_cntrlr( + void __iomem *intcntrlr_base, + u32 intcntrlr_mem_size) +{ + uint32_t val, ver, intrCnt, rangeCnt, range; + + val = gsi_emu_readl(intcntrlr_base + GE_INT_CTL_VER_CNT); + + intrCnt = val & 0xFFFF; + ver = (val >> 16) & 0xFFFF; + rangeCnt = intrCnt / 32; + + GSIDBG( + "CTL_VER_CNT reg val(0x%x) intr cnt(%u) cntrlr ver(0x%x) rangeCnt(%u)\n", + val, intrCnt, ver, rangeCnt); + + /* + * Verify the interrupt controller version + */ + if (ver == 0 || ver == 0xFFFF || ver < DEO_IC_INT_CTL_VER_MIN) { + GSIERR( + "Error: invalid interrupt controller version 0x%x\n", + ver); + return -GSI_STATUS_INVALID_PARAMS; + } + + /* + * Verify the interrupt count + * + * NOTE: intrCnt must be at least one block and multiple of 32 + */ + if ((intrCnt % 32) != 0) { + GSIERR( + "Invalid interrupt count read from HW 0x%04x\n", + intrCnt); + return -GSI_STATUS_ERROR; + } + + /* + * Calculate number of ranges used, each range handles 32 int lines + */ + if (rangeCnt > DEO_IC_MAX_RANGE_CNT) { + GSIERR( + "SW interrupt limit(%u) passed, increase DEO_IC_MAX_RANGE_CNT(%u)\n", + rangeCnt, + DEO_IC_MAX_RANGE_CNT); + return -GSI_STATUS_ERROR; + } + + /* + * Let's take the last register offset minus the first + * register offset (ie. range) and compare it to the interrupt + * controller's dtsi defined memory size. The range better + * fit within the size. + */ + val = GE_SOFT_INT_n(rangeCnt-1) - GE_INT_CTL_VER_CNT; + if (val > intcntrlr_mem_size) { + GSIERR( + "Interrupt controller register range (%u) exceeds dtsi provisioned size (%u)\n", + val, intcntrlr_mem_size); + return -GSI_STATUS_ERROR; + } + + /* + * The following will disable the emulators interrupt controller, + * so that we can config it... 
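+ * (With the master enable cleared, the per-range ENABLE_CLEAR, CLEAR and
+ * TYPE registers below can be initialized without spurious interrupts.)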
+ */ + GSIDBG("Writing GE_INT_MASTER_ENABLE\n"); + gsi_emu_writel( + 0x0, + intcntrlr_base + GE_INT_MASTER_ENABLE); + + /* + * Init register maps of all ranges + */ + for (range = 0; range < rangeCnt; range++) { + /* + * Disable all int sources by setting all enable clear bits + */ + GSIDBG("Writing GE_INT_ENABLE_CLEAR_n(%u)\n", range); + gsi_emu_writel( + 0xFFFFFFFF, + intcntrlr_base + GE_INT_ENABLE_CLEAR_n(range)); + + /* + * Clear all raw statuses + */ + GSIDBG("Writing GE_INT_CLEAR_n(%u)\n", range); + gsi_emu_writel( + 0xFFFFFFFF, + intcntrlr_base + GE_INT_CLEAR_n(range)); + + /* + * Init all int types + */ + GSIDBG("Writing GE_INT_TYPE_n(%u)\n", range); + gsi_emu_writel( + 0x0, + intcntrlr_base + GE_INT_TYPE_n(range)); + } + + /* + * The following tells the interrupt controller to interrupt us + * when it sees interupts from ipa and/or gsi. + * + * Interrupts: + * =================================================================== + * DUT0 [ 63 : 16 ] + * ipa_irq [ 3 : 0 ] <---HERE + * ipa_gsi_bam_irq [ 7 : 4 ] <---HERE + * ipa_bam_apu_sec_error_irq [ 8 ] + * ipa_bam_apu_non_sec_error_irq [ 9 ] + * ipa_bam_xpu2_msa_intr [ 10 ] + * ipa_vmidmt_nsgcfgirpt [ 11 ] + * ipa_vmidmt_nsgirpt [ 12 ] + * ipa_vmidmt_gcfgirpt [ 13 ] + * ipa_vmidmt_girpt [ 14 ] + * bam_xpu3_qad_non_secure_intr_sp [ 15 ] + */ + GSIDBG("Writing GE_INT_ENABLE_n(0)\n"); + gsi_emu_writel( + 0x00FF, /* See <---HERE above */ + intcntrlr_base + GE_INT_ENABLE_n(0)); + + /* + * The following will enable the IC post config... + */ + GSIDBG("Writing GE_INT_MASTER_ENABLE\n"); + gsi_emu_writel( + 0x1, + intcntrlr_base + GE_INT_MASTER_ENABLE); + + return 0; +} + +/* + * ***************************************************************************** + * The following for EMULATION hard irq... + * ***************************************************************************** + */ +irqreturn_t emulator_hard_irq_isr( + int irq, + void *ctxt) +{ + struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt; + + uint32_t val; + + val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_INT_MASTER_STATUS); + + /* + * If bit zero is set, interrupt is for us, hence return IRQ_NONE + * when it's not set... + */ + if (!(val & 0x00000001)) + return IRQ_NONE; + + /* + * The following will mask (ie. turn off) future interrupts from + * the emulator's interrupt controller. It wil stay this way until + * we turn back on...which will be done in the bottom half + * (ie. emulator_soft_irq_isr)... + */ + gsi_emu_writel( + 0x0, + gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE); + + return IRQ_WAKE_THREAD; +} + +/* + * ***************************************************************************** + * The following for EMULATION soft irq... + * ***************************************************************************** + */ +irqreturn_t emulator_soft_irq_isr( + int irq, + void *ctxt) +{ + struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt; + + irqreturn_t retVal = IRQ_HANDLED; + uint32_t val; + + val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_IRQ_STATUS_n(0)); + + GSIDBG("Got irq(%d) with status(0x%08X)\n", irq, val); + + if (val & 0xF0 && gsi_ctx_ptr->intcntrlr_gsi_isr) { + GSIDBG("Got gsi interrupt\n"); + retVal = gsi_ctx_ptr->intcntrlr_gsi_isr(irq, ctxt); + } + + if (val & 0x0F && gsi_ctx_ptr->intcntrlr_client_isr) { + GSIDBG("Got ipa interrupt\n"); + retVal = gsi_ctx_ptr->intcntrlr_client_isr(irq, 0); + } + + /* + * The following will clear the interrupts... 
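+ * (GE_INT_CLEAR_n is write-1-to-clear, so writing 0xFFFFFFFF acknowledges
+ * every latched source in range 0 before GE_INT_OUT_ENABLE is set below.)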
+ */ + gsi_emu_writel( + 0xFFFFFFFF, + gsi_ctx_ptr->intcntrlr_base + GE_INT_CLEAR_n(0)); + + /* + * The following will unmask (ie. turn on) future interrupts from + * the emulator's interrupt controller... + */ + gsi_emu_writel( + 0x1, + gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE); + + return retVal; +} diff --git a/drivers/platform/msm/gsi/gsi_emulation.h b/drivers/platform/msm/gsi/gsi_emulation.h new file mode 100644 index 000000000000..837b58434949 --- /dev/null +++ b/drivers/platform/msm/gsi/gsi_emulation.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#if !defined(_GSI_EMULATION_H_) +# define _GSI_EMULATION_H_ + +# include + +# include "gsi.h" +# include "gsi_reg.h" +# include "gsi_emulation_stubs.h" + +# define gsi_emu_readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +# define gsi_emu_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); }) + +# define CNTRLR_BASE 0 + +/* + * The following file contains definitions and declarations that are + * germane only to the IPA emulation system, which is run from an X86 + * environment. Declaration's for non-X86 (ie. arm) are merely stubs + * to facilitate compile and link. + * + * Interrupt controller registers. + * Descriptions taken from the EMULATION interrupt controller SWI. + * - There is only one Master Enable register + * - Each group of 32 interrupt lines (range) is controlled by 8 registers, + * which are consecutive in memory: + * GE_INT_ENABLE_n + * GE_INT_ENABLE_CLEAR_n + * GE_INT_ENABLE_SET_n + * GE_INT_TYPE_n + * GE_IRQ_STATUS_n + * GE_RAW_STATUS_n + * GE_INT_CLEAR_n + * GE_SOFT_INT_n + * - After the above 8 registers, there are the registers of the next + * group (range) of 32 interrupt lines, and so on. + */ + +/** @brief The interrupt controller version and interrupt count register. + * Specifies interrupt controller version (upper 16 bits) and the + * number of interrupt lines supported by HW (lower 16 bits). + */ +# define GE_INT_CTL_VER_CNT \ + (CNTRLR_BASE + 0x0000) + +/** @brief Enable or disable physical IRQ output signal to the system, + * not affecting any status registers. + * + * 0x0 : DISABLE IRQ output disabled + * 0x1 : ENABLE IRQ output enabled + */ +# define GE_INT_OUT_ENABLE \ + (CNTRLR_BASE + 0x0004) + +/** @brief The IRQ master enable register. + * Bit #0: IRQ_ENABLE, set 0 to disable, 1 to enable. + */ +# define GE_INT_MASTER_ENABLE \ + (CNTRLR_BASE + 0x0008) + +# define GE_INT_MASTER_STATUS \ + (CNTRLR_BASE + 0x000C) + +/** @brief Each bit disables (bit=0, default) or enables (bit=1) the + * corresponding interrupt source + */ +# define GE_INT_ENABLE_n(n) \ + (CNTRLR_BASE + 0x0010 + 0x20 * (n)) + +/** @brief Write bit=1 to clear (to 0) the corresponding bit(s) in INT_ENABLE. + * Does nothing for bit=0 + */ +# define GE_INT_ENABLE_CLEAR_n(n) \ + (CNTRLR_BASE + 0x0014 + 0x20 * (n)) + +/** @brief Write bit=1 to set (to 1) the corresponding bit(s) in INT_ENABLE. + * Does nothing for bit=0 + */ +# define GE_INT_ENABLE_SET_n(n) \ + (CNTRLR_BASE + 0x0018 + 0x20 * (n)) + +/** @brief Select level (bit=0, default) or edge (bit=1) sensitive input + * detection logic for each corresponding interrupt source + */ +# define GE_INT_TYPE_n(n) \ + (CNTRLR_BASE + 0x001C + 0x20 * (n)) + +/** @brief Shows the interrupt sources captured in RAW_STATUS that have been + * steered to irq_n by INT_SELECT. Interrupts must also be enabled by + * INT_ENABLE and MASTER_ENABLE. Read only register. 
+ * Bit values: 1=active, 0=inactive + */ +# define GE_IRQ_STATUS_n(n) \ + (CNTRLR_BASE + 0x0020 + 0x20 * (n)) + +/** @brief Shows the interrupt sources that have been latched by the input + * logic of the Interrupt Controller. Read only register. + * Bit values: 1=active, 0=inactive + */ +# define GE_RAW_STATUS_n(n) \ + (CNTRLR_BASE + 0x0024 + 0x20 * (n)) + +/** @brief Write bit=1 to clear the corresponding bit(s) in RAW_STATUS. + * Does nothing for bit=0 + */ +# define GE_INT_CLEAR_n(n) \ + (CNTRLR_BASE + 0x0028 + 0x20 * (n)) + +/** @brief Write bit=1 to set the corresponding bit(s) in RAW_STATUS. + * Does nothing for bit=0. + * @note Only functional for edge detected interrupts + */ +# define GE_SOFT_INT_n(n) \ + (CNTRLR_BASE + 0x002C + 0x20 * (n)) + +/** @brief Maximal number of ranges in SW. Each range supports 32 interrupt + * lines. If HW is extended considerably, increase this value + */ +# define DEO_IC_MAX_RANGE_CNT 8 + +/** @brief Size of the registers of one range in memory, in bytes */ +# define DEO_IC_RANGE_MEM_SIZE 32 /* SWI: 8 registers, no gaps */ + +/** @brief Minimal Interrupt controller HW version */ +# define DEO_IC_INT_CTL_VER_MIN 0x0102 + + +#if defined(CONFIG_IPA_EMULATION) /* declarations to follow */ + +/* + * ***************************************************************************** + * The following used to set up the EMULATION interrupt controller... + * ***************************************************************************** + */ +int setup_emulator_cntrlr( + void __iomem *intcntrlr_base, + u32 intcntrlr_mem_size); + +/* + * ***************************************************************************** + * The following for EMULATION hard irq... + * ***************************************************************************** + */ +irqreturn_t emulator_hard_irq_isr( + int irq, + void *ctxt); + +/* + * ***************************************************************************** + * The following for EMULATION soft irq... + * ***************************************************************************** + */ +irqreturn_t emulator_soft_irq_isr( + int irq, + void *ctxt); + +# else /* #if !defined(CONFIG_IPA_EMULATION) then definitions to follow */ + +static inline int setup_emulator_cntrlr( + void __iomem *intcntrlr_base, + u32 intcntrlr_mem_size) +{ + return 0; +} + +static inline irqreturn_t emulator_hard_irq_isr( + int irq, + void *ctxt) +{ + return IRQ_NONE; +} + +static inline irqreturn_t emulator_soft_irq_isr( + int irq, + void *ctxt) +{ + return IRQ_HANDLED; +} + +# endif /* #if defined(CONFIG_IPA_EMULATION) */ + +#endif /* #if !defined(_GSI_EMULATION_H_) */ diff --git a/drivers/platform/msm/gsi/gsi_emulation_stubs.h b/drivers/platform/msm/gsi/gsi_emulation_stubs.h new file mode 100644 index 000000000000..0b74c0f00ae2 --- /dev/null +++ b/drivers/platform/msm/gsi/gsi_emulation_stubs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#if !defined(_GSI_EMULATION_STUBS_H_) +# define _GSI_EMULATION_STUBS_H_ + +# include +# define __iormb() rmb() /* used in gsi.h */ +# define __iowmb() wmb() /* used in gsi.h */ + +#endif /* #if !defined(_GSI_EMULATION_STUBS_H_) */ diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h new file mode 100644 index 000000000000..743acdb9512c --- /dev/null +++ b/drivers/platform/msm/gsi/gsi_reg.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015-2018, The Linux Foundation. 
All rights reserved. + */ + +#ifndef __GSI_REG_H__ +#define __GSI_REG_H__ + +enum gsi_register_ver { + GSI_REGISTER_VER_1 = 0, + GSI_REGISTER_VER_2 = 1, + GSI_REGISTER_MAX, +}; + +#ifdef GSI_REGISTER_VER_CURRENT +#error GSI_REGISTER_VER_CURRENT already defined +#endif + +#ifdef CONFIG_GSI_REGISTER_VERSION_2 +#include "gsi_reg_v2.h" +#define GSI_REGISTER_VER_CURRENT GSI_REGISTER_VER_2 +#endif + +/* The default is V1 */ +#ifndef GSI_REGISTER_VER_CURRENT +#include "gsi_reg_v1.h" +#define GSI_REGISTER_VER_CURRENT GSI_REGISTER_VER_1 +#endif + +#endif /* __GSI_REG_H__ */ diff --git a/drivers/platform/msm/gsi/gsi_reg_v1.h b/drivers/platform/msm/gsi/gsi_reg_v1.h new file mode 100644 index 000000000000..15bbcf2883de --- /dev/null +++ b/drivers/platform/msm/gsi/gsi_reg_v1.h @@ -0,0 +1,1058 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef __GSI_REG_V1_H__ +#define __GSI_REG_V1_H__ + +#define GSI_GSI_REG_BASE_OFFS 0 + +#define GSI_GSI_CFG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000000) +#define GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_BMSK 0xf00 +#define GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_SHFT 0x8 +#define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20 +#define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5 +#define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10 +#define GSI_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4 +#define GSI_GSI_CFG_UC_IS_MCS_BMSK 0x8 +#define GSI_GSI_CFG_UC_IS_MCS_SHFT 0x3 +#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4 +#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2 +#define GSI_GSI_CFG_MCS_ENABLE_BMSK 0x2 +#define GSI_GSI_CFG_MCS_ENABLE_SHFT 0x1 +#define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1 +#define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0 + +#define GSI_GSI_MCS_CFG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000B000) +#define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1 +#define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0 + +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000018) +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_RMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_BMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_SHFT 0x0 + +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000001c) +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_RMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_BMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_SHFT 0x0 + +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000a0) +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000a4) +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc 
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000a8) +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000ac) +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000b0) +#define GSI_IC_GEN_INT_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000b4) +#define GSI_IC_GEN_INT_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000b8) +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_BMSK 
0x7 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000bc) +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000c0) +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000c4) +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000c8) +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000cc) +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000d0) +#define 
GSI_IC_TLV_RESET_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000d4) +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000d8) +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000dc) +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_READ_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000e0) +#define GSI_IC_READ_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_READ_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000e4) +#define GSI_IC_READ_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define 
GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_WRITE_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000e8) +#define GSI_IC_WRITE_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_WRITE_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000ec) +#define GSI_IC_WRITE_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000f0) +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000f4) +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_CMD_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000400) +#define GSI_GSI_IRAM_PTR_CH_CMD_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000404) +#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_SHFT 0x0 + +#define 
GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000408) +#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_RMSK 0xfff +#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_IRAM_PTR_BMSK 0xfff +#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_DB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000418) +#define GSI_GSI_IRAM_PTR_CH_DB_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_EV_DB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000041c) +#define GSI_GSI_IRAM_PTR_EV_DB_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_NEW_RE_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000420) +#define GSI_GSI_IRAM_PTR_NEW_RE_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000424) +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000428) +#define GSI_GSI_IRAM_PTR_CH_EMPTY_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000042c) +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000430) +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000434) +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000438) +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000043c) +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000440) +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000444) +#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000448) +#define GSI_GSI_IRAM_PTR_UC_GP_INT_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_SHFT 0x0 + +/* 
Real H/W register name is with STOPPED with single P */ +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000044c) +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_INST_RAM_n_WORD_SZ 0x4 +#define GSI_GSI_INST_RAM_n_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00004000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n)) +#define GSI_V2_5_GSI_INST_RAM_n_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001b000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n)) +#define GSI_GSI_INST_RAM_n_RMSK 0xffffffff +#define GSI_GSI_INST_RAM_n_MAXn 4095 +#define GSI_V2_0_GSI_INST_RAM_n_MAXn 6143 +#define GSI_V2_2_GSI_INST_RAM_n_MAXn 4095 +#define GSI_V2_5_GSI_INST_RAM_n_MAXn 8191 +#define GSI_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000 +#define GSI_GSI_INST_RAM_n_INST_BYTE_3_SHFT 0x18 +#define GSI_GSI_INST_RAM_n_INST_BYTE_2_BMSK 0xff0000 +#define GSI_GSI_INST_RAM_n_INST_BYTE_2_SHFT 0x10 +#define GSI_GSI_INST_RAM_n_INST_BYTE_1_BMSK 0xff00 +#define GSI_GSI_INST_RAM_n_INST_BYTE_1_SHFT 0x8 +#define GSI_GSI_INST_RAM_n_INST_BYTE_0_BMSK 0xff +#define GSI_GSI_INST_RAM_n_INST_BYTE_0_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c000 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe +#define GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK 0x2000 +#define GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT 0xd +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c004 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff +#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c008 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c00c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c010 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c014 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c018 + 0x4000 * (n) + 0x80 * (k)) +#define 
GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c01c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c054 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 30 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 3 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c058 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 30 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 3 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_QOS_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c05c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_QOS_RMSK 0x303 +#define GSI_EE_n_GSI_CH_k_QOS_MAXk 30 +#define GSI_EE_n_GSI_CH_k_QOS_MAXn 3 +#define GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK 0x400 +#define GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT 0xa +#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200 +#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9 +#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100 +#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8 +#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf +#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0 + +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f05c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK 0xff0000 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0 + + +#define GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c060 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c064 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c068 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c06c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0 + +#define 
GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d000 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000 +#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18 +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000 +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14 +#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000 +#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4 +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d004 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff +#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d008 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d00c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d010 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d014 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d018 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d01c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d020 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d024 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d028 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d02c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0 + +#define 
GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d030 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d034 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d048 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d04c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001e000 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001e004 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001e100 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001e104 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_GSI_STATUS_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f000 + 0x4000 * (n)) +#define GSI_EE_n_GSI_STATUS_ENABLED_BMSK 0x1 +#define GSI_EE_n_GSI_STATUS_ENABLED_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_CMD_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f008 + 0x4000 * (n)) +#define GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK 0xff000000 +#define GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT 0x18 +#define GSI_EE_n_GSI_CH_CMD_CHID_BMSK 0xff +#define GSI_EE_n_GSI_CH_CMD_CHID_SHFT 0x0 + +#define GSI_EE_n_EV_CH_CMD_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f010 + 0x4000 * (n)) +#define GSI_EE_n_EV_CH_CMD_OPCODE_BMSK 0xff000000 +#define GSI_EE_n_EV_CH_CMD_OPCODE_SHFT 0x18 +#define GSI_EE_n_EV_CH_CMD_CHID_BMSK 0xff +#define GSI_EE_n_EV_CH_CMD_CHID_SHFT 0x0 + +#define GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f018 + 0x4000 * (n)) +#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0x1f +#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK 0x3e0 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT 0x5 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK 0x3c00 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT 0xa + +#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a +#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000 +#define 
GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0 + +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n)) +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0 + +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 + +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define 
GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 + +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xE +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 + +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012040 + 0x4000 * (n)) +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xE +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define 
GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_N_HALF_KB_FVAL 0x4 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_FOUR_KB_FVAL 0x5 + +#define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n)) +#define GSI_EE_n_GSI_SW_VERSION_MAJOR_BMSK 0xf0000000 +#define GSI_EE_n_GSI_SW_VERSION_MAJOR_SHFT 0x1c +#define GSI_EE_n_GSI_SW_VERSION_MINOR_BMSK 0xfff0000 +#define GSI_EE_n_GSI_SW_VERSION_MINOR_SHFT 0x10 +#define GSI_EE_n_GSI_SW_VERSION_STEP_BMSK 0xffff +#define GSI_EE_n_GSI_SW_VERSION_STEP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f080 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40 +#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8 +#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3 +#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0 + +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f088 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK 0x20 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK 0x10 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f090 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f094 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 
0x0001f098 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK 0x1ffff +#define GSI_V2_5_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK 0x7fffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0 +#define GSI_V2_5_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f09c + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfff +#define GSI_V2_5_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 +#define GSI_V2_5_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f0a0 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f0a4 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f0b0 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f0b8 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfff +#define GSI_V2_5_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 +#define GSI_V2_5_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f0c0 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f100 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f108 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK 0x8 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_SHFT 0x3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK 0x4 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_SHFT 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_SHFT 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f110 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_BMSK 0x8 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_SHFT 0x3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_BMSK 0x4 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_SHFT 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_BMSK 0x2 +#define 
GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_SHFT 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f118 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f120 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_BMSK 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_SHFT 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f128 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_SHFT 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_INTSET_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f180 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1 +#define GSI_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0 + +#define GSI_EE_n_ERROR_LOG_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f200 + 0x4000 * (n)) +#define GSI_EE_n_ERROR_LOG_TODO_BMSK 0xffffffff +#define GSI_EE_n_ERROR_LOG_TODO_SHFT 0x0 + +#define GSI_EE_n_ERROR_LOG_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f210 + 0x4000 * (n)) +#define GSI_EE_n_ERROR_LOG_CLR_TODO_BMSK 0xffffffff +#define GSI_EE_n_ERROR_LOG_CLR_TODO_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SCRATCH_0_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f400 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c018 + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c01c + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c028 + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) \ + 
(GSI_GSI_REG_BASE_OFFS + 0x0000c02c + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00003800 + 0x80 * (n) + 0x4 * (k)) +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20 +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5 +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0 + +#endif /* __GSI_REG_V1_H__ */ diff --git a/drivers/platform/msm/gsi/gsi_reg_v2.h b/drivers/platform/msm/gsi/gsi_reg_v2.h new file mode 100644 index 000000000000..0f84141826d7 --- /dev/null +++ b/drivers/platform/msm/gsi/gsi_reg_v2.h @@ -0,0 +1,1058 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef __GSI_REG_V2_H__ +#define __GSI_REG_V2_H__ + +#define GSI_GSI_REG_BASE_OFFS 0 + +#define GSI_GSI_CFG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000000) +#define GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_BMSK 0xf00 +#define GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_SHFT 0x8 +#define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20 +#define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5 +#define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10 +#define GSI_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4 +#define GSI_GSI_CFG_UC_IS_MCS_BMSK 0x8 +#define GSI_GSI_CFG_UC_IS_MCS_SHFT 0x3 +#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4 +#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2 +#define GSI_GSI_CFG_MCS_ENABLE_BMSK 0x2 +#define GSI_GSI_CFG_MCS_ENABLE_SHFT 0x1 +#define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1 +#define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0 + +#define GSI_GSI_MCS_CFG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000B000) +#define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1 +#define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0 + +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000018) +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_RMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_BMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_SHFT 0x0 + +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000001c) +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_RMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_BMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_SHFT 0x0 + +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000a0) +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000a4) +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define 
GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000a8) +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000ac) +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000b0) +#define GSI_IC_GEN_INT_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000b4) +#define GSI_IC_GEN_INT_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000b8) +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 
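The _BMSK/_SHFT pairs in these headers follow the usual mask-and-shift convention: a field is read by ANDing the register word with its bitmask and shifting the result down by the shift amount, and written by the inverse operation. The helpers below are an illustrative sketch only, not part of this snapshot; readl_relaxed() is the standard kernel MMIO accessor, and the gsi_base argument is a placeholder for however the driver maps the GSI register space (the real driver uses its own I/O wrappers).

#include <linux/io.h>
#include <linux/types.h>

static inline u32 gsi_get_field(u32 val, u32 bmsk, u32 shft)
{
	/* Isolate the field with its bitmask, then right-align it. */
	return (val & bmsk) >> shft;
}

static inline u32 gsi_set_field(u32 val, u32 bmsk, u32 shft, u32 field)
{
	/* Clear the field, then place the new value within its mask. */
	return (val & ~bmsk) | ((field << shft) & bmsk);
}

/* Example: test the GSI_ENABLE bit in GSI_GSI_CFG (offset 0x0). */
static bool gsi_is_enabled(void __iomem *gsi_base)
{
	u32 cfg = readl_relaxed(gsi_base + GSI_GSI_CFG_OFFS);

	return gsi_get_field(cfg, GSI_GSI_CFG_GSI_ENABLE_BMSK,
			     GSI_GSI_CFG_GSI_ENABLE_SHFT);
}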
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000bc) +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000c0) +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000c4) +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000c8) +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000cc) +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS \ + 
(GSI_GSI_REG_BASE_OFFS + 0x000000d0) +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000d4) +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000d8) +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000dc) +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_READ_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000e0) +#define GSI_IC_READ_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_READ_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000e4) +#define GSI_IC_READ_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define 
GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_WRITE_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000e8) +#define GSI_IC_WRITE_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_WRITE_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000ec) +#define GSI_IC_WRITE_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000f0) +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000f4) +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_CMD_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000400) +#define GSI_GSI_IRAM_PTR_CH_CMD_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000404) +#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_BMSK 0xfff +#define 
GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_SHFT 0x0 + +#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000408) +#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_RMSK 0xfff +#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_IRAM_PTR_BMSK 0xfff +#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_DB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000418) +#define GSI_GSI_IRAM_PTR_CH_DB_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_EV_DB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000041c) +#define GSI_GSI_IRAM_PTR_EV_DB_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_NEW_RE_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000420) +#define GSI_GSI_IRAM_PTR_NEW_RE_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000424) +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000428) +#define GSI_GSI_IRAM_PTR_CH_EMPTY_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000042c) +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000430) +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000434) +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000438) +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000043c) +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000440) +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000444) +#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000448) +#define GSI_GSI_IRAM_PTR_UC_GP_INT_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_BMSK 0xfff 
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_SHFT 0x0 + +/* Real H/W register name is with STOPPED with single P */ +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000044c) +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_INST_RAM_n_WORD_SZ 0x4 +#define GSI_GSI_INST_RAM_n_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00004000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n)) +#define GSI_V2_5_GSI_INST_RAM_n_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001b000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n)) +#define GSI_GSI_INST_RAM_n_RMSK 0xffffffff +#define GSI_GSI_INST_RAM_n_MAXn 4095 +#define GSI_V2_0_GSI_INST_RAM_n_MAXn 6143 +#define GSI_V2_2_GSI_INST_RAM_n_MAXn 4095 +#define GSI_V2_5_GSI_INST_RAM_n_MAXn 8191 +#define GSI_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000 +#define GSI_GSI_INST_RAM_n_INST_BYTE_3_SHFT 0x18 +#define GSI_GSI_INST_RAM_n_INST_BYTE_2_BMSK 0xff0000 +#define GSI_GSI_INST_RAM_n_INST_BYTE_2_SHFT 0x10 +#define GSI_GSI_INST_RAM_n_INST_BYTE_1_BMSK 0xff00 +#define GSI_GSI_INST_RAM_n_INST_BYTE_1_SHFT 0x8 +#define GSI_GSI_INST_RAM_n_INST_BYTE_0_BMSK 0xff +#define GSI_GSI_INST_RAM_n_INST_BYTE_0_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f000 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe +#define GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK 0x2000 +#define GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT 0xd +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f004 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff +#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f008 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f00c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f010 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f014 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(k, n) \ + 
(GSI_GSI_REG_BASE_OFFS + 0x0000f018 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f01c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f054 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 30 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 3 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f058 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 30 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 3 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_QOS_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c05c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_QOS_RMSK 0x303 +#define GSI_EE_n_GSI_CH_k_QOS_MAXk 30 +#define GSI_EE_n_GSI_CH_k_QOS_MAXn 3 +#define GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK 0x400 +#define GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT 0xa +#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200 +#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9 +#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100 +#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8 +#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf +#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0 + +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f05c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK 0xff0000 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0 + + +#define GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f060 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f064 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f068 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f06c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff +#define 
GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010000 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000 +#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18 +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000 +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14 +#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000 +#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4 +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010004 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff +#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010008 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001000c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010010 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010014 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010018 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001001c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010020 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010024 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010028 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001002c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK 0xffffffff +#define 
GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010030 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010034 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00010048 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001004c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00011000 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00011004 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00011100 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00011104 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_GSI_STATUS_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012000 + 0x4000 * (n)) +#define GSI_EE_n_GSI_STATUS_ENABLED_BMSK 0x1 +#define GSI_EE_n_GSI_STATUS_ENABLED_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_CMD_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012008 + 0x4000 * (n)) +#define GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK 0xff000000 +#define GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT 0x18 +#define GSI_EE_n_GSI_CH_CMD_CHID_BMSK 0xff +#define GSI_EE_n_GSI_CH_CMD_CHID_SHFT 0x0 + +#define GSI_EE_n_EV_CH_CMD_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012010 + 0x4000 * (n)) +#define GSI_EE_n_EV_CH_CMD_OPCODE_BMSK 0xff000000 +#define GSI_EE_n_EV_CH_CMD_OPCODE_SHFT 0x18 +#define GSI_EE_n_EV_CH_CMD_CHID_BMSK 0xff +#define GSI_EE_n_EV_CH_CMD_CHID_SHFT 0x0 + +#define GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012018 + 0x4000 * (n)) +#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0x1f +#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK 0x3e0 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT 0x5 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK 0x3c00 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT 0xa + +#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a +#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19 +#define 
GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0 + +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n)) +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0 + +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 + +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define 
GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 + +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xE +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 + +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012040 + 0x4000 * (n)) +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xE +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define 
GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_N_HALF_KB_FVAL 0x4 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_FOUR_KB_FVAL 0x5 + +#define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012044 + 0x4000 * (n)) +#define GSI_EE_n_GSI_SW_VERSION_MAJOR_BMSK 0xf0000000 +#define GSI_EE_n_GSI_SW_VERSION_MAJOR_SHFT 0x1c +#define GSI_EE_n_GSI_SW_VERSION_MINOR_BMSK 0xfff0000 +#define GSI_EE_n_GSI_SW_VERSION_MINOR_SHFT 0x10 +#define GSI_EE_n_GSI_SW_VERSION_STEP_BMSK 0xffff +#define GSI_EE_n_GSI_SW_VERSION_STEP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012080 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40 +#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8 +#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3 +#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0 + +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012088 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK 0x20 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK 0x10 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012090 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012094 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0 + +#define 
GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012098 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK 0x1ffff +#define GSI_V2_5_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK 0x7fffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0 +#define GSI_V2_5_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001209c + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfff +#define GSI_V2_5_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 +#define GSI_V2_5_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x000120a0 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x000120a4 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x000120b0 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x000120b8 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfff +#define GSI_V2_5_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 +#define GSI_V2_5_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x000120c0 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012100 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012108 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK 0x8 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_SHFT 0x3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK 0x4 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_SHFT 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_SHFT 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012110 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_BMSK 0x8 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_SHFT 0x3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_BMSK 0x4 +#define 
GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_SHFT 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_BMSK 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_SHFT 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012118 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012120 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_BMSK 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_SHFT 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012128 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_SHFT 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_INTSET_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012180 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1 +#define GSI_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0 + +#define GSI_EE_n_ERROR_LOG_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012200 + 0x4000 * (n)) +#define GSI_EE_n_ERROR_LOG_TODO_BMSK 0xffffffff +#define GSI_EE_n_ERROR_LOG_TODO_SHFT 0x0 + +#define GSI_EE_n_ERROR_LOG_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012210 + 0x4000 * (n)) +#define GSI_EE_n_ERROR_LOG_CLR_TODO_BMSK 0xffffffff +#define GSI_EE_n_ERROR_LOG_CLR_TODO_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SCRATCH_0_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012400 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c018 + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c01c + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c028 + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define 
GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c02c + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00003800 + 0x80 * (n) + 0x4 * (k)) +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20 +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5 +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0 + +#endif /* __GSI_REG_V2_H__ */ diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile new file mode 100644 index 000000000000..812f11e096c5 --- /dev/null +++ b/drivers/platform/msm/ipa/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common +obj-$(CONFIG_IPA_UT) += test/ + +ipa_common += ipa_api.o ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c new file mode 100644 index 000000000000..cb720084a152 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -0,0 +1,3492 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_api.h" + +/* + * The following for adding code (ie. for EMULATION) not found on x86. + */ +#if defined(CONFIG_IPA_EMULATION) +# include "ipa_v3/ipa_emulation_stubs.h" +#endif + +#define DRV_NAME "ipa" + +#define IPA_API_DISPATCH_RETURN(api, p...) \ + do { \ + if (!ipa_api_ctrl) { \ + pr_err("%s:%d IPA HW is not supported\n", \ + __func__, __LINE__); \ + ret = -EPERM; \ + } \ + else { \ + if (ipa_api_ctrl->api) { \ + ret = ipa_api_ctrl->api(p); \ + } else { \ + WARN(1, \ + "%s not implemented for IPA ver %d\n", \ + __func__, ipa_api_hw_type); \ + ret = -EPERM; \ + } \ + } \ + } while (0) + +#define IPA_API_DISPATCH(api, p...) \ + do { \ + if (!ipa_api_ctrl) \ + pr_err("%s:%d IPA HW is not supported\n", \ + __func__, __LINE__); \ + else { \ + if (ipa_api_ctrl->api) { \ + ipa_api_ctrl->api(p); \ + } else { \ + WARN(1, \ + "%s not implemented for IPA ver %d\n",\ + __func__, ipa_api_hw_type); \ + } \ + } \ + } while (0) + +#define IPA_API_DISPATCH_RETURN_PTR(api, p...) \ + do { \ + if (!ipa_api_ctrl) { \ + pr_err("%s:%d IPA HW is not supported\n", \ + __func__, __LINE__); \ + ret = NULL; \ + } \ + else { \ + if (ipa_api_ctrl->api) { \ + ret = ipa_api_ctrl->api(p); \ + } else { \ + WARN(1, "%s not implemented for IPA ver %d\n",\ + __func__, ipa_api_hw_type); \ + ret = NULL; \ + } \ + } \ + } while (0) + +#define IPA_API_DISPATCH_RETURN_BOOL(api, p...) 
\ + do { \ + if (!ipa_api_ctrl) { \ + pr_err("%s:%d IPA HW is not supported\n", \ + __func__, __LINE__); \ + ret = false; \ + } \ + else { \ + if (ipa_api_ctrl->api) { \ + ret = ipa_api_ctrl->api(p); \ + } else { \ + WARN(1, "%s not implemented for IPA ver %d\n",\ + __func__, ipa_api_hw_type); \ + ret = false; \ + } \ + } \ + } while (0) + +#if defined(CONFIG_IPA_EMULATION) +static bool running_emulation = true; +#else +static bool running_emulation; +#endif + +static enum ipa_hw_type ipa_api_hw_type; +static struct ipa_api_controller *ipa_api_ctrl; + +const char *ipa_clients_strings[IPA_CLIENT_MAX] = { + __stringify(IPA_CLIENT_HSIC1_PROD), + __stringify(IPA_CLIENT_HSIC1_CONS), + __stringify(IPA_CLIENT_HSIC2_PROD), + __stringify(IPA_CLIENT_HSIC2_CONS), + __stringify(IPA_CLIENT_HSIC3_PROD), + __stringify(IPA_CLIENT_HSIC3_CONS), + __stringify(IPA_CLIENT_HSIC4_PROD), + __stringify(IPA_CLIENT_HSIC4_CONS), + __stringify(IPA_CLIENT_HSIC5_PROD), + __stringify(IPA_CLIENT_HSIC5_CONS), + __stringify(IPA_CLIENT_WLAN1_PROD), + __stringify(IPA_CLIENT_WLAN1_CONS), + __stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD), + __stringify(IPA_CLIENT_WLAN2_CONS), + __stringify(RESERVERD_PROD_14), + __stringify(IPA_CLIENT_WLAN3_CONS), + __stringify(RESERVERD_PROD_16), + __stringify(IPA_CLIENT_WLAN4_CONS), + __stringify(IPA_CLIENT_USB_PROD), + __stringify(IPA_CLIENT_USB_CONS), + __stringify(IPA_CLIENT_USB2_PROD), + __stringify(IPA_CLIENT_USB2_CONS), + __stringify(IPA_CLIENT_USB3_PROD), + __stringify(IPA_CLIENT_USB3_CONS), + __stringify(IPA_CLIENT_USB4_PROD), + __stringify(IPA_CLIENT_USB4_CONS), + __stringify(IPA_CLIENT_UC_USB_PROD), + __stringify(IPA_CLIENT_USB_DPL_CONS), + __stringify(IPA_CLIENT_A2_EMBEDDED_PROD), + __stringify(IPA_CLIENT_A2_EMBEDDED_CONS), + __stringify(IPA_CLIENT_A2_TETHERED_PROD), + __stringify(IPA_CLIENT_A2_TETHERED_CONS), + __stringify(IPA_CLIENT_APPS_LAN_PROD), + __stringify(IPA_CLIENT_APPS_LAN_CONS), + __stringify(IPA_CLIENT_APPS_WAN_PROD), + __stringify(IPA_CLIENT_APPS_WAN_CONS), + __stringify(IPA_CLIENT_APPS_CMD_PROD), + __stringify(IPA_CLIENT_A5_LAN_WAN_CONS), + __stringify(IPA_CLIENT_ODU_PROD), + __stringify(IPA_CLIENT_ODU_EMB_CONS), + __stringify(RESERVERD_PROD_40), + __stringify(IPA_CLIENT_ODU_TETH_CONS), + __stringify(IPA_CLIENT_MHI_PROD), + __stringify(IPA_CLIENT_MHI_CONS), + __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD), + __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS), + __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD), + __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS), + __stringify(IPA_CLIENT_ETHERNET_PROD), + __stringify(IPA_CLIENT_ETHERNET_CONS), + __stringify(IPA_CLIENT_Q6_LAN_PROD), + __stringify(IPA_CLIENT_Q6_LAN_CONS), + __stringify(IPA_CLIENT_Q6_WAN_PROD), + __stringify(IPA_CLIENT_Q6_WAN_CONS), + __stringify(IPA_CLIENT_Q6_CMD_PROD), + __stringify(IPA_CLIENT_Q6_DUN_CONS), + __stringify(IPA_CLIENT_Q6_DECOMP_PROD), + __stringify(IPA_CLIENT_Q6_DECOMP_CONS), + __stringify(IPA_CLIENT_Q6_DECOMP2_PROD), + __stringify(IPA_CLIENT_Q6_DECOMP2_CONS), + __stringify(RESERVERD_PROD_60), + __stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS), + __stringify(IPA_CLIENT_TEST_PROD), + __stringify(IPA_CLIENT_TEST_CONS), + __stringify(IPA_CLIENT_TEST1_PROD), + __stringify(IPA_CLIENT_TEST1_CONS), + __stringify(IPA_CLIENT_TEST2_PROD), + __stringify(IPA_CLIENT_TEST2_CONS), + __stringify(IPA_CLIENT_TEST3_PROD), + __stringify(IPA_CLIENT_TEST3_CONS), + __stringify(IPA_CLIENT_TEST4_PROD), + __stringify(IPA_CLIENT_TEST4_CONS), + __stringify(RESERVERD_PROD_72), + __stringify(IPA_CLIENT_DUMMY_CONS), + 
__stringify(IPA_CLIENT_Q6_DL_NLO_DATA_PROD), + __stringify(IPA_CLIENT_Q6_UL_NLO_DATA_CONS), + __stringify(RESERVERD_PROD_76), + __stringify(IPA_CLIENT_Q6_UL_NLO_ACK_CONS), + __stringify(RESERVERD_PROD_78), + __stringify(IPA_CLIENT_Q6_QBAP_STATUS_CONS), + __stringify(RESERVERD_PROD_80), + __stringify(IPA_CLIENT_MHI_DPL_CONS), + __stringify(RESERVERD_PROD_82), + __stringify(IPA_CLIENT_ODL_DPL_CONS), + __stringify(IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD), + __stringify(IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS), +}; + +/** + * ipa_write_64() - convert 64 bit value to byte array + * @w: 64 bit integer + * @dest: byte array + * + * Return value: converted value + */ +u8 *ipa_write_64(u64 w, u8 *dest) +{ + if (unlikely(dest == NULL)) { + pr_err("%s: NULL address\n", __func__); + return dest; + } + *dest++ = (u8)((w) & 0xFF); + *dest++ = (u8)((w >> 8) & 0xFF); + *dest++ = (u8)((w >> 16) & 0xFF); + *dest++ = (u8)((w >> 24) & 0xFF); + *dest++ = (u8)((w >> 32) & 0xFF); + *dest++ = (u8)((w >> 40) & 0xFF); + *dest++ = (u8)((w >> 48) & 0xFF); + *dest++ = (u8)((w >> 56) & 0xFF); + + return dest; +} + +/** + * ipa_write_32() - convert 32 bit value to byte array + * @w: 32 bit integer + * @dest: byte array + * + * Return value: converted value + */ +u8 *ipa_write_32(u32 w, u8 *dest) +{ + if (unlikely(dest == NULL)) { + pr_err("%s: NULL address\n", __func__); + return dest; + } + *dest++ = (u8)((w) & 0xFF); + *dest++ = (u8)((w >> 8) & 0xFF); + *dest++ = (u8)((w >> 16) & 0xFF); + *dest++ = (u8)((w >> 24) & 0xFF); + + return dest; +} + +/** + * ipa_write_16() - convert 16 bit value to byte array + * @hw: 16 bit integer + * @dest: byte array + * + * Return value: converted value + */ +u8 *ipa_write_16(u16 hw, u8 *dest) +{ + if (unlikely(dest == NULL)) { + pr_err("%s: NULL address\n", __func__); + return dest; + } + *dest++ = (u8)((hw) & 0xFF); + *dest++ = (u8)((hw >> 8) & 0xFF); + + return dest; +} + +/** + * ipa_write_8() - convert 8 bit value to byte array + * @hw: 8 bit integer + * @dest: byte array + * + * Return value: converted value + */ +u8 *ipa_write_8(u8 b, u8 *dest) +{ + if (unlikely(dest == NULL)) { + WARN(1, "%s: NULL address\n", __func__); + return dest; + } + *dest++ = (b) & 0xFF; + + return dest; +} + +/** + * ipa_pad_to_64() - pad byte array to 64 bit value + * @dest: byte array + * + * Return value: padded value + */ +u8 *ipa_pad_to_64(u8 *dest) +{ + int i; + int j; + + if (unlikely(dest == NULL)) { + WARN(1, "%s: NULL address\n", __func__); + return dest; + } + + i = (long)dest & 0x7; + + if (i) + for (j = 0; j < (8 - i); j++) + *dest++ = 0; + + return dest; +} + +/** + * ipa_pad_to_32() - pad byte array to 32 bit value + * @dest: byte array + * + * Return value: padded value + */ +u8 *ipa_pad_to_32(u8 *dest) +{ + int i; + int j; + + if (unlikely(dest == NULL)) { + WARN(1, "%s: NULL address\n", __func__); + return dest; + } + + i = (long)dest & 0x7; + + if (i) + for (j = 0; j < (4 - i); j++) + *dest++ = 0; + + return dest; +} + +int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr, + struct sg_table *in_sgt_ptr) +{ + unsigned int nents; + + if (in_sgt_ptr != NULL) { + *out_ch_ptr = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (*out_ch_ptr == NULL) + return -ENOMEM; + + nents = in_sgt_ptr->nents; + + (*out_ch_ptr)->sgl = + kcalloc(nents, sizeof(struct scatterlist), + GFP_KERNEL); + if ((*out_ch_ptr)->sgl == NULL) { + kfree(*out_ch_ptr); + *out_ch_ptr = NULL; + return -ENOMEM; + } + + memcpy((*out_ch_ptr)->sgl, in_sgt_ptr->sgl, + nents*sizeof((*out_ch_ptr)->sgl)); + (*out_ch_ptr)->nents = nents; + 
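+		/*
+		 * Editor's note (not part of the original snapshot): nents is
+		 * the number of mapped entries in the source table, while
+		 * orig_nents (copied below) preserves the size the original
+		 * scatterlist was allocated with. Only the scatterlist array
+		 * itself is duplicated here; the pages it describes remain
+		 * shared with the caller's table.
+		 */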
(*out_ch_ptr)->orig_nents = in_sgt_ptr->orig_nents; + } + return 0; +} + +int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr) +{ + if (*out_sgt_ptr != NULL) { + kfree((*out_sgt_ptr)->sgl); + (*out_sgt_ptr)->sgl = NULL; + kfree(*out_sgt_ptr); + *out_sgt_ptr = NULL; + } + return 0; +} + +/** + * ipa_clear_endpoint_delay() - Clear ep_delay. + * @clnt_hdl: [in] IPA client handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_clear_endpoint_delay(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_clear_endpoint_delay, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_clear_endpoint_delay); + +/** + * ipa_reset_endpoint() - reset an endpoint from BAM perspective + * @clnt_hdl: [in] IPA client handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_reset_endpoint(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_reset_endpoint, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_reset_endpoint); + +/** + * ipa_disable_endpoint() - Disable an endpoint from IPA perspective + * @clnt_hdl: [in] IPA client handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_disable_endpoint(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disable_endpoint, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_disable_endpoint); + + +/** + * ipa_cfg_ep - IPA end-point configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * This includes nat, header, mode, aggregation and route settings and is a one + * shot API to configure the IPA end-point fully + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep, clnt_hdl, ipa_ep_cfg); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep); + +/** + * ipa_cfg_ep_nat() - IPA end-point NAT configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_nat: [in] IPA NAT end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_nat, clnt_hdl, ep_nat); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_nat); + +/** + * ipa_cfg_ep_conn_track() - IPA end-point IPv6CT configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_conn_track: [in] IPA IPv6CT end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_conn_track(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ep_conn_track) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_conn_track, clnt_hdl, + ep_conn_track); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_conn_track); + +/** + * ipa_cfg_ep_hdr() - IPA end-point header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr) +{ + int ret; + + 
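+	/*
+	 * Editor's note (not part of the original snapshot): the dispatch
+	 * macro below forwards the call to the function pointer of the same
+	 * name in ipa_api_ctrl, i.e. the implementation registered for the
+	 * detected IPA HW version (ipa_api_hw_type). If no implementation is
+	 * registered for this API, the macro warns and returns -EPERM.
+	 */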
IPA_API_DISPATCH_RETURN(ipa_cfg_ep_hdr, clnt_hdl, ep_hdr); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_hdr); + +/** + * ipa_cfg_ep_hdr_ext() - IPA end-point extended header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_hdr_ext: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_hdr_ext, clnt_hdl, ep_hdr_ext); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_hdr_ext); + +/** + * ipa_cfg_ep_mode() - IPA end-point mode configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_mode, clnt_hdl, ep_mode); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_mode); + +/** + * ipa_cfg_ep_aggr() - IPA end-point aggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_aggr, clnt_hdl, ep_aggr); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_aggr); + +/** + * ipa_cfg_ep_deaggr() - IPA end-point deaggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_deaggr: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ep_deaggr) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_deaggr, clnt_hdl, ep_deaggr); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_deaggr); + +/** + * ipa_cfg_ep_route() - IPA end-point routing configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_route, clnt_hdl, ep_route); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_route); + +/** + * ipa_cfg_ep_holb() - IPA end-point holb configuration + * + * If an IPA producer pipe is full, IPA HW by default will block + * indefinitely till space opens up. During this time no packets + * including those from unrelated pipes will be processed. Enabling + * HOLB means IPA HW will be allowed to drop packets as/when needed + * and indefinite blocking is avoided. 
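+ *
+ * Illustrative usage (editor's sketch, not part of the snapshot; the
+ * field values shown are arbitrary examples):
+ *
+ *	struct ipa_ep_cfg_holb holb = { .en = 1, .tmr_val = 32 };
+ *
+ *	if (ipa_cfg_ep_holb(clnt_hdl, &holb))
+ *		pr_err("failed to enable HOLB drop on this pipe\n");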
+ * + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_holb, clnt_hdl, ep_holb); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_holb); + + +/** + * ipa_cfg_ep_cfg() - IPA end-point cfg configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_cfg, clnt_hdl, cfg); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_cfg); + +/** + * ipa_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_metadata_mask(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask + *metadata_mask) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_metadata_mask, clnt_hdl, + metadata_mask); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_metadata_mask); + +/** + * ipa_cfg_ep_holb_by_client() - IPA end-point holb configuration + * + * Wrapper function for ipa_cfg_ep_holb() with client name instead of + * client handle. This function is used for clients that does not have + * client handle. + * + * @client: [in] client name + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ep_holb) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_holb_by_client, client, ep_holb); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_holb_by_client); + +/** + * ipa_cfg_ep_ctrl() - IPA end-point Control configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_ctrl, clnt_hdl, ep_ctrl); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_ctrl); + +/** + * ipa_add_hdr() - add the specified headers to SW and optionally commit them to + * IPA HW + * @hdrs: [inout] set of headers to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_hdr, hdrs); + + return ret; +} +EXPORT_SYMBOL(ipa_add_hdr); + +/** + * ipa_add_hdr_usr() - add the specified headers to SW and optionally + * commit them to IPA HW + * @hdrs: [inout] set of headers to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_hdr_usr, hdrs, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_add_hdr_usr); + +/** + * ipa_del_hdr() - Remove the specified headers from SW and optionally + * 
commit them to IPA HW + * @hdls: [inout] set of headers to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_del_hdr, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_del_hdr); + +/** + * ipa_commit_hdr() - commit to IPA HW the current header table in SW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_commit_hdr(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_commit_hdr); + + return ret; +} +EXPORT_SYMBOL(ipa_commit_hdr); + +/** + * ipa_reset_hdr() - reset the current header table in SW (does not commit to + * HW) + * + * @user_only: [in] indicate delete rules installed by userspace + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_reset_hdr(bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_reset_hdr, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_reset_hdr); + +/** + * ipa_get_hdr() - Lookup the specified header resource + * @lookup: [inout] header to lookup and its handle + * + * lookup the specified header resource and return handle if it exists + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + * Caller should call ipa_put_hdr later if this function succeeds + */ +int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_hdr, lookup); + + return ret; +} +EXPORT_SYMBOL(ipa_get_hdr); + +/** + * ipa_put_hdr() - Release the specified header handle + * @hdr_hdl: [in] the header handle to release + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_put_hdr(u32 hdr_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_put_hdr, hdr_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_put_hdr); + +/** + * ipa_copy_hdr() - Lookup the specified header resource and return a copy of it + * @copy: [inout] header to lookup and its copy + * + * lookup the specified header resource and return a copy of it (along with its + * attributes) if it exists, this would be called for partial headers + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_copy_hdr, copy); + + return ret; +} +EXPORT_SYMBOL(ipa_copy_hdr); + +/** + * ipa_add_hdr_proc_ctx() - add the specified headers to SW + * and optionally commit them to IPA HW + * @proc_ctxs: [inout] set of processing context headers to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_hdr_proc_ctx, proc_ctxs, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_add_hdr_proc_ctx); + +/** + * ipa_del_hdr_proc_ctx() - + * Remove the specified processing context headers from SW and + * optionally commit them to IPA HW. 
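+ * The handles supplied in @hdls are the proc_ctx handles returned by an
+ * earlier ipa_add_hdr_proc_ctx() call (editor's note, not part of the
+ * original snapshot).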
+ * @hdls: [inout] set of processing context headers to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_del_hdr_proc_ctx, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_del_hdr_proc_ctx); + +/** + * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_rt_rule, rules); + + return ret; +} +EXPORT_SYMBOL(ipa_add_rt_rule); + +/** + * ipa_add_rt_rule_usr() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_rt_rule_usr, rules, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_add_rt_rule_usr); + +/** + * ipa_del_rt_rule() - Remove the specified routing rules from SW and optionally + * commit to IPA HW + * @hdls: [inout] set of routing rules to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_del_rt_rule, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_del_rt_rule); + +/** + * ipa_commit_rt() - Commit the current SW routing table of specified type + * to IPA HW + * @ip: The family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_commit_rt(enum ipa_ip_type ip) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_commit_rt, ip); + + return ret; +} +EXPORT_SYMBOL(ipa_commit_rt); + +/** + * ipa_reset_rt() - reset the current SW routing table of specified type + * (does not commit to HW) + * @ip: The family of routing tables + * @user_only: [in] indicate delete rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_reset_rt(enum ipa_ip_type ip, bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_reset_rt, ip, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_reset_rt); + +/** + * ipa_get_rt_tbl() - lookup the specified routing table and return handle if it + * exists, if lookup succeeds the routing table ref cnt is increased + * @lookup: [inout] routing table to lookup and its handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + * Caller should call ipa_put_rt_tbl later if this function succeeds + */ +int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_rt_tbl, lookup); + + return ret; +} +EXPORT_SYMBOL(ipa_get_rt_tbl); + +/** + * ipa_put_rt_tbl() - Release the specified routing table handle + * @rt_tbl_hdl: [in] the routing table handle to release + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_put_rt_tbl(u32 rt_tbl_hdl) +{ + int
ret; + + IPA_API_DISPATCH_RETURN(ipa_put_rt_tbl, rt_tbl_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_put_rt_tbl); + +/** + * ipa_query_rt_index() - find the index of the routing table + * whose name and IP type are given as parameters + * @in: [inout] routing table name and IP family; on success the table + * index is returned in this structure + * + * Returns: 0 on success, negative on failure + */ +int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_query_rt_index, in); + + return ret; +} +EXPORT_SYMBOL(ipa_query_rt_index); + +/** + * ipa_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally + * commit to IPA HW + * @hdls: [inout] set of routing rules to modify + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mdfy_rt_rule, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_mdfy_rt_rule); + +/** + * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of filtering rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_flt_rule, rules); + + return ret; +} +EXPORT_SYMBOL(ipa_add_flt_rule); + +/** + * ipa_add_flt_rule_usr() - Add the specified filtering rules to + * SW and optionally commit to IPA HW + * @rules: [inout] set of filtering rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_flt_rule_usr, rules, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_add_flt_rule_usr); + +/** + * ipa_del_flt_rule() - Remove the specified filtering rules from SW and + * optionally commit to IPA HW + * @hdls: [inout] set of filtering rules to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_del_flt_rule, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_del_flt_rule); + +/** + * ipa_mdfy_flt_rule() - Modify the specified filtering rules in SW and + * optionally commit to IPA HW + * @hdls: [inout] set of filtering rules to modify + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mdfy_flt_rule, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_mdfy_flt_rule); + +/** + * ipa_commit_flt() - Commit the current SW filtering table of specified type to + * IPA HW + * @ip: [in] the family of filtering tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_commit_flt(enum ipa_ip_type ip) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_commit_flt, ip); + + return ret; +} +EXPORT_SYMBOL(ipa_commit_flt); + +/** + * ipa_reset_flt() - Reset the current SW filtering table of specified type + * (does not commit to HW) + * @ip: [in] the family of filtering tables + * @user_only: [in] indicate delete rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_reset_flt(enum ipa_ip_type ip, bool
user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_reset_flt, ip, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_reset_flt); + +/** + * ipa_allocate_nat_device() - Allocates memory for the NAT device + * @mem: [in/out] memory parameters + * + * Called by NAT client driver to allocate memory for the NAT entries. Based on + * the request size either shared or system memory will be used. + * + * Returns: 0 on success, negative on failure + */ +int ipa_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_allocate_nat_device, mem); + + return ret; +} +EXPORT_SYMBOL(ipa_allocate_nat_device); + +/** + * ipa_allocate_nat_table() - Allocates memory for the NAT table + * @table_alloc: [in/out] memory parameters + * + * Called by NAT client to allocate memory for the table entries. + * Based on the request size either shared or system memory will be used. + * + * Returns: 0 on success, negative on failure + */ +int ipa_allocate_nat_table(struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_allocate_nat_table, table_alloc); + + return ret; +} +EXPORT_SYMBOL(ipa_allocate_nat_table); + + +/** + * ipa_allocate_ipv6ct_table() - Allocates memory for the IPv6CT table + * @table_alloc: [in/out] memory parameters + * + * Called by IPv6CT client to allocate memory for the table entries. + * Based on the request size either shared or system memory will be used. + * + * Returns: 0 on success, negative on failure + */ +int ipa_allocate_ipv6ct_table( + struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_allocate_ipv6ct_table, table_alloc); + + return ret; +} +EXPORT_SYMBOL(ipa_allocate_ipv6ct_table); + +/** + * ipa_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW + * @init: [in] initialization command attributes + * + * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_nat_init_cmd, init); + + return ret; +} +EXPORT_SYMBOL(ipa_nat_init_cmd); + +/** + * ipa_ipv6ct_init_cmd() - Post IP_V6_CONN_TRACK_INIT command to IPA HW + * @init: [in] initialization command attributes + * + * Called by IPv6CT client driver to post IP_V6_CONN_TRACK_INIT command + * to IPA HW. 
+ * + * Returns: 0 on success, negative on failure + */ +int ipa_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_ipv6ct_init_cmd, init); + + return ret; +} +EXPORT_SYMBOL(ipa_ipv6ct_init_cmd); + +/** + * ipa_nat_dma_cmd() - Post NAT_DMA command to IPA HW + * @dma: [in] initialization command attributes + * + * Called by NAT client driver to post NAT_DMA command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_nat_dma_cmd, dma); + + return ret; +} +EXPORT_SYMBOL(ipa_nat_dma_cmd); + +/** + * ipa_table_dma_cmd() - Post TABLE_DMA command to IPA HW + * @dma: [in] initialization command attributes + * + * Called by NAT/IPv6CT client to post TABLE_DMA command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_table_dma_cmd, dma); + + return ret; +} +EXPORT_SYMBOL(ipa_table_dma_cmd); + +/** + * ipa_nat_del_cmd() - Delete the NAT table + * @del: [in] delete NAT table parameters + * + * Called by NAT client driver to delete the nat table + * + * Returns: 0 on success, negative on failure + */ +int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_nat_del_cmd, del); + + return ret; +} +EXPORT_SYMBOL(ipa_nat_del_cmd); + +/** + * ipa_del_nat_table() - Delete the NAT table + * @del: [in] delete table parameters + * + * Called by NAT client to delete the table + * + * Returns: 0 on success, negative on failure + */ +int ipa_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_del_nat_table, del); + + return ret; +} +EXPORT_SYMBOL(ipa_del_nat_table); + +/** + * ipa_del_ipv6ct_table() - Delete the IPv6CT table + * @del: [in] delete table parameters + * + * Called by IPv6CT client to delete the table + * + * Returns: 0 on success, negative on failure + */ +int ipa_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_del_ipv6ct_table, del); + + return ret; +} +EXPORT_SYMBOL(ipa_del_ipv6ct_table); + +/** + * ipa3_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM + * @mdfy_pdn: [in] PDN info to be written to SRAM + * + * Called by NAT client driver to modify an entry in the PDN config table + * + * Returns: 0 on success, negative on failure + */ +int ipa_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_nat_mdfy_pdn, mdfy_pdn); + + return ret; +} +EXPORT_SYMBOL(ipa_nat_mdfy_pdn); + +/** + * ipa_send_msg() - Send "message" from kernel client to IPA driver + * @meta: [in] message meta-data + * @buff: [in] the payload for message + * @callback: [in] free callback + * + * Client supplies the message meta-data and payload which IPA driver buffers + * till read by user-space. After read from user space IPA driver invokes the + * callback supplied to free the message payload. Client must not touch/free + * the message payload after calling this API. 
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_send_msg, meta, buff, callback); + + return ret; +} +EXPORT_SYMBOL(ipa_send_msg); + +/** + * ipa_register_pull_msg() - register pull message type + * @meta: [in] message meta-data + * @callback: [in] pull callback + * + * Register message callback by kernel client with IPA driver for IPA driver to + * pull message on-demand. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_register_pull_msg, meta, callback); + + return ret; +} +EXPORT_SYMBOL(ipa_register_pull_msg); + +/** + * ipa_deregister_pull_msg() - De-register pull message type + * @meta: [in] message meta-data + * + * De-register "message" by kernel client from IPA driver + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_deregister_pull_msg(struct ipa_msg_meta *meta) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_deregister_pull_msg, meta); + + return ret; +} +EXPORT_SYMBOL(ipa_deregister_pull_msg); + +/** + * ipa_register_intf() - register "logical" interface + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * + * Register an interface and its tx and rx properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_register_intf, name, tx, rx); + + return ret; +} +EXPORT_SYMBOL(ipa_register_intf); + +/** + * ipa_register_intf_ext() - register "logical" interface which has only + * extended properties + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * @ext: [in] EXT properties of the interface + * + * Register an interface and its tx, rx and ext properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_register_intf_ext, name, tx, rx, ext); + + return ret; +} +EXPORT_SYMBOL(ipa_register_intf_ext); + +/** + * ipa_deregister_intf() - de-register previously registered logical interface + * @name: [in] interface name + * + * De-register a previously registered interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_deregister_intf(const char *name) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_deregister_intf, name); + + return ret; +} +EXPORT_SYMBOL(ipa_deregister_intf); + +/** + * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting + * @mode: [in] the desired aggregation mode for e.g. 
straight MBIM, QCNCM, + * etc + * + * Returns: 0 on success + */ +int ipa_set_aggr_mode(enum ipa_aggr_mode mode) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_set_aggr_mode, mode); + + return ret; +} +EXPORT_SYMBOL(ipa_set_aggr_mode); + + +/** + * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation + * mode + * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be + * "QND") + * + * Set the NDP signature used for QCNCM aggregation mode. The fourth byte + * (expected to be 'P') needs to be set using the header addition mechanism + * + * Returns: 0 on success, negative on failure + */ +int ipa_set_qcncm_ndp_sig(char sig[3]) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_set_qcncm_ndp_sig, sig); + + return ret; +} +EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig); + +/** + * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame + * configuration + * @enable: [in] true for single NDP/MBIM; false otherwise + * + * Returns: 0 on success + */ +int ipa_set_single_ndp_per_mbim(bool enable) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_set_single_ndp_per_mbim, enable); + + return ret; +} +EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim); + +/** + * ipa_tx_dp() - Data-path tx handler + * @dst: [in] which IPA destination to route tx packets to + * @skb: [in] the packet to send + * @meta: [in] TX packet meta-data + * + * Data-path tx handler. This is used both for the SW data-path, which by-passes + * most IPA HW blocks, AND for the regular HW data-path (WLAN AMPDU traffic + * only). If dst is a "valid" CONS type, then SW data-path is used. If dst is the + * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else + * is an error. For errors, client needs to free the skb as needed. For success, + * IPA driver will later invoke client callback if one was supplied. That + * callback should free the skb. If no callback is supplied, the IPA driver will + * free the skb internally + * + * The function will use two descriptors for this send command + * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent); + * the first descriptor will be used to inform the IPA hardware that + * apps need to push data into the IPA (IP_PACKET_INIT immediate command). + * Once this send is done from the SPS point of view, the IPA driver will + * get notified by the supplied callback - ipa_sps_irq_tx_comp() + * + * ipa_sps_irq_tx_comp() will call the user supplied + * callback (from ipa_connect) + * + * Returns: 0 on success, negative on failure + */ +int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *meta) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_tx_dp, dst, skb, meta); + + return ret; +} +EXPORT_SYMBOL(ipa_tx_dp); + +/** + * ipa_tx_dp_mul() - Data-path tx handler for multiple packets + * @src: [in] Client that is sending data + * @data_desc: [in] data descriptors from wlan + * + * This is used to transfer data descriptors received + * from the WLAN1_PROD pipe to IPA HW + * + * The function will send data descriptors from WLAN1_PROD (one + * at a time) using sps_transfer_one.
Will set EOT flag for last + * descriptor Once this send was done from SPS point-of-view the + * IPA driver will get notified by the supplied callback - + * ipa_sps_irq_tx_no_aggr_notify() + * + * ipa_sps_irq_tx_no_aggr_notify will call to the user supplied + * callback (from ipa_connect) + * + * Returns: 0 on success, negative on failure + */ +int ipa_tx_dp_mul(enum ipa_client_type src, + struct ipa_tx_data_desc *data_desc) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_tx_dp_mul, src, data_desc); + + return ret; +} +EXPORT_SYMBOL(ipa_tx_dp_mul); + +void ipa_free_skb(struct ipa_rx_data *data) +{ + IPA_API_DISPATCH(ipa_free_skb, data); +} +EXPORT_SYMBOL(ipa_free_skb); + +/** + * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform + * IPA EP configuration + * @sys_in: [in] input needed to setup BAM pipe and configure EP + * @clnt_hdl: [out] client handle + * + * - configure the end-point registers with the supplied + * parameters from the user. + * - call SPS APIs to create a system-to-bam connection with IPA. + * - allocate descriptor FIFO + * - register callback function(ipa_sps_irq_rx_notify or + * ipa_sps_irq_tx_notify - depends on client type) in case the driver is + * not configured to pulling mode + * + * Returns: 0 on success, negative on failure + */ +int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_setup_sys_pipe, sys_in, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_setup_sys_pipe); + +/** + * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP + * @clnt_hdl: [in] the handle obtained from ipa_setup_sys_pipe + * + * Returns: 0 on success, negative on failure + */ +int ipa_teardown_sys_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_teardown_sys_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_teardown_sys_pipe); + +int ipa_sys_setup(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_bam_or_gsi_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status) + +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_sys_setup, sys_in, ipa_bam_or_gsi_hdl, + ipa_pipe_num, clnt_hdl, en_status); + + return ret; +} +EXPORT_SYMBOL(ipa_sys_setup); + +int ipa_sys_teardown(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_sys_teardown, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_sys_teardown); + +int ipa_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_sys_update_gsi_hdls, clnt_hdl, + gsi_ch_hdl, gsi_ev_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_sys_update_gsi_hdls); + +/** + * ipa_connect_wdi_pipe() - WDI client connect + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_connect_wdi_pipe, in, out); + + return ret; +} +EXPORT_SYMBOL(ipa_connect_wdi_pipe); + +/** + * ipa_disconnect_wdi_pipe() - WDI client disconnect + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_disconnect_wdi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disconnect_wdi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_disconnect_wdi_pipe); + +/** + * 
ipa_enable_wdi_pipe() - WDI client enable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_enable_wdi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_enable_wdi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_enable_wdi_pipe); + +/** + * ipa_disable_wdi_pipe() - WDI client disable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_disable_wdi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disable_wdi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_disable_wdi_pipe); + +/** + * ipa_resume_wdi_pipe() - WDI client resume + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_resume_wdi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_resume_wdi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_resume_wdi_pipe); + +/** + * ipa_suspend_wdi_pipe() - WDI client suspend + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_suspend_wdi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_suspend_wdi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_suspend_wdi_pipe); + +/** + * ipa_get_wdi_stats() - Query WDI statistics from uc + * @stats: [inout] stats blob from client populated by driver + * + * Returns: 0 on success, negative on failure + * + * @note Cannot be called from atomic context + * + */ +int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_wdi_stats, stats); + + return ret; +} +EXPORT_SYMBOL(ipa_get_wdi_stats); + +/** + * ipa_get_smem_restr_bytes()- Return IPA smem restricted bytes + * + * Return value: u16 - number of IPA smem restricted bytes + */ +u16 ipa_get_smem_restr_bytes(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_smem_restr_bytes); + + return ret; +} +EXPORT_SYMBOL(ipa_get_smem_restr_bytes); + +/** + * ipa_broadcast_wdi_quota_reach_ind() - quota reach + * @uint32_t fid: [in] input netdev ID + * @uint64_t num_bytes: [in] used bytes + * + * Returns: 0 on success, negative on failure + */ +int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid, + uint64_t num_bytes) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_broadcast_wdi_quota_reach_ind, + fid, num_bytes); + + return ret; +} +EXPORT_SYMBOL(ipa_broadcast_wdi_quota_reach_ind); + +/** + * ipa_uc_wdi_get_dbpa() - To retrieve + * doorbell physical address of wlan pipes + * @param: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_uc_wdi_get_dbpa( + struct ipa_wdi_db_params *param) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_wdi_get_dbpa, param); + + return ret; +} +EXPORT_SYMBOL(ipa_uc_wdi_get_dbpa); + +/** + * ipa_uc_reg_rdyCB() - To register uC + * ready CB if uC not ready + * @inout: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_uc_reg_rdyCB( + struct ipa_wdi_uc_ready_params *inout) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_reg_rdyCB, inout); + + return ret; +} +EXPORT_SYMBOL(ipa_uc_reg_rdyCB); 
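+ +/* + * Illustrative usage sketch of the legacy WDI pipe APIs above. This block is + * compiled out and is not part of the driver logic; how the WLAN driver fills + * struct ipa_wdi_in_params is device specific, and the out.clnt_hdl field name + * used below is an assumption. + */ +#if 0 +static int wdi_pipe_bringup_example(struct ipa_wdi_in_params *in) +{ + struct ipa_wdi_out_params out; + int ret; + + /* create the WDI pipe and obtain the client handle */ + ret = ipa_connect_wdi_pipe(in, &out); + if (ret) + return ret; + + /* enable and resume the pipe before passing traffic */ + ret = ipa_enable_wdi_pipe(out.clnt_hdl); + if (ret) + goto fail_enable; + + ret = ipa_resume_wdi_pipe(out.clnt_hdl); + if (ret) + goto fail_resume; + + return 0; + +fail_resume: + ipa_disable_wdi_pipe(out.clnt_hdl); +fail_enable: + ipa_disconnect_wdi_pipe(out.clnt_hdl); + return ret; +} +#endif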
+ +/** + * ipa_uc_dereg_rdyCB() - To de-register uC ready CB + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_uc_dereg_rdyCB(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_dereg_rdyCB); + + return ret; +} +EXPORT_SYMBOL(ipa_uc_dereg_rdyCB); + +/** + * teth_bridge_init() - Initialize the Tethering bridge driver + * @params: in/out params for USB initialization API (please look at struct + * definition for more info) + * + * USB driver gets a pointer to a callback function (usb_notify_cb) and an + * associated data. USB driver installs this callback function in the call to + * ipa_connect(). + * + * Builds IPA resource manager dependency graph. + * + * Return codes: 0: success + * -EINVAL: Bad parameter + * Other negative value: Failure + */ +int teth_bridge_init(struct teth_bridge_init_params *params) +{ + int ret; + + IPA_API_DISPATCH_RETURN(teth_bridge_init, params); + + return ret; +} +EXPORT_SYMBOL(teth_bridge_init); + +/** + * teth_bridge_disconnect() - Disconnect tethering bridge module + */ +int teth_bridge_disconnect(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(teth_bridge_disconnect, client); + + return ret; +} +EXPORT_SYMBOL(teth_bridge_disconnect); + +/** + * teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call + * @connect_params: Connection info + * + * Return codes: 0: success + * -EINVAL: invalid parameters + * -EPERM: Operation not permitted as the bridge is already + * connected + */ +int teth_bridge_connect(struct teth_bridge_connect_params *connect_params) +{ + int ret; + + IPA_API_DISPATCH_RETURN(teth_bridge_connect, connect_params); + + return ret; +} +EXPORT_SYMBOL(teth_bridge_connect); + +/** + * ipa_set_client() - provide client mapping + * @index: pipe index + * @client: client type + * @uplink: uplink or downlink direction + * + * Return value: none + */ + +void ipa_set_client(int index, enum ipacm_client_enum client, bool uplink) +{ + IPA_API_DISPATCH(ipa_set_client, index, client, uplink); +} +EXPORT_SYMBOL(ipa_set_client); + +/** + * ipa_get_client() - provide client mapping + * @pipe_idx: IPA end-point index + * + * Return value: client type mapped to @pipe_idx + */ +enum ipacm_client_enum ipa_get_client(int pipe_idx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_client, pipe_idx); + + return ret; +} +EXPORT_SYMBOL(ipa_get_client); + +/** + * ipa_get_client_uplink() - provide client mapping + * @pipe_idx: IPA end-point index + * + * Return value: true if the client mapped to @pipe_idx is uplink, + * false otherwise + */ +bool ipa_get_client_uplink(int pipe_idx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_client_uplink, pipe_idx); + + return ret; +} +EXPORT_SYMBOL(ipa_get_client_uplink); + +/** + * ipa_dma_init() - Initialize IPADMA. + * + * This function initializes all IPADMA internal data and connects the DMA + * pipes: + * MEMCPY_DMA_SYNC_PROD->MEMCPY_DMA_SYNC_CONS + * MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_ASYNC_CONS + * + * Return codes: 0: success + * -EFAULT: IPADMA is already initialized + * -ENOMEM: allocating memory error + * -EPERM: pipe connection failed + */ +int ipa_dma_init(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_dma_init); + + return ret; +} +EXPORT_SYMBOL(ipa_dma_init); + +/** + * ipa_dma_enable() - Vote for IPA clocks. + * + * Return codes: 0: success + * -EINVAL: IPADMA is not initialized + * -EPERM: Operation not permitted as ipa_dma is already + * enabled + */ +int ipa_dma_enable(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_dma_enable); + + return ret; +} +EXPORT_SYMBOL(ipa_dma_enable); + +/** + * ipa_dma_disable() - Unvote for IPA clocks. + * + * Enter power save mode.
+ * + * Return codes: 0: success + * -EINVAL: IPADMA is not initialized + * -EPERM: Operation not permitted as ipa_dma is already + * disabled + * -EFAULT: cannot disable ipa_dma as there are pending + * memcpy operations + */ +int ipa_dma_disable(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_dma_disable); + + return ret; +} +EXPORT_SYMBOL(ipa_dma_disable); + +/** + * ipa_dma_sync_memcpy() - Perform synchronous memcpy using IPA. + * + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. + * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enabled or + * initialized + * -SPS_ERROR: on sps failures + * -EFAULT: other + */ +int ipa_dma_sync_memcpy(u64 dest, u64 src, int len) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_dma_sync_memcpy, dest, src, len); + + return ret; +} +EXPORT_SYMBOL(ipa_dma_sync_memcpy); + +/** + * ipa_dma_async_memcpy() - Perform asynchronous memcpy using IPA. + * + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. + * @user_cb: callback function to notify the client when the copy was done. + * @user_param: cookie for user_cb. + * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enabled or + * initialized + * -SPS_ERROR: on sps failures + * -EFAULT: descriptor FIFO is full. + */ +int ipa_dma_async_memcpy(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_dma_async_memcpy, dest, src, len, user_cb, + user_param); + + return ret; +} +EXPORT_SYMBOL(ipa_dma_async_memcpy); + +/** + * ipa_dma_uc_memcpy() - Perform a memcpy action using IPA uC + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. + * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enabled or + * initialized + * -EBADF: IPA uC is not loaded + */ +int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_dma_uc_memcpy, dest, src, len); + + return ret; +} +EXPORT_SYMBOL(ipa_dma_uc_memcpy); + +/** + * ipa_dma_destroy() - Teardown IPADMA pipes and release ipadma. + * + * This is a blocking function; it returns only after IPADMA is destroyed. + */ +void ipa_dma_destroy(void) +{ + IPA_API_DISPATCH(ipa_dma_destroy); +} +EXPORT_SYMBOL(ipa_dma_destroy); + +int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mhi_init_engine, params); + + return ret; +} +EXPORT_SYMBOL(ipa_mhi_init_engine); + +/** + * ipa_connect_mhi_pipe() - Connect pipe to IPA and start corresponding + * MHI channel + * @in: connect parameters + * @clnt_hdl: [out] client handle for this pipe + * + * This function is called by IPA MHI client driver on MHI channel start. + * This function is called after MHI engine was started.
+ * This function is doing the following: + * - Send command to uC to start corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_connect_mhi_pipe, in, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_connect_mhi_pipe); + +/** + * ipa_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding + * MHI channel + * @in: connect parameters + * @clnt_hdl: [out] client handle for this pipe + * + * This function is called by IPA MHI client driver on MHI channel reset. + * This function is called after MHI channel was started. + * This function is doing the following: + * - Send command to uC to reset corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa_disconnect_mhi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disconnect_mhi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_disconnect_mhi_pipe); + +bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client) +{ + bool ret; + + IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_stop_gsi_channel, client); + + return ret; +} + +int ipa_uc_mhi_reset_channel(int channelHandle) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_reset_channel, channelHandle); + + return ret; +} + +bool ipa_mhi_sps_channel_empty(enum ipa_client_type client) +{ + bool ret; + + IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_sps_channel_empty, client); + + return ret; +} + +int ipa_qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_qmi_enable_force_clear_datapath_send, req); + + return ret; +} + +int ipa_qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_qmi_disable_force_clear_datapath_send, req); + + return ret; +} + +int ipa_generate_tag_process(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_generate_tag_process); + + return ret; +} + +int ipa_disable_sps_pipe(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disable_sps_pipe, client); + + return ret; +} + +int ipa_mhi_reset_channel_internal(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mhi_reset_channel_internal, client); + + return ret; +} + +int ipa_mhi_start_channel_internal(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mhi_start_channel_internal, client); + + return ret; +} + +void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb) +{ + IPA_API_DISPATCH(ipa_get_holb, ep_idx, holb); +} + +void ipa_set_tag_process_before_gating(bool val) +{ + IPA_API_DISPATCH(ipa_set_tag_process_before_gating, val); +} + +int ipa_mhi_query_ch_info(enum ipa_client_type client, + struct gsi_chan_info *ch_info) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mhi_query_ch_info, client, ch_info); + + return ret; +} + +int ipa_uc_mhi_suspend_channel(int channelHandle) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_suspend_channel, channelHandle); + + return ret; +} + +int ipa_uc_mhi_stop_event_update_channel(int channelHandle) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_stop_event_update_channel, + channelHandle); + + return ret; +} + +bool ipa_has_open_aggr_frame(enum ipa_client_type client) +{ + bool ret; + + IPA_API_DISPATCH_RETURN_BOOL(ipa_has_open_aggr_frame, client); + 
+ return ret; +} + +int ipa_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mhi_resume_channels_internal, client, + LPTransitionRejected, brstmode_enabled, ch_scratch, + index); + + return ret; +} + +int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_send_dl_ul_sync_info, + cmd); + + return ret; +} + +int ipa_mhi_destroy_channel(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mhi_destroy_channel, client); + + return ret; +} + +int ipa_uc_mhi_init(void (*ready_cb)(void), + void (*wakeup_request_cb)(void)) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_init, ready_cb, wakeup_request_cb); + + return ret; +} + +void ipa_uc_mhi_cleanup(void) +{ + IPA_API_DISPATCH(ipa_uc_mhi_cleanup); +} + +int ipa_uc_mhi_print_stats(char *dbg_buff, int size) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_print_stats, dbg_buff, size); + + return ret; +} + +/** + * ipa_uc_state_check() - Check the status of the uC interface + * + * Return value: 0 if the uC is loaded, interface is initialized + * and there was no recent failure in one of the commands. + * A negative value is returned otherwise. + */ +int ipa_uc_state_check(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_state_check); + + return ret; +} + +int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_write_qmap_id, param_in); + + return ret; +} +EXPORT_SYMBOL(ipa_write_qmap_id); + +/** + * ipa_add_interrupt_handler() - Adds handler to an interrupt type + * @interrupt: Interrupt type + * @handler: The handler to be added + * @deferred_flag: whether the handler processing should be deferred in + * a workqueue + * @private_data: the client's private data + * + * Adds handler to an interrupt type and enable the specific bit + * in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled + */ +int ipa_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_interrupt_handler, interrupt, handler, + deferred_flag, private_data); + + return ret; +} +EXPORT_SYMBOL(ipa_add_interrupt_handler); + +/** + * ipa_remove_interrupt_handler() - Removes handler to an interrupt type + * @interrupt: Interrupt type + * + * Removes the handler and disable the specific bit in IRQ_EN register + */ +int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_remove_interrupt_handler, interrupt); + + return ret; +} +EXPORT_SYMBOL(ipa_remove_interrupt_handler); + +/** + * ipa_restore_suspend_handler() - restores the original suspend IRQ handler + * as it was registered in the IPA init sequence. 
+ * Return codes: + * 0: success + * -EPERM: failed to remove current handler or failed to add original handler + */ +int ipa_restore_suspend_handler(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_restore_suspend_handler); + + return ret; +} +EXPORT_SYMBOL(ipa_restore_suspend_handler); + +/** + * ipa_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM + * + * Function is rate limited to avoid flooding kernel log buffer + */ +void ipa_bam_reg_dump(void) +{ + IPA_API_DISPATCH(ipa_bam_reg_dump); +} +EXPORT_SYMBOL(ipa_bam_reg_dump); + +/** + * ipa_get_ep_mapping() - provide endpoint mapping + * @client: client type + * + * Return value: endpoint mapping + */ +int ipa_get_ep_mapping(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_ep_mapping, client); + + return ret; +} +EXPORT_SYMBOL(ipa_get_ep_mapping); + +/** + * ipa_is_ready() - check if IPA module was initialized + * successfully + * + * Return value: true for yes; false for no + */ +bool ipa_is_ready(void) +{ + if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_is_ready) + return false; + return ipa_api_ctrl->ipa_is_ready(); +} +EXPORT_SYMBOL(ipa_is_ready); + +/** + * ipa_proxy_clk_vote() - called to add IPA clock proxy vote + * + * Return value: none + */ +void ipa_proxy_clk_vote(void) +{ + IPA_API_DISPATCH(ipa_proxy_clk_vote); +} +EXPORT_SYMBOL(ipa_proxy_clk_vote); + +/** + * ipa_proxy_clk_unvote() - called to remove IPA clock proxy vote + * + * Return value: none + */ +void ipa_proxy_clk_unvote(void) +{ + IPA_API_DISPATCH(ipa_proxy_clk_unvote); +} +EXPORT_SYMBOL(ipa_proxy_clk_unvote); + +/** + * ipa_get_hw_type() - Return IPA HW version + * + * Return value: enum ipa_hw_type + */ +enum ipa_hw_type ipa_get_hw_type(void) +{ + return ipa_api_hw_type; +} +EXPORT_SYMBOL(ipa_get_hw_type); + +/** + * ipa_is_client_handle_valid() - check if IPA client handle is valid handle + * + * Return value: true for yes; false for no + */ +bool ipa_is_client_handle_valid(u32 clnt_hdl) +{ + if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_is_client_handle_valid) + return false; + return ipa_api_ctrl->ipa_is_client_handle_valid(clnt_hdl); +} +EXPORT_SYMBOL(ipa_is_client_handle_valid); + +/** + * ipa_get_client_mapping() - provide client mapping + * @pipe_idx: IPA end-point number + * + * Return value: client mapping + */ +enum ipa_client_type ipa_get_client_mapping(int pipe_idx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_client_mapping, pipe_idx); + + return ret; +} +EXPORT_SYMBOL(ipa_get_client_mapping); + +/** + * ipa_get_rm_resource_from_ep() - get the IPA_RM resource which is related to + * the supplied pipe index. + * + * @pipe_idx: + * + * Return value: IPA_RM resource related to the pipe, -1 if a resource was not + * found. 
+ */ +enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_rm_resource_from_ep, pipe_idx); + + return ret; +} +EXPORT_SYMBOL(ipa_get_rm_resource_from_ep); + +/** + * ipa_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt + * + * Return value: true if modem configures embedded pipe flt, false otherwise + */ +bool ipa_get_modem_cfg_emb_pipe_flt(void) +{ + if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_get_modem_cfg_emb_pipe_flt) + return false; + return ipa_api_ctrl->ipa_get_modem_cfg_emb_pipe_flt(); +} +EXPORT_SYMBOL(ipa_get_modem_cfg_emb_pipe_flt); + +/** + * ipa_get_transport_type()- Return ipa_ctx->transport_prototype + * + * Return value: enum ipa_transport_type + */ +enum ipa_transport_type ipa_get_transport_type(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_transport_type); + + return ret; +} +EXPORT_SYMBOL(ipa_get_transport_type); + +/** + * ipa_get_smmu_domain()- Return the smmu domain + * + * Return value: pointer to iommu domain if smmu_cb valid, NULL otherwise + */ +struct iommu_domain *ipa_get_smmu_domain(void) +{ + struct iommu_domain *ret; + + IPA_API_DISPATCH_RETURN_PTR(ipa_get_smmu_domain); + + return ret; +} +EXPORT_SYMBOL(ipa_get_smmu_domain); + +/** + * ipa_disable_apps_wan_cons_deaggr()- set + * ipa_ctx->ipa_client_apps_wan_cons_agg_gro + * + * Return value: 0 or negative in case of failure + */ +int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disable_apps_wan_cons_deaggr, agg_size, + agg_count); + + return ret; +} +EXPORT_SYMBOL(ipa_disable_apps_wan_cons_deaggr); + +/** + * ipa_get_dma_dev()- Returns ipa_ctx dma dev pointer + * + * Return value: pointer to ipa_ctx dma dev pointer + */ +struct device *ipa_get_dma_dev(void) +{ + struct device *ret; + + IPA_API_DISPATCH_RETURN_PTR(ipa_get_dma_dev); + + return ret; +} +EXPORT_SYMBOL(ipa_get_dma_dev); + +/** + * ipa_release_wdi_mapping() - release iommu mapping + * + * + * @num_buffers: number of buffers to be released + * + * @info: pointer to wdi buffers info array + * + * Return codes: 0 : success + * negative : error + */ +int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_release_wdi_mapping, num_buffers, info); + + return ret; +} +EXPORT_SYMBOL(ipa_release_wdi_mapping); + +/** + * ipa_create_wdi_mapping() - Perform iommu mapping + * + * + * @num_buffers: number of buffers to be mapped + * + * @info: pointer to wdi buffers info array + * + * Return codes: 0 : success + * negative : error + */ +int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_create_wdi_mapping, num_buffers, info); + + return ret; +} +EXPORT_SYMBOL(ipa_create_wdi_mapping); + +/** + * ipa_get_gsi_ep_info() - provide gsi ep information + * @client: IPA client type + * + * Return value: pointer to ipa_gsi_ep_info + */ +const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(enum ipa_client_type client) +{ + if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_get_gsi_ep_info) + return NULL; + return ipa_api_ctrl->ipa_get_gsi_ep_info(client); +} +EXPORT_SYMBOL(ipa_get_gsi_ep_info); + +/** + * ipa_stop_gsi_channel()- Stops a GSI channel in IPA + * + * Return value: 0 on success, negative otherwise + */ +int ipa_stop_gsi_channel(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_stop_gsi_channel, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_stop_gsi_channel); + 
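+ +/* + * Illustrative usage sketch for query helpers such as ipa_is_ready() and + * ipa_get_ep_mapping() above. This block is compiled out and is not part of + * the driver logic; IPA_CLIENT_APPS_WAN_CONS is only an example client and + * the negative return of ipa_get_ep_mapping() for an unmapped client is an + * assumption here. + */ +#if 0 +static int query_wan_cons_ep_example(void) +{ + int ep_idx; + + /* the mapping APIs are only valid once the IPA driver is up */ + if (!ipa_is_ready()) + return -EAGAIN; + + /* resolve the end-point index backing the example client */ + ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (ep_idx < 0) + return -ENODEV; + + return ep_idx; +} +#endif +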
+/** + * ipa_start_gsi_channel() - Starts a GSI channel in IPA + * + * Return value: 0 on success, negative otherwise + */ +int ipa_start_gsi_channel(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_start_gsi_channel, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_start_gsi_channel); + +/** + * ipa_is_vlan_mode() - check if a LAN driver should load in VLAN mode + * @iface: type of vlan capable device + * @res: query result: true for vlan mode, false for non vlan mode + * + * API must be called after ipa_is_ready() returns true, otherwise it will fail + * + * Returns: 0 on success, negative on failure + */ +int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_is_vlan_mode, iface, res); + + return ret; + +} +EXPORT_SYMBOL(ipa_is_vlan_mode); + +/** + * ipa_get_version_string() - Get string representation of IPA version + * @ver: IPA version + * + * Return: Constant string representation + */ +const char *ipa_get_version_string(enum ipa_hw_type ver) +{ + const char *str; + + switch (ver) { + case IPA_HW_v1_0: + str = "1.0"; + break; + case IPA_HW_v1_1: + str = "1.1"; + break; + case IPA_HW_v2_0: + str = "2.0"; + break; + case IPA_HW_v2_1: + str = "2.1"; + break; + case IPA_HW_v2_5: + str = "2.5/2.6"; + break; + case IPA_HW_v2_6L: + str = "2.6L"; + break; + case IPA_HW_v3_0: + str = "3.0"; + break; + case IPA_HW_v3_1: + str = "3.1"; + break; + case IPA_HW_v3_5: + str = "3.5"; + break; + case IPA_HW_v3_5_1: + str = "3.5.1"; + break; + case IPA_HW_v4_0: + str = "4.0"; + break; + case IPA_HW_v4_1: + str = "4.1"; + break; + case IPA_HW_v4_2: + str = "4.2"; + break; + case IPA_HW_v4_5: + str = "4.5"; + break; + default: + str = "Invalid version"; + break; + } + + return str; +} +EXPORT_SYMBOL(ipa_get_version_string); + +static const struct of_device_id ipa_plat_drv_match[] = { + { .compatible = "qcom,ipa", }, + { .compatible = "qcom,ipa-smmu-ap-cb", }, + { .compatible = "qcom,ipa-smmu-wlan-cb", }, + { .compatible = "qcom,ipa-smmu-uc-cb", }, + { .compatible = "qcom,smp2p-map-ipa-1-in", }, + { .compatible = "qcom,smp2p-map-ipa-1-out", }, + {} +}; + +/*********************************************************/ +/* PCIe Version */ +/*********************************************************/ + +static const struct of_device_id ipa_pci_drv_match[] = { + { .compatible = "qcom,ipa", }, + {} +}; + +/* + * Forward declarations of static functions required for PCI + * registration + * + * VENDOR and DEVICE should be defined in pci_ids.h + */ +static int ipa_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void ipa_pci_remove(struct pci_dev *pdev); +static void ipa_pci_shutdown(struct pci_dev *pdev); +static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *dev, + pci_channel_state_t state); +static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *dev); +static void ipa_pci_io_resume(struct pci_dev *dev); + +#define LOCAL_VENDOR 0x17CB +#define LOCAL_DEVICE 0x00ff + +static const char ipa_pci_driver_name[] = "qcipav3"; + +static const struct pci_device_id ipa_pci_tbl[] = { + { PCI_DEVICE(LOCAL_VENDOR, LOCAL_DEVICE) }, + { 0, 0, 0, 0, 0, 0, 0 } +}; + +MODULE_DEVICE_TABLE(pci, ipa_pci_tbl); + +/* PCI Error Recovery */ +static const struct pci_error_handlers ipa_pci_err_handler = { + .error_detected = ipa_pci_io_error_detected, + .slot_reset = ipa_pci_io_slot_reset, + .resume = ipa_pci_io_resume, +}; + +static struct pci_driver ipa_pci_driver = { + .name = ipa_pci_driver_name, + .id_table = ipa_pci_tbl, + .probe =
ipa_pci_probe, + .remove = ipa_pci_remove, + .shutdown = ipa_pci_shutdown, + .err_handler = &ipa_pci_err_handler +}; + +static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p) +{ + int result; + + /* + * IPA probe function can be called multiple times as the same probe + * function handles multiple compatible strings + */ + pr_debug("ipa: IPA driver probing started for %s\n", + pdev_p->dev.of_node->name); + + if (!ipa_api_ctrl) { + ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL); + if (!ipa_api_ctrl) + return -ENOMEM; + + /* Get IPA HW Version */ + result = of_property_read_u32(pdev_p->dev.of_node, + "qcom,ipa-hw-ver", &ipa_api_hw_type); + if ((result) || (ipa_api_hw_type == 0)) { + pr_err("ipa: get resource failed for ipa-hw-ver!\n"); + kfree(ipa_api_ctrl); + ipa_api_ctrl = NULL; + return -ENODEV; + } + pr_debug("ipa: ipa_api_hw_type = %d\n", ipa_api_hw_type); + } + + /* call probe based on IPA HW version */ + switch (ipa_api_hw_type) { + case IPA_HW_v3_0: + case IPA_HW_v3_1: + case IPA_HW_v3_5: + case IPA_HW_v3_5_1: + case IPA_HW_v4_0: + case IPA_HW_v4_1: + case IPA_HW_v4_2: + case IPA_HW_v4_5: + result = ipa3_plat_drv_probe(pdev_p, ipa_api_ctrl, + ipa_plat_drv_match); + break; + default: + pr_err("ipa: unsupported version %d\n", ipa_api_hw_type); + return -EPERM; + } + + if (result && result != -EPROBE_DEFER) + pr_err("ipa: ipa_plat_drv_probe failed\n"); + + return result; +} + +static int ipa_ap_suspend(struct device *dev) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_ap_suspend, dev); + + return ret; +} + +static int ipa_ap_resume(struct device *dev) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_ap_resume, dev); + + return ret; +} + +int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data), + void *user_data) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_register_ipa_ready_cb, + ipa_ready_cb, user_data); + + return ret; +} +EXPORT_SYMBOL(ipa_register_ipa_ready_cb); + +/** + * ipa_inc_client_enable_clks() - Increase active clients counter, and + * enable ipa clocks if necessary + * + * Please do not use this API, use the wrapper macros instead (ipa_i.h) + * IPA_ACTIVE_CLIENTS_INC_XXX(); + * + * Return codes: + * None + */ +void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id) +{ + IPA_API_DISPATCH(ipa_inc_client_enable_clks, id); +} +EXPORT_SYMBOL(ipa_inc_client_enable_clks); + +/** + * ipa_dec_client_disable_clks() - Decrease active clients counter, and + * disable ipa clocks if necessary + * + * Please do not use this API, use the wrapper macros instead (ipa_i.h) + * IPA_ACTIVE_CLIENTS_DEC_XXX(); + * + * Return codes: + * None + */ +void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id) +{ + IPA_API_DISPATCH(ipa_dec_client_disable_clks, id); +} +EXPORT_SYMBOL(ipa_dec_client_disable_clks); + +/** + * ipa_inc_client_enable_clks_no_block() - Only increment the number of active + * clients if no asynchronous actions should be done. Asynchronous actions are + * locking a mutex and waking up IPA HW.
+ * + * Please do not use this API, use the wrapper macros instead(ipa_i.h) + * + * + * Return codes : 0 for success + * -EPERM if an asynchronous action should have been done + */ +int ipa_inc_client_enable_clks_no_block( + struct ipa_active_client_logging_info *id) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_inc_client_enable_clks_no_block, id); + + return ret; +} +EXPORT_SYMBOL(ipa_inc_client_enable_clks_no_block); + +/** + * ipa_suspend_resource_no_block() - suspend client endpoints related to the + * IPA_RM resource and decrement active clients counter. This function is + * guaranteed to avoid sleeping. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. + */ +int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_suspend_resource_no_block, resource); + + return ret; +} +EXPORT_SYMBOL(ipa_suspend_resource_no_block); +/** + * ipa_resume_resource() - resume client endpoints related to the IPA_RM + * resource. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. + */ +int ipa_resume_resource(enum ipa_rm_resource_name resource) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_resume_resource, resource); + + return ret; +} +EXPORT_SYMBOL(ipa_resume_resource); + +/** + * ipa_suspend_resource_sync() - suspend client endpoints related to the IPA_RM + * resource and decrement active clients counter, which may result in clock + * gating of IPA clocks. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. + */ +int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_suspend_resource_sync, resource); + + return ret; +} +EXPORT_SYMBOL(ipa_suspend_resource_sync); + +/** + * ipa_set_required_perf_profile() - set IPA to the specified performance + * profile based on the bandwidth, unless minimum voltage required is + * higher. In this case the floor_voltage specified will be used. + * @floor_voltage: minimum voltage to operate + * @bandwidth_mbps: needed bandwidth from IPA + * + * Return codes: 0 on success, negative on failure. 
+ */ +int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_set_required_perf_profile, floor_voltage, + bandwidth_mbps); + + return ret; +} +EXPORT_SYMBOL(ipa_set_required_perf_profile); + +/** + * ipa_get_ipc_logbuf() - return a pointer to IPA driver IPC log + */ +void *ipa_get_ipc_logbuf(void) +{ + void *ret; + + IPA_API_DISPATCH_RETURN_PTR(ipa_get_ipc_logbuf); + + return ret; +} +EXPORT_SYMBOL(ipa_get_ipc_logbuf); + +/** + * ipa_get_ipc_logbuf_low() - return a pointer to IPA driver IPC low prio log + */ +void *ipa_get_ipc_logbuf_low(void) +{ + void *ret; + + IPA_API_DISPATCH_RETURN_PTR(ipa_get_ipc_logbuf_low); + + return ret; +} +EXPORT_SYMBOL(ipa_get_ipc_logbuf_low); + +/** + * ipa_assert() - general function for assertion + */ +void ipa_assert(void) +{ + pr_err("IPA: unrecoverable error has occurred, asserting\n"); + BUG(); +} + +/** + * ipa_rx_poll() - Poll the rx packets from IPA HW in the + * softirq context + * + * @budget: number of packets to be polled in single iteration + * + * Return codes: >= 0 : Actual number of packets polled + * + */ +int ipa_rx_poll(u32 clnt_hdl, int budget) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_rx_poll, clnt_hdl, budget); + + return ret; +} +EXPORT_SYMBOL(ipa_rx_poll); + +/** + * ipa_recycle_wan_skb() - Recycle the Wan skb + * + * @skb: skb that needs to recycle + * + */ +void ipa_recycle_wan_skb(struct sk_buff *skb) +{ + IPA_API_DISPATCH(ipa_recycle_wan_skb, skb); +} +EXPORT_SYMBOL(ipa_recycle_wan_skb); + +/** + * ipa_setup_uc_ntn_pipes() - setup uc offload pipes + */ +int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_setup_uc_ntn_pipes, inp, + notify, priv, hdr_len, outp); + + return ret; +} + +/** + * ipa_tear_down_uc_offload_pipes() - tear down uc offload pipes + */ +int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, + int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_tear_down_uc_offload_pipes, ipa_ep_idx_ul, + ipa_ep_idx_dl, params); + + return ret; +} + +/** + * ipa_get_pdev() - return a pointer to IPA dev struct + * + * Return value: a pointer to IPA dev struct + * + */ +struct device *ipa_get_pdev(void) +{ + struct device *ret; + + IPA_API_DISPATCH_RETURN_PTR(ipa_get_pdev); + + return ret; +} +EXPORT_SYMBOL(ipa_get_pdev); + +int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data), + void *user_data) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_ntn_uc_reg_rdyCB, + ipauc_ready_cb, user_data); + + return ret; +} +EXPORT_SYMBOL(ipa_ntn_uc_reg_rdyCB); + +void ipa_ntn_uc_dereg_rdyCB(void) +{ + IPA_API_DISPATCH(ipa_ntn_uc_dereg_rdyCB); +} +EXPORT_SYMBOL(ipa_ntn_uc_dereg_rdyCB); + +int ipa_get_smmu_params(struct ipa_smmu_in_params *in, + struct ipa_smmu_out_params *out) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_smmu_params, in, out); + + return ret; +} +EXPORT_SYMBOL(ipa_get_smmu_params); + +/** + * ipa_conn_wdi_pipes() - connect wdi pipes + */ +int ipa_conn_wdi_pipes(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out, + ipa_wdi_meter_notifier_cb wdi_notify) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_conn_wdi_pipes, in, out, wdi_notify); + + return ret; +} + +/** + * ipa_disconn_wdi_pipes() - disconnect wdi pipes + */ +int ipa_disconn_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + int ret; + + 
IPA_API_DISPATCH_RETURN(ipa_disconn_wdi_pipes, ipa_ep_idx_tx, + ipa_ep_idx_rx); + + return ret; +} + +/** + * ipa_enable_wdi_pipes() - enable wdi pipes + */ +int ipa_enable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_enable_wdi_pipes, ipa_ep_idx_tx, + ipa_ep_idx_rx); + + return ret; +} + +/** + * ipa_disable_wdi_pipes() - disable wdi pipes + */ +int ipa_disable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disable_wdi_pipes, ipa_ep_idx_tx, + ipa_ep_idx_rx); + + return ret; +} + +/** + * ipa_tz_unlock_reg() - Allow AP access to memory regions controlled by TZ + */ +int ipa_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_tz_unlock_reg, reg_info, num_regs); + + return ret; +} + +/** + * ipa_pm_is_used() - Returns if IPA PM framework is used + */ +bool ipa_pm_is_used(void) +{ + bool ret; + + IPA_API_DISPATCH_RETURN(ipa_pm_is_used); + + return ret; +} + +static const struct dev_pm_ops ipa_pm_ops = { + .suspend_noirq = ipa_ap_suspend, + .resume_noirq = ipa_ap_resume, +}; + +static struct platform_driver ipa_plat_drv = { + .probe = ipa_generic_plat_drv_probe, + .driver = { + .name = DRV_NAME, + .pm = &ipa_pm_ops, + .of_match_table = ipa_plat_drv_match, + }, +}; + +/*********************************************************/ +/* PCIe Version */ +/*********************************************************/ + +static int ipa_pci_probe( + struct pci_dev *pci_dev, + const struct pci_device_id *ent) +{ + int result; + + if (!pci_dev || !ent) { + pr_err( + "Bad arg: pci_dev (%pK) and/or ent (%pK)\n", + pci_dev, ent); + return -EOPNOTSUPP; + } + + if (!ipa_api_ctrl) { + ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL); + if (ipa_api_ctrl == NULL) + return -ENOMEM; + /* Get IPA HW Version */ + result = of_property_read_u32(NULL, + "qcom,ipa-hw-ver", &ipa_api_hw_type); + if (result || ipa_api_hw_type == 0) { + pr_err("ipa: get resource failed for ipa-hw-ver!\n"); + kfree(ipa_api_ctrl); + ipa_api_ctrl = NULL; + return -ENODEV; + } + pr_debug("ipa: ipa_api_hw_type = %d\n", ipa_api_hw_type); + } + + /* + * Call a reduced version of platform_probe appropriate for PCIe + */ + result = ipa3_pci_drv_probe(pci_dev, ipa_api_ctrl, ipa_pci_drv_match); + + if (result && result != -EPROBE_DEFER) + pr_err("ipa: ipa3_pci_drv_probe failed\n"); + + if (running_emulation) + ipa_ut_module_init(); + + return result; +} + +static void ipa_pci_remove(struct pci_dev *pci_dev) +{ + if (running_emulation) + ipa_ut_module_exit(); +} + +static void ipa_pci_shutdown(struct pci_dev *pci_dev) +{ +} + +static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *pci_dev, + pci_channel_state_t state) +{ + return 0; +} + +static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *pci_dev) +{ + return 0; +} + +static void ipa_pci_io_resume(struct pci_dev *pci_dev) +{ +} + +static int __init ipa_module_init(void) +{ + pr_debug("IPA module init\n"); + + if (running_emulation) { + /* Register as a PCI device driver */ + return pci_register_driver(&ipa_pci_driver); + } + /* Register as a platform device driver */ + return platform_driver_register(&ipa_plat_drv); +} +subsys_initcall(ipa_module_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA HW device driver"); diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h new file mode 100644 index 000000000000..4da9ba14412c --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_api.h @@ 
-0,0 +1,450 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include "ipa_common_i.h" + +#ifndef _IPA_API_H_ +#define _IPA_API_H_ + +struct ipa_api_controller { + int (*ipa_reset_endpoint)(u32 clnt_hdl); + + int (*ipa_clear_endpoint_delay)(u32 clnt_hdl); + + int (*ipa_disable_endpoint)(u32 clnt_hdl); + + int (*ipa_cfg_ep)(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg); + + int (*ipa_cfg_ep_nat)(u32 clnt_hdl, + const struct ipa_ep_cfg_nat *ipa_ep_cfg); + + int (*ipa_cfg_ep_conn_track)(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ipa_ep_cfg); + + int (*ipa_cfg_ep_hdr)(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr *ipa_ep_cfg); + + int (*ipa_cfg_ep_hdr_ext)(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg); + + int (*ipa_cfg_ep_mode)(u32 clnt_hdl, + const struct ipa_ep_cfg_mode *ipa_ep_cfg); + + int (*ipa_cfg_ep_aggr)(u32 clnt_hdl, + const struct ipa_ep_cfg_aggr *ipa_ep_cfg); + + int (*ipa_cfg_ep_deaggr)(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ipa_ep_cfg); + + int (*ipa_cfg_ep_route)(u32 clnt_hdl, + const struct ipa_ep_cfg_route *ipa_ep_cfg); + + int (*ipa_cfg_ep_holb)(u32 clnt_hdl, + const struct ipa_ep_cfg_holb *ipa_ep_cfg); + + int (*ipa_cfg_ep_cfg)(u32 clnt_hdl, + const struct ipa_ep_cfg_cfg *ipa_ep_cfg); + + int (*ipa_cfg_ep_metadata_mask)(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg); + + int (*ipa_cfg_ep_holb_by_client)(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ipa_ep_cfg); + + int (*ipa_cfg_ep_ctrl)(u32 clnt_hdl, + const struct ipa_ep_cfg_ctrl *ep_ctrl); + + int (*ipa_add_hdr)(struct ipa_ioc_add_hdr *hdrs); + + int (*ipa_add_hdr_usr)(struct ipa_ioc_add_hdr *hdrs, bool user_only); + + int (*ipa_del_hdr)(struct ipa_ioc_del_hdr *hdls); + + int (*ipa_commit_hdr)(void); + + int (*ipa_reset_hdr)(bool user_only); + + int (*ipa_get_hdr)(struct ipa_ioc_get_hdr *lookup); + + int (*ipa_put_hdr)(u32 hdr_hdl); + + int (*ipa_copy_hdr)(struct ipa_ioc_copy_hdr *copy); + + int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); + + int (*ipa_del_hdr_proc_ctx)(struct ipa_ioc_del_hdr_proc_ctx *hdls); + + int (*ipa_add_rt_rule)(struct ipa_ioc_add_rt_rule *rules); + + int (*ipa_add_rt_rule_usr)(struct ipa_ioc_add_rt_rule *rules, + bool user_only); + + int (*ipa_del_rt_rule)(struct ipa_ioc_del_rt_rule *hdls); + + int (*ipa_commit_rt)(enum ipa_ip_type ip); + + int (*ipa_reset_rt)(enum ipa_ip_type ip, bool user_only); + + int (*ipa_get_rt_tbl)(struct ipa_ioc_get_rt_tbl *lookup); + + int (*ipa_put_rt_tbl)(u32 rt_tbl_hdl); + + int (*ipa_query_rt_index)(struct ipa_ioc_get_rt_tbl_indx *in); + + int (*ipa_mdfy_rt_rule)(struct ipa_ioc_mdfy_rt_rule *rules); + + int (*ipa_add_flt_rule)(struct ipa_ioc_add_flt_rule *rules); + + int (*ipa_add_flt_rule_usr)(struct ipa_ioc_add_flt_rule *rules, + bool user_only); + + int (*ipa_del_flt_rule)(struct ipa_ioc_del_flt_rule *hdls); + + int (*ipa_mdfy_flt_rule)(struct ipa_ioc_mdfy_flt_rule *rules); + + int (*ipa_commit_flt)(enum ipa_ip_type ip); + + int (*ipa_reset_flt)(enum ipa_ip_type ip, bool user_only); + + int (*ipa_allocate_nat_device)(struct ipa_ioc_nat_alloc_mem *mem); + + int (*ipa_allocate_nat_table)( + struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc); + + int (*ipa_allocate_ipv6ct_table)( + struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc); + + int (*ipa_nat_init_cmd)(struct ipa_ioc_v4_nat_init *init); + + int (*ipa_ipv6ct_init_cmd)(struct 
ipa_ioc_ipv6ct_init *init); + + int (*ipa_nat_dma_cmd)(struct ipa_ioc_nat_dma_cmd *dma); + + int (*ipa_table_dma_cmd)(struct ipa_ioc_nat_dma_cmd *dma); + + int (*ipa_nat_del_cmd)(struct ipa_ioc_v4_nat_del *del); + + int (*ipa_del_nat_table)(struct ipa_ioc_nat_ipv6ct_table_del *del); + + int (*ipa_del_ipv6ct_table)(struct ipa_ioc_nat_ipv6ct_table_del *del); + + int (*ipa_nat_mdfy_pdn)(struct ipa_ioc_nat_pdn_entry *mdfy_pdn); + + int (*ipa_send_msg)(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback); + + int (*ipa_register_pull_msg)(struct ipa_msg_meta *meta, + ipa_msg_pull_fn callback); + + int (*ipa_deregister_pull_msg)(struct ipa_msg_meta *meta); + + int (*ipa_register_intf)(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx); + + int (*ipa_register_intf_ext)(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext); + + int (*ipa_deregister_intf)(const char *name); + + int (*ipa_set_aggr_mode)(enum ipa_aggr_mode mode); + + int (*ipa_set_qcncm_ndp_sig)(char sig[3]); + + int (*ipa_set_single_ndp_per_mbim)(bool enable); + + int (*ipa_tx_dp)(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata); + + int (*ipa_tx_dp_mul)(enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc); + + void (*ipa_free_skb)(struct ipa_rx_data *data); + + int (*ipa_setup_sys_pipe)(struct ipa_sys_connect_params *sys_in, + u32 *clnt_hdl); + + int (*ipa_teardown_sys_pipe)(u32 clnt_hdl); + + int (*ipa_sys_setup)(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_bam_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status); + + int (*ipa_sys_teardown)(u32 clnt_hdl); + + int (*ipa_sys_update_gsi_hdls)(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl); + + int (*ipa_connect_wdi_pipe)(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out); + + int (*ipa_disconnect_wdi_pipe)(u32 clnt_hdl); + + int (*ipa_enable_wdi_pipe)(u32 clnt_hdl); + + int (*ipa_disable_wdi_pipe)(u32 clnt_hdl); + + int (*ipa_resume_wdi_pipe)(u32 clnt_hdl); + + int (*ipa_suspend_wdi_pipe)(u32 clnt_hdl); + + int (*ipa_get_wdi_stats)(struct IpaHwStatsWDIInfoData_t *stats); + + u16 (*ipa_get_smem_restr_bytes)(void); + + int (*ipa_broadcast_wdi_quota_reach_ind)(uint32_t fid, + uint64_t num_bytes); + + int (*ipa_uc_wdi_get_dbpa)(struct ipa_wdi_db_params *out); + + int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param); + + int (*ipa_uc_dereg_rdyCB)(void); + + int (*teth_bridge_init)(struct teth_bridge_init_params *params); + + int (*teth_bridge_disconnect)(enum ipa_client_type client); + + int (*teth_bridge_connect)( + struct teth_bridge_connect_params *connect_params); + + void (*ipa_set_client)( + int index, enum ipacm_client_enum client, bool uplink); + + enum ipacm_client_enum (*ipa_get_client)(int pipe_idx); + + bool (*ipa_get_client_uplink)(int pipe_idx); + + int (*ipa_dma_init)(void); + + int (*ipa_dma_enable)(void); + + int (*ipa_dma_disable)(void); + + int (*ipa_dma_sync_memcpy)(u64 dest, u64 src, int len); + + int (*ipa_dma_async_memcpy)(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param); + + int (*ipa_dma_uc_memcpy)(phys_addr_t dest, phys_addr_t src, int len); + + void (*ipa_dma_destroy)(void); + + bool (*ipa_has_open_aggr_frame)(enum ipa_client_type client); + + int (*ipa_generate_tag_process)(void); + + int (*ipa_disable_sps_pipe)(enum ipa_client_type client); + + void (*ipa_set_tag_process_before_gating)(bool val); + + int 
(*ipa_mhi_init_engine)(struct ipa_mhi_init_engine *params); + + int (*ipa_connect_mhi_pipe)(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl); + + int (*ipa_disconnect_mhi_pipe)(u32 clnt_hdl); + + bool (*ipa_mhi_stop_gsi_channel)(enum ipa_client_type client); + + int (*ipa_qmi_disable_force_clear)(u32 request_id); + + int (*ipa_qmi_enable_force_clear_datapath_send)( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req); + + int (*ipa_qmi_disable_force_clear_datapath_send)( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req); + + bool (*ipa_mhi_sps_channel_empty)(enum ipa_client_type client); + + int (*ipa_mhi_reset_channel_internal)(enum ipa_client_type client); + + int (*ipa_mhi_start_channel_internal)(enum ipa_client_type client); + + void (*ipa_get_holb)(int ep_idx, struct ipa_ep_cfg_holb *holb); + + int (*ipa_mhi_query_ch_info)(enum ipa_client_type client, + struct gsi_chan_info *ch_info); + + int (*ipa_mhi_resume_channels_internal)( + enum ipa_client_type client, + bool LPTransitionRejected, + bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, + u8 index); + + int (*ipa_mhi_destroy_channel)(enum ipa_client_type client); + + int (*ipa_uc_mhi_send_dl_ul_sync_info) + (union IpaHwMhiDlUlSyncCmdData_t *cmd); + + int (*ipa_uc_mhi_init) + (void (*ready_cb)(void), void (*wakeup_request_cb)(void)); + + void (*ipa_uc_mhi_cleanup)(void); + + int (*ipa_uc_mhi_print_stats)(char *dbg_buff, int size); + + int (*ipa_uc_mhi_reset_channel)(int channelHandle); + + int (*ipa_uc_mhi_suspend_channel)(int channelHandle); + + int (*ipa_uc_mhi_stop_event_update_channel)(int channelHandle); + + int (*ipa_uc_state_check)(void); + + int (*ipa_write_qmap_id)(struct ipa_ioc_write_qmapid *param_in); + + int (*ipa_add_interrupt_handler)(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data); + + int (*ipa_remove_interrupt_handler)(enum ipa_irq_type interrupt); + + int (*ipa_restore_suspend_handler)(void); + + void (*ipa_bam_reg_dump)(void); + + int (*ipa_get_ep_mapping)(enum ipa_client_type client); + + bool (*ipa_is_ready)(void); + + void (*ipa_proxy_clk_vote)(void); + + void (*ipa_proxy_clk_unvote)(void); + + bool (*ipa_is_client_handle_valid)(u32 clnt_hdl); + + enum ipa_client_type (*ipa_get_client_mapping)(int pipe_idx); + + enum ipa_rm_resource_name (*ipa_get_rm_resource_from_ep)(int pipe_idx); + + bool (*ipa_get_modem_cfg_emb_pipe_flt)(void); + + enum ipa_transport_type (*ipa_get_transport_type)(void); + + int (*ipa_ap_suspend)(struct device *dev); + + int (*ipa_ap_resume)(struct device *dev); + + int (*ipa_stop_gsi_channel)(u32 clnt_hdl); + + int (*ipa_start_gsi_channel)(u32 clnt_hdl); + + struct iommu_domain *(*ipa_get_smmu_domain)(void); + + int (*ipa_disable_apps_wan_cons_deaggr)(uint32_t agg_size, + uint32_t agg_count); + + struct device *(*ipa_get_dma_dev)(void); + + int (*ipa_release_wdi_mapping)(u32 num_buffers, + struct ipa_wdi_buffer_info *info); + + int (*ipa_create_wdi_mapping)(u32 num_buffers, + struct ipa_wdi_buffer_info *info); + + const struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info) + (enum ipa_client_type client); + + int (*ipa_register_ipa_ready_cb)(void (*ipa_ready_cb)(void *user_data), + void *user_data); + + void (*ipa_inc_client_enable_clks)( + struct ipa_active_client_logging_info *id); + + void (*ipa_dec_client_disable_clks)( + struct ipa_active_client_logging_info *id); + + int (*ipa_inc_client_enable_clks_no_block)( + struct ipa_active_client_logging_info *id); + + int 
(*ipa_suspend_resource_no_block)( + enum ipa_rm_resource_name resource); + + int (*ipa_resume_resource)(enum ipa_rm_resource_name name); + + int (*ipa_suspend_resource_sync)(enum ipa_rm_resource_name resource); + + int (*ipa_set_required_perf_profile)( + enum ipa_voltage_level floor_voltage, u32 bandwidth_mbps); + + void *(*ipa_get_ipc_logbuf)(void); + + void *(*ipa_get_ipc_logbuf_low)(void); + + int (*ipa_rx_poll)(u32 clnt_hdl, int budget); + + void (*ipa_recycle_wan_skb)(struct sk_buff *skb); + + int (*ipa_setup_uc_ntn_pipes)(struct ipa_ntn_conn_in_params *in, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp); + + int (*ipa_tear_down_uc_offload_pipes)(int ipa_ep_idx_ul, + int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params); + + struct device *(*ipa_get_pdev)(void); + + int (*ipa_ntn_uc_reg_rdyCB)(void (*ipauc_ready_cb)(void *user_data), + void *user_data); + + void (*ipa_ntn_uc_dereg_rdyCB)(void); + + int (*ipa_conn_wdi_pipes)(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out, + ipa_wdi_meter_notifier_cb wdi_notify); + + int (*ipa_disconn_wdi_pipes)(int ipa_ep_idx_tx, + int ipa_ep_idx_rx); + + int (*ipa_enable_wdi_pipes)(int ipa_ep_idx_tx, + int ipa_ep_idx_rx); + + int (*ipa_disable_wdi_pipes)(int ipa_ep_idx_tx, + int ipa_ep_idx_rx); + + int (*ipa_tz_unlock_reg)(struct ipa_tz_unlock_reg_info *reg_info, + u16 num_regs); + + int (*ipa_get_smmu_params)(struct ipa_smmu_in_params *in, + struct ipa_smmu_out_params *out); + int (*ipa_is_vlan_mode)(enum ipa_vlan_ifaces iface, bool *res); + + bool (*ipa_pm_is_used)(void); +}; + +#ifdef CONFIG_IPA3 +int ipa3_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match); +int ipa3_pci_drv_probe( + struct pci_dev *pci_dev, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match); +#else +static inline int ipa3_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + return -ENODEV; +} +static inline int ipa3_pci_drv_probe( + struct pci_dev *pci_dev, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + return -ENODEV; +} +#endif /* (CONFIG_IPA3) */ + +#endif /* _IPA_API_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile new file mode 100644 index 000000000000..04f69b8d7d38 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o ipa_gsb.o +obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o +obj-$(CONFIG_ECM_IPA) += ecm_ipa.o +obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c new file mode 100644 index 000000000000..d12aa83000d5 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c @@ -0,0 +1,1630 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_common_i.h" +#include "../ipa_v3/ipa_pm.h" + +#define DRIVER_NAME "ecm_ipa" +#define ECM_IPA_IPV4_HDR_NAME "ecm_eth_ipv4" +#define ECM_IPA_IPV6_HDR_NAME "ecm_eth_ipv6" +#define INACTIVITY_MSEC_DELAY 100 +#define DEFAULT_OUTSTANDING_HIGH 64 +#define DEFAULT_OUTSTANDING_LOW 32 +#define DEBUGFS_TEMP_BUF_SIZE 4 +#define TX_TIMEOUT (5 * HZ) + +#define IPA_ECM_IPC_LOG_PAGES 50 + +#define IPA_ECM_IPC_LOGGING(buf, fmt, args...) \ + do { \ + if (buf) \ + ipc_log_string((buf), fmt, __func__, __LINE__, \ + ## args); \ + } while (0) + +static void *ipa_ecm_logbuf; + +#define ECM_IPA_DEBUG(fmt, args...) \ + do { \ + pr_debug(DRIVER_NAME " %s:%d "\ + fmt, __func__, __LINE__, ## args);\ + if (ipa_ecm_logbuf) { \ + IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \ + DRIVER_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define ECM_IPA_DEBUG_XMIT(fmt, args...) \ + pr_debug(DRIVER_NAME " %s:%d " fmt, __func__, __LINE__, ## args) + +#define ECM_IPA_INFO(fmt, args...) \ + do { \ + pr_info(DRIVER_NAME "@%s@%d@ctx:%s: "\ + fmt, __func__, __LINE__, current->comm, ## args);\ + if (ipa_ecm_logbuf) { \ + IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \ + DRIVER_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define ECM_IPA_ERROR(fmt, args...) \ + do { \ + pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\ + fmt, __func__, __LINE__, current->comm, ## args);\ + if (ipa_ecm_logbuf) { \ + IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \ + DRIVER_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define NULL_CHECK(ptr) \ + do { \ + if (!(ptr)) { \ + ECM_IPA_ERROR("null pointer #ptr\n"); \ + ret = -EINVAL; \ + } \ + } \ + while (0) + +#define ECM_IPA_LOG_ENTRY() ECM_IPA_DEBUG("begin\n") +#define ECM_IPA_LOG_EXIT() ECM_IPA_DEBUG("end\n") + +/** + * enum ecm_ipa_state - specify the current driver internal state + * which is guarded by a state machine. + * + * The driver internal state changes due to its external API usage. + * The driver saves its internal state to guard from caller illegal + * call sequence. + * states: + * UNLOADED is the first state which is the default one and is also the state + * after the driver gets unloaded(cleanup). + * INITIALIZED is the driver state once it finished registering + * the network device and all internal data struct were initialized + * CONNECTED is the driver state once the USB pipes were connected to IPA + * UP is the driver state after the interface mode was set to UP but the + * pipes are not connected yet - this state is meta-stable state. + * CONNECTED_AND_UP is the driver state when the pipe were connected and + * the interface got UP request from the network stack. this is the driver + * idle operation state which allows it to transmit/receive data. + * INVALID is a state which is not allowed. + */ +enum ecm_ipa_state { + ECM_IPA_UNLOADED = 0, + ECM_IPA_INITIALIZED, + ECM_IPA_CONNECTED, + ECM_IPA_UP, + ECM_IPA_CONNECTED_AND_UP, + ECM_IPA_INVALID, +}; + +/** + * enum ecm_ipa_operation - enumerations used to descibe the API operation + * + * Those enums are used as input for the driver state machine. 
+ */ +enum ecm_ipa_operation { + ECM_IPA_INITIALIZE, + ECM_IPA_CONNECT, + ECM_IPA_OPEN, + ECM_IPA_STOP, + ECM_IPA_DISCONNECT, + ECM_IPA_CLEANUP, +}; + +#define ECM_IPA_STATE_DEBUG(ecm_ipa_ctx) \ + ECM_IPA_DEBUG("Driver state - %s\n",\ + ecm_ipa_state_string((ecm_ipa_ctx)->state)) + +/** + * struct ecm_ipa_dev - main driver context parameters + * @net: network interface struct implemented by this driver + * @directory: debugfs directory for various debuging switches + * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table + * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table + * @usb_to_ipa_hdl: save handle for IPA pipe operations + * @ipa_to_usb_hdl: save handle for IPA pipe operations + * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed + * @outstanding_high: number of outstanding packets allowed + * @outstanding_low: number of outstanding packets which shall cause + * to netdev queue start (after stopped due to outstanding_high reached) + * @state: current state of ecm_ipa driver + * @device_ready_notify: callback supplied by USB core driver + * This callback shall be called by the Netdev once the Netdev internal + * state is changed to RNDIS_IPA_CONNECTED_AND_UP + * @ipa_to_usb_client: consumer client + * @usb_to_ipa_client: producer client + * @ipa_rm_resource_name_prod: IPA resource manager producer resource + * @ipa_rm_resource_name_cons: IPA resource manager consumer resource + * @pm_hdl: handle for IPA PM + * @is_vlan_mode: does the driver need to work in VLAN mode? + */ +struct ecm_ipa_dev { + struct net_device *net; + struct dentry *directory; + u32 eth_ipv4_hdr_hdl; + u32 eth_ipv6_hdr_hdl; + u32 usb_to_ipa_hdl; + u32 ipa_to_usb_hdl; + atomic_t outstanding_pkts; + u8 outstanding_high; + u8 outstanding_low; + enum ecm_ipa_state state; + void (*device_ready_notify)(void); + enum ipa_client_type ipa_to_usb_client; + enum ipa_client_type usb_to_ipa_client; + enum ipa_rm_resource_name ipa_rm_resource_name_prod; + enum ipa_rm_resource_name ipa_rm_resource_name_cons; + u32 pm_hdl; + bool is_vlan_mode; +}; + +static int ecm_ipa_open(struct net_device *net); +static void ecm_ipa_packet_receive_notify + (void *priv, enum ipa_dp_evt_type evt, unsigned long data); +static void ecm_ipa_tx_complete_notify + (void *priv, enum ipa_dp_evt_type evt, unsigned long data); +static void ecm_ipa_tx_timeout(struct net_device *net); +static int ecm_ipa_stop(struct net_device *net); +static void ecm_ipa_enable_data_path(struct ecm_ipa_dev *ecm_ipa_ctx); +static int ecm_ipa_rules_cfg + (struct ecm_ipa_dev *ecm_ipa_ctx, const void *dst_mac, + const void *src_mac); +static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx); +static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx); +static void ecm_ipa_deregister_properties(void); +static void ecm_ipa_rm_notify + (void *user_data, enum ipa_rm_event event, unsigned long data); +static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net); +static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx); +static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx); +static int ecm_ipa_register_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx); +static void ecm_ipa_deregister_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx); +static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx); +static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx); +static netdev_tx_t ecm_ipa_start_xmit + (struct sk_buff *skb, struct net_device *net); +static int 
ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file); +static ssize_t ecm_ipa_debugfs_atomic_read + (struct file *file, char __user *ubuf, size_t count, loff_t *ppos); +static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx); +static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx); +static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, + bool is_vlan_mode); +static int ecm_ipa_set_device_ethernet_addr + (u8 *dev_ethaddr, u8 device_ethaddr[]); +static enum ecm_ipa_state ecm_ipa_next_state + (enum ecm_ipa_state current_state, enum ecm_ipa_operation operation); +static const char *ecm_ipa_state_string(enum ecm_ipa_state state); +static int ecm_ipa_init_module(void); +static void ecm_ipa_cleanup_module(void); + +static const struct net_device_ops ecm_ipa_netdev_ops = { + .ndo_open = ecm_ipa_open, + .ndo_stop = ecm_ipa_stop, + .ndo_start_xmit = ecm_ipa_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_tx_timeout = ecm_ipa_tx_timeout, + .ndo_get_stats = ecm_ipa_get_stats, +}; + +const struct file_operations ecm_ipa_debugfs_atomic_ops = { + .open = ecm_ipa_debugfs_atomic_open, + .read = ecm_ipa_debugfs_atomic_read, +}; + +static void ecm_ipa_msg_free_cb(void *buff, u32 len, u32 type) +{ + kfree(buff); +} + +/** + * ecm_ipa_init() - create network device and initializes internal + * data structures + * @params: in/out parameters required for ecm_ipa initialization + * + * Shall be called prior to pipe connection. + * The out parameters (the callbacks) shall be supplied to ipa_connect. + * Detailed description: + * - allocate the network device + * - set default values for driver internals + * - create debugfs folder and files + * - create IPA resource manager client + * - add header insertion rules for IPA driver (based on host/device + * Ethernet addresses given in input params) + * - register tx/rx properties to IPA driver (will be later used + * by IPA configuration manager to configure reset of the IPA rules) + * - set the carrier state to "off" (until ecm_ipa_connect is called) + * - register the network device + * - set the out parameters + * + * Returns negative errno, or zero on success + */ +int ecm_ipa_init(struct ecm_ipa_params *params) +{ + int result = 0; + struct net_device *net; + struct ecm_ipa_dev *ecm_ipa_ctx; + int ret; + + ECM_IPA_LOG_ENTRY(); + + ECM_IPA_DEBUG("%s initializing\n", DRIVER_NAME); + ret = 0; + NULL_CHECK(params); + if (ret) + return ret; + + ECM_IPA_DEBUG + ("host_ethaddr=%pM, device_ethaddr=%pM\n", + params->host_ethaddr, + params->device_ethaddr); + + net = alloc_etherdev(sizeof(struct ecm_ipa_dev)); + if (!net) { + result = -ENOMEM; + ECM_IPA_ERROR("fail to allocate etherdev\n"); + goto fail_alloc_etherdev; + } + ECM_IPA_DEBUG("network device was successfully allocated\n"); + + ecm_ipa_ctx = netdev_priv(net); + if (!ecm_ipa_ctx) { + ECM_IPA_ERROR("fail to extract netdev priv\n"); + result = -ENOMEM; + goto fail_netdev_priv; + } + memset(ecm_ipa_ctx, 0, sizeof(*ecm_ipa_ctx)); + ECM_IPA_DEBUG("ecm_ipa_ctx (private) = %pK\n", ecm_ipa_ctx); + + ecm_ipa_ctx->net = net; + ecm_ipa_ctx->outstanding_high = DEFAULT_OUTSTANDING_HIGH; + ecm_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW; + atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0); + snprintf(net->name, sizeof(net->name), "%s%%d", "ecm"); + net->netdev_ops = &ecm_ipa_netdev_ops; + net->watchdog_timeo = TX_TIMEOUT; + ECM_IPA_DEBUG("internal data structures were initialized\n"); + + if (!params->device_ready_notify) + 
ECM_IPA_DEBUG("device_ready_notify() was not supplied"); + ecm_ipa_ctx->device_ready_notify = params->device_ready_notify; + + ecm_ipa_debugfs_init(ecm_ipa_ctx); + + result = ecm_ipa_set_device_ethernet_addr + (net->dev_addr, params->device_ethaddr); + if (result) { + ECM_IPA_ERROR("set device MAC failed\n"); + goto fail_set_device_ethernet; + } + ECM_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr); + + if (ipa_is_vlan_mode(IPA_VLAN_IF_ECM, &ecm_ipa_ctx->is_vlan_mode)) { + ECM_IPA_ERROR("couldn't acquire vlan mode, is ipa ready?\n"); + goto fail_get_vlan_mode; + } + ECM_IPA_DEBUG("is vlan mode %d\n", ecm_ipa_ctx->is_vlan_mode); + + result = ecm_ipa_rules_cfg + (ecm_ipa_ctx, params->host_ethaddr, params->device_ethaddr); + if (result) { + ECM_IPA_ERROR("fail on ipa rules set\n"); + goto fail_rules_cfg; + } + ECM_IPA_DEBUG("Ethernet header insertion set\n"); + + netif_carrier_off(net); + ECM_IPA_DEBUG("netif_carrier_off() was called\n"); + + netif_stop_queue(ecm_ipa_ctx->net); + ECM_IPA_DEBUG("netif_stop_queue() was called"); + + result = register_netdev(net); + if (result) { + ECM_IPA_ERROR("register_netdev failed: %d\n", result); + goto fail_register_netdev; + } + ECM_IPA_DEBUG("register_netdev succeeded\n"); + + params->ecm_ipa_rx_dp_notify = ecm_ipa_packet_receive_notify; + params->ecm_ipa_tx_dp_notify = ecm_ipa_tx_complete_notify; + params->private = (void *)ecm_ipa_ctx; + params->skip_ep_cfg = false; + ecm_ipa_ctx->state = ECM_IPA_INITIALIZED; + ECM_IPA_STATE_DEBUG(ecm_ipa_ctx); + + ECM_IPA_INFO("ECM_IPA was initialized successfully\n"); + + ECM_IPA_LOG_EXIT(); + + return 0; + +fail_register_netdev: + ecm_ipa_rules_destroy(ecm_ipa_ctx); +fail_rules_cfg: +fail_get_vlan_mode: +fail_set_device_ethernet: + ecm_ipa_debugfs_destroy(ecm_ipa_ctx); +fail_netdev_priv: + free_netdev(net); +fail_alloc_etherdev: + return result; +} +EXPORT_SYMBOL(ecm_ipa_init); + +/** + * ecm_ipa_connect() - notify ecm_ipa for IPA<->USB pipes connection + * @usb_to_ipa_hdl: handle of IPA driver client for USB->IPA + * @ipa_to_usb_hdl: handle of IPA driver client for IPA->USB + * @priv: same value that was set by ecm_ipa_init(), this + * parameter holds the network device pointer. + * + * Once USB driver finishes the pipe connection between IPA core + * and USB core this method shall be called in order to + * allow ecm_ipa complete the data path configurations. + * Caller should make sure that it is calling this function + * from a context that allows it to handle device_ready_notify(). + * Detailed description: + * - configure the IPA end-points register + * - notify the Linux kernel for "carrier_on" + * After this function is done the driver state changes to "Connected". + * This API is expected to be called after ecm_ipa_init() or + * after a call to ecm_ipa_disconnect. 
+ */ +int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, void *priv) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = priv; + int next_state; + struct ipa_ecm_msg *ecm_msg; + struct ipa_msg_meta msg_meta; + int retval; + int ret; + + ECM_IPA_LOG_ENTRY(); + ret = 0; + NULL_CHECK(priv); + if (ret) + return ret; + ECM_IPA_DEBUG("usb_to_ipa_hdl = %d, ipa_to_usb_hdl = %d, priv=0x%pK\n", + usb_to_ipa_hdl, ipa_to_usb_hdl, priv); + + next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_CONNECT); + if (next_state == ECM_IPA_INVALID) { + ECM_IPA_ERROR("can't call connect before calling initialize\n"); + return -EPERM; + } + ecm_ipa_ctx->state = next_state; + ECM_IPA_STATE_DEBUG(ecm_ipa_ctx); + + if (!ipa_is_client_handle_valid(usb_to_ipa_hdl)) { + ECM_IPA_ERROR + ("usb_to_ipa_hdl(%d) is not a valid ipa handle\n", + usb_to_ipa_hdl); + return -EINVAL; + } + if (!ipa_is_client_handle_valid(ipa_to_usb_hdl)) { + ECM_IPA_ERROR + ("ipa_to_usb_hdl(%d) is not a valid ipa handle\n", + ipa_to_usb_hdl); + return -EINVAL; + } + + ecm_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl; + ecm_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl; + + ecm_ipa_ctx->ipa_to_usb_client = ipa_get_client_mapping(ipa_to_usb_hdl); + if (ecm_ipa_ctx->ipa_to_usb_client < 0) { + ECM_IPA_ERROR( + "Error getting IPA->USB client from handle %d\n", + ecm_ipa_ctx->ipa_to_usb_client); + return -EINVAL; + } + ECM_IPA_DEBUG("ipa_to_usb_client = %d\n", + ecm_ipa_ctx->ipa_to_usb_client); + + ecm_ipa_ctx->usb_to_ipa_client = ipa_get_client_mapping(usb_to_ipa_hdl); + if (ecm_ipa_ctx->usb_to_ipa_client < 0) { + ECM_IPA_ERROR( + "Error getting USB->IPA client from handle %d\n", + ecm_ipa_ctx->usb_to_ipa_client); + return -EINVAL; + } + ECM_IPA_DEBUG("usb_to_ipa_client = %d\n", + ecm_ipa_ctx->usb_to_ipa_client); + + if (ipa_pm_is_used()) { + retval = ecm_ipa_register_pm_client(ecm_ipa_ctx); + } else { + ecm_ipa_ctx->ipa_rm_resource_name_cons = + ipa_get_rm_resource_from_ep(ipa_to_usb_hdl); + if (ecm_ipa_ctx->ipa_rm_resource_name_cons < 0) { + ECM_IPA_ERROR( + "Error getting CONS RM resource from handle %d\n", + ecm_ipa_ctx->ipa_rm_resource_name_cons); + return -EINVAL; + } + ECM_IPA_DEBUG("ipa_rm_resource_name_cons = %d\n", + ecm_ipa_ctx->ipa_rm_resource_name_cons); + + ecm_ipa_ctx->ipa_rm_resource_name_prod = + ipa_get_rm_resource_from_ep(usb_to_ipa_hdl); + if (ecm_ipa_ctx->ipa_rm_resource_name_prod < 0) { + ECM_IPA_ERROR( + "Error getting PROD RM resource from handle %d\n", + ecm_ipa_ctx->ipa_rm_resource_name_prod); + return -EINVAL; + } + ECM_IPA_DEBUG("ipa_rm_resource_name_prod = %d\n", + ecm_ipa_ctx->ipa_rm_resource_name_prod); + + retval = ecm_ipa_create_rm_resource(ecm_ipa_ctx); + } + + if (retval) { + ECM_IPA_ERROR("fail on RM create\n"); + goto fail_create_rm; + } + ECM_IPA_DEBUG("RM resource was created\n"); + + retval = ecm_ipa_register_properties(ecm_ipa_ctx); + if (retval) { + ECM_IPA_ERROR("fail on properties set\n"); + goto fail_create_rm; + } + ECM_IPA_DEBUG("ecm_ipa 2 Tx and 2 Rx properties were registered\n"); + + retval = ecm_ipa_ep_registers_cfg(usb_to_ipa_hdl, ipa_to_usb_hdl, + ecm_ipa_ctx->is_vlan_mode); + if (retval) { + ECM_IPA_ERROR("fail on ep cfg\n"); + goto fail; + } + ECM_IPA_DEBUG("end-point configured\n"); + + netif_carrier_on(ecm_ipa_ctx->net); + + ecm_msg = kzalloc(sizeof(*ecm_msg), GFP_KERNEL); + if (!ecm_msg) { + retval = -ENOMEM; + goto fail; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = ECM_CONNECT; + msg_meta.msg_len = sizeof(struct ipa_ecm_msg); + strlcpy(ecm_msg->name, 
ecm_ipa_ctx->net->name, + IPA_RESOURCE_NAME_MAX); + ecm_msg->ifindex = ecm_ipa_ctx->net->ifindex; + + retval = ipa_send_msg(&msg_meta, ecm_msg, ecm_ipa_msg_free_cb); + if (retval) { + ECM_IPA_ERROR("fail to send ECM_CONNECT message\n"); + kfree(ecm_msg); + goto fail; + } + + if (!netif_carrier_ok(ecm_ipa_ctx->net)) { + ECM_IPA_ERROR("netif_carrier_ok error\n"); + retval = -EBUSY; + goto fail; + } + ECM_IPA_DEBUG("carrier_on notified\n"); + + if (ecm_ipa_ctx->state == ECM_IPA_CONNECTED_AND_UP) + ecm_ipa_enable_data_path(ecm_ipa_ctx); + else + ECM_IPA_DEBUG("data path was not enabled yet\n"); + + ECM_IPA_INFO("ECM_IPA was connected successfully\n"); + + ECM_IPA_LOG_EXIT(); + + return 0; + +fail: + ecm_ipa_deregister_properties(); +fail_create_rm: + if (ipa_pm_is_used()) + ecm_ipa_deregister_pm_client(ecm_ipa_ctx); + else + ecm_ipa_destroy_rm_resource(ecm_ipa_ctx); + return retval; +} +EXPORT_SYMBOL(ecm_ipa_connect); + +/** + * ecm_ipa_open() - notify Linux network stack to start sending packets + * @net: the network interface supplied by the network stack + * + * Linux uses this API to notify the driver that the network interface + * transitions to the up state. + * The driver will instruct the Linux network stack to start + * delivering data packets. + */ +static int ecm_ipa_open(struct net_device *net) +{ + struct ecm_ipa_dev *ecm_ipa_ctx; + int next_state; + + ECM_IPA_LOG_ENTRY(); + + ecm_ipa_ctx = netdev_priv(net); + + next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_OPEN); + if (next_state == ECM_IPA_INVALID) { + ECM_IPA_ERROR("can't bring driver up before initialize\n"); + return -EPERM; + } + ecm_ipa_ctx->state = next_state; + ECM_IPA_STATE_DEBUG(ecm_ipa_ctx); + + if (ecm_ipa_ctx->state == ECM_IPA_CONNECTED_AND_UP) + ecm_ipa_enable_data_path(ecm_ipa_ctx); + else + ECM_IPA_DEBUG("data path was not enabled yet\n"); + + ECM_IPA_LOG_EXIT(); + + return 0; +} + +/** + * ecm_ipa_start_xmit() - send data from APPs to USB core via IPA core + * @skb: packet received from Linux network stack + * @net: the network device being used to send this packet + * + * Several conditions are needed in order to send the packet to IPA: + * - Transmit queue for the network driver is currently + * in "send" state + * - The driver internal state is in "UP" state. + * - Filter Tx switch is turned off + * - The IPA resource manager state for the driver producer client + * is "Granted" which implies that all the resources in the dependency + * graph are valid for data flow. + * - the outstanding high boundary has not been reached. + * + * In case all of the above conditions are met, the network driver will + * send the packet by using the IPA API for Tx. + * In case the outstanding packet high boundary is reached, the driver will + * stop the send queue until enough packets have been processed by the IPA core. 
+ */ +static netdev_tx_t ecm_ipa_start_xmit + (struct sk_buff *skb, struct net_device *net) +{ + int ret; + netdev_tx_t status = NETDEV_TX_BUSY; + struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net); + + netif_trans_update(net); + + ECM_IPA_DEBUG_XMIT + ("Tx, len=%d, skb->protocol=%d, outstanding=%d\n", + skb->len, skb->protocol, + atomic_read(&ecm_ipa_ctx->outstanding_pkts)); + + if (unlikely(netif_queue_stopped(net))) { + ECM_IPA_ERROR("interface queue is stopped\n"); + goto out; + } + + if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) { + ECM_IPA_ERROR("Missing pipe connected and/or iface up\n"); + return NETDEV_TX_BUSY; + } + + ret = resource_request(ecm_ipa_ctx); + if (ret) { + ECM_IPA_DEBUG("Waiting to resource\n"); + netif_stop_queue(net); + goto resource_busy; + } + + if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) >= + ecm_ipa_ctx->outstanding_high) { + ECM_IPA_DEBUG + ("outstanding high (%d)- stopping\n", + ecm_ipa_ctx->outstanding_high); + netif_stop_queue(net); + status = NETDEV_TX_BUSY; + goto out; + } + + if (ecm_ipa_ctx->is_vlan_mode) + if (unlikely(skb->protocol != htons(ETH_P_8021Q))) + ECM_IPA_DEBUG( + "ether_type != ETH_P_8021Q && vlan, prot = 0x%X\n" + , skb->protocol); + + ret = ipa_tx_dp(ecm_ipa_ctx->ipa_to_usb_client, skb, NULL); + if (ret) { + ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret); + goto fail_tx_packet; + } + + atomic_inc(&ecm_ipa_ctx->outstanding_pkts); + + status = NETDEV_TX_OK; + goto out; + +fail_tx_packet: +out: + resource_release(ecm_ipa_ctx); +resource_busy: + return status; +} + +/** + * ecm_ipa_packet_receive_notify() - Rx notify + * + * @priv: ecm driver context + * @evt: event type + * @data: data provided with event + * + * IPA will pass a packet to the Linux network stack with skb->data pointing + * to Ethernet packet frame. + */ +static void ecm_ipa_packet_receive_notify + (void *priv, enum ipa_dp_evt_type evt, unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + struct ecm_ipa_dev *ecm_ipa_ctx = priv; + int result; + unsigned int packet_len; + + if (!skb) { + ECM_IPA_ERROR("Bad SKB received from IPA driver\n"); + return; + } + + packet_len = skb->len; + ECM_IPA_DEBUG("packet RX, len=%d\n", skb->len); + + if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) { + ECM_IPA_DEBUG("Missing pipe connected and/or iface up\n"); + return; + } + + if (evt != IPA_RECEIVE) { + ECM_IPA_ERROR("A none IPA_RECEIVE event in ecm_ipa_receive\n"); + return; + } + + skb->dev = ecm_ipa_ctx->net; + skb->protocol = eth_type_trans(skb, ecm_ipa_ctx->net); + + result = netif_rx(skb); + if (result) + ECM_IPA_ERROR("fail on netif_rx\n"); + ecm_ipa_ctx->net->stats.rx_packets++; + ecm_ipa_ctx->net->stats.rx_bytes += packet_len; +} + +/** ecm_ipa_stop() - called when network device transitions to the down + * state. + * @net: the network device being stopped. + * + * This API is used by Linux network stack to notify the network driver that + * its state was changed to "down" + * The driver will stop the "send" queue and change its internal + * state to "Connected". 
+ */ +static int ecm_ipa_stop(struct net_device *net) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net); + int next_state; + + ECM_IPA_LOG_ENTRY(); + + next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_STOP); + if (next_state == ECM_IPA_INVALID) { + ECM_IPA_ERROR("can't do network interface down without up\n"); + return -EPERM; + } + ecm_ipa_ctx->state = next_state; + ECM_IPA_STATE_DEBUG(ecm_ipa_ctx); + + netif_stop_queue(net); + ECM_IPA_DEBUG("network device stopped\n"); + + ECM_IPA_LOG_EXIT(); + return 0; +} + +/** ecm_ipa_disconnect() - called when the USB cable is unplugged. + * @priv: same value that was set by ecm_ipa_init(), this + * parameter holds the network device pointer. + * + * Once the USB cable is unplugged the USB driver will notify the network + * interface driver. + * The internal driver state will return to its initialized state, the + * Linux network stack will be informed of carrier off, and the send queue + * will be stopped. + */ +int ecm_ipa_disconnect(void *priv) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = priv; + int next_state; + struct ipa_ecm_msg *ecm_msg; + struct ipa_msg_meta msg_meta; + int retval; + int outstanding_dropped_pkts; + int ret; + + ECM_IPA_LOG_ENTRY(); + ret = 0; + NULL_CHECK(ecm_ipa_ctx); + if (ret) + return ret; + ECM_IPA_DEBUG("priv=0x%pK\n", priv); + + next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_DISCONNECT); + if (next_state == ECM_IPA_INVALID) { + ECM_IPA_ERROR("can't disconnect before connect\n"); + return -EPERM; + } + ecm_ipa_ctx->state = next_state; + ECM_IPA_STATE_DEBUG(ecm_ipa_ctx); + + netif_carrier_off(ecm_ipa_ctx->net); + ECM_IPA_DEBUG("carrier_off notification was sent\n"); + + ecm_msg = kzalloc(sizeof(*ecm_msg), GFP_KERNEL); + if (!ecm_msg) + return -ENOMEM; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = ECM_DISCONNECT; + msg_meta.msg_len = sizeof(struct ipa_ecm_msg); + strlcpy(ecm_msg->name, ecm_ipa_ctx->net->name, + IPA_RESOURCE_NAME_MAX); + ecm_msg->ifindex = ecm_ipa_ctx->net->ifindex; + + retval = ipa_send_msg(&msg_meta, ecm_msg, ecm_ipa_msg_free_cb); + if (retval) { + ECM_IPA_ERROR("fail to send ECM_DISCONNECT message\n"); + kfree(ecm_msg); + return -EPERM; + } + + netif_stop_queue(ecm_ipa_ctx->net); + ECM_IPA_DEBUG("queue stopped\n"); + + if (ipa_pm_is_used()) + ecm_ipa_deregister_pm_client(ecm_ipa_ctx); + else + ecm_ipa_destroy_rm_resource(ecm_ipa_ctx); + + outstanding_dropped_pkts = + atomic_read(&ecm_ipa_ctx->outstanding_pkts); + ecm_ipa_ctx->net->stats.tx_errors += outstanding_dropped_pkts; + atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0); + + ECM_IPA_INFO("ECM_IPA was disconnected successfully\n"); + + ECM_IPA_LOG_EXIT(); + + return 0; +} +EXPORT_SYMBOL(ecm_ipa_disconnect); + +/** + * ecm_ipa_cleanup() - unregister the network interface driver and free + * internal data structs. + * @priv: same value that was set by ecm_ipa_init(), this + * parameter holds the network device pointer. + * + * This function shall be called once the network interface is not + * needed anymore, e.g.: when the USB composition does not support ECM. + * This function shall be called after the pipes were disconnected. + * Detailed description: + * - delete the driver dependency defined for IPA resource manager and + * destroy the producer resource.
+ * - remove the debugfs entries + * - deregister the network interface from Linux network stack + * - free all internal data structs + */ +void ecm_ipa_cleanup(void *priv) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = priv; + int next_state; + + ECM_IPA_LOG_ENTRY(); + + ECM_IPA_DEBUG("priv=0x%pK\n", priv); + + if (!ecm_ipa_ctx) { + ECM_IPA_ERROR("ecm_ipa_ctx NULL pointer\n"); + return; + } + + next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_CLEANUP); + if (next_state == ECM_IPA_INVALID) { + ECM_IPA_ERROR("can't clean driver without cable disconnect\n"); + return; + } + ecm_ipa_ctx->state = next_state; + ECM_IPA_STATE_DEBUG(ecm_ipa_ctx); + + ecm_ipa_rules_destroy(ecm_ipa_ctx); + ecm_ipa_debugfs_destroy(ecm_ipa_ctx); + + unregister_netdev(ecm_ipa_ctx->net); + free_netdev(ecm_ipa_ctx->net); + + ECM_IPA_INFO("ECM_IPA was destroyed successfully\n"); + + ECM_IPA_LOG_EXIT(); +} +EXPORT_SYMBOL(ecm_ipa_cleanup); + +static void ecm_ipa_enable_data_path(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + if (ecm_ipa_ctx->device_ready_notify) { + ecm_ipa_ctx->device_ready_notify(); + ECM_IPA_DEBUG("USB device_ready_notify() was called\n"); + } else { + ECM_IPA_DEBUG("device_ready_notify() not supplied\n"); + } + + netif_start_queue(ecm_ipa_ctx->net); + ECM_IPA_DEBUG("queue started\n"); +} + +static void ecm_ipa_prepare_header_insertion( + int eth_type, + const char *hdr_name, struct ipa_hdr_add *add_hdr, + const void *dst_mac, const void *src_mac, bool is_vlan_mode) +{ + struct ethhdr *eth_hdr; + struct vlan_ethhdr *eth_vlan_hdr; + + ECM_IPA_LOG_ENTRY(); + + add_hdr->is_partial = 0; + strlcpy(add_hdr->name, hdr_name, IPA_RESOURCE_NAME_MAX); + add_hdr->is_eth2_ofst_valid = true; + add_hdr->eth2_ofst = 0; + + if (is_vlan_mode) { + eth_vlan_hdr = (struct vlan_ethhdr *)add_hdr->hdr; + memcpy(eth_vlan_hdr->h_dest, dst_mac, ETH_ALEN); + memcpy(eth_vlan_hdr->h_source, src_mac, ETH_ALEN); + eth_vlan_hdr->h_vlan_encapsulated_proto = + htons(eth_type); + eth_vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); + add_hdr->hdr_len = VLAN_ETH_HLEN; + add_hdr->type = IPA_HDR_L2_802_1Q; + } else { + eth_hdr = (struct ethhdr *)add_hdr->hdr; + memcpy(eth_hdr->h_dest, dst_mac, ETH_ALEN); + memcpy(eth_hdr->h_source, src_mac, ETH_ALEN); + eth_hdr->h_proto = htons(eth_type); + add_hdr->hdr_len = ETH_HLEN; + add_hdr->type = IPA_HDR_L2_ETHERNET_II; + } + ECM_IPA_LOG_EXIT(); +} + +/** + * ecm_ipa_rules_cfg() - set header insertion and register Tx/Rx properties + * Headers will be committed to HW + * @ecm_ipa_ctx: main driver context parameters + * @dst_mac: destination MAC address + * @src_mac: source MAC address + * + * Returns negative errno, or zero on success + */ +static int ecm_ipa_rules_cfg + (struct ecm_ipa_dev *ecm_ipa_ctx, + const void *dst_mac, const void *src_mac) +{ + struct ipa_ioc_add_hdr *hdrs; + struct ipa_hdr_add *ipv4_hdr; + struct ipa_hdr_add *ipv6_hdr; + int result = 0; + + ECM_IPA_LOG_ENTRY(); + hdrs = kzalloc + (sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr), + GFP_KERNEL); + if (!hdrs) { + result = -ENOMEM; + goto out; + } + + ipv4_hdr = &hdrs->hdr[0]; + ecm_ipa_prepare_header_insertion( + ETH_P_IP, ECM_IPA_IPV4_HDR_NAME, + ipv4_hdr, dst_mac, src_mac, ecm_ipa_ctx->is_vlan_mode); + + ipv6_hdr = &hdrs->hdr[1]; + ecm_ipa_prepare_header_insertion( + ETH_P_IPV6, ECM_IPA_IPV6_HDR_NAME, + ipv6_hdr, dst_mac, src_mac, ecm_ipa_ctx->is_vlan_mode); + + hdrs->commit = 1; + hdrs->num_hdrs = 2; + result = ipa_add_hdr(hdrs); + if (result) { + ECM_IPA_ERROR("Fail on Header-Insertion(%d)\n", result); + goto out_free_mem; + } + 
if (ipv4_hdr->status) { + ECM_IPA_ERROR + ("Fail on Header-Insertion ipv4(%d)\n", + ipv4_hdr->status); + result = ipv4_hdr->status; + goto out_free_mem; + } + if (ipv6_hdr->status) { + ECM_IPA_ERROR + ("Fail on Header-Insertion ipv6(%d)\n", + ipv6_hdr->status); + result = ipv6_hdr->status; + goto out_free_mem; + } + ecm_ipa_ctx->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl; + ecm_ipa_ctx->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl; + ECM_IPA_LOG_EXIT(); +out_free_mem: + kfree(hdrs); +out: + return result; +} + +/** + * ecm_ipa_rules_destroy() - remove the IPA core configuration done for + * the driver data path. + * @ecm_ipa_ctx: the driver context + * + * Revert the work done on ecm_ipa_rules_cfg. + */ +static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *ipv4; + struct ipa_hdr_del *ipv6; + int result; + + del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) + + sizeof(*ipv6), GFP_KERNEL); + if (!del_hdr) + return; + del_hdr->commit = 1; + del_hdr->num_hdls = 2; + ipv4 = &del_hdr->hdl[0]; + ipv4->hdl = ecm_ipa_ctx->eth_ipv4_hdr_hdl; + ipv6 = &del_hdr->hdl[1]; + ipv6->hdl = ecm_ipa_ctx->eth_ipv6_hdr_hdl; + result = ipa_del_hdr(del_hdr); + if (result || ipv4->status || ipv6->status) + ECM_IPA_ERROR("ipa_del_hdr failed\n"); + kfree(del_hdr); +} + +/* ecm_ipa_register_properties() - set Tx/Rx properties for ipacm + * + * Register ecm0 interface with 2 Tx properties and 2 Rx properties: + * The 2 Tx properties are for data flowing from IPA to USB, they + * have Header-Insertion properties both for Ipv4 and Ipv6 Ethernet framing. + * The 2 Rx properties are for data flowing from USB to IPA, they have + * simple rule which always "hit". + * + */ +static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + struct ipa_tx_intf tx_properties = {0}; + struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} }; + struct ipa_ioc_tx_intf_prop *ipv4_property; + struct ipa_ioc_tx_intf_prop *ipv6_property; + struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} }; + struct ipa_rx_intf rx_properties = {0}; + struct ipa_ioc_rx_intf_prop *rx_ipv4_property; + struct ipa_ioc_rx_intf_prop *rx_ipv6_property; + enum ipa_hdr_l2_type hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + int result = 0; + + ECM_IPA_LOG_ENTRY(); + + if (ecm_ipa_ctx->is_vlan_mode) + hdr_l2_type = IPA_HDR_L2_802_1Q; + + tx_properties.prop = properties; + ipv4_property = &tx_properties.prop[0]; + ipv4_property->ip = IPA_IP_v4; + ipv4_property->dst_pipe = ecm_ipa_ctx->ipa_to_usb_client; + strlcpy + (ipv4_property->hdr_name, ECM_IPA_IPV4_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + ipv4_property->hdr_l2_type = hdr_l2_type; + ipv6_property = &tx_properties.prop[1]; + ipv6_property->ip = IPA_IP_v6; + ipv6_property->dst_pipe = ecm_ipa_ctx->ipa_to_usb_client; + ipv6_property->hdr_l2_type = hdr_l2_type; + strlcpy + (ipv6_property->hdr_name, ECM_IPA_IPV6_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + tx_properties.num_props = 2; + + rx_properties.prop = rx_ioc_properties; + rx_ipv4_property = &rx_properties.prop[0]; + rx_ipv4_property->ip = IPA_IP_v4; + rx_ipv4_property->attrib.attrib_mask = 0; + rx_ipv4_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client; + rx_ipv4_property->hdr_l2_type = hdr_l2_type; + rx_ipv6_property = &rx_properties.prop[1]; + rx_ipv6_property->ip = IPA_IP_v6; + rx_ipv6_property->attrib.attrib_mask = 0; + rx_ipv6_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client; + rx_ipv6_property->hdr_l2_type = hdr_l2_type; + rx_properties.num_props = 2; + + result = 
ipa_register_intf("ecm0", &tx_properties, &rx_properties); + if (result) + ECM_IPA_ERROR("fail on Tx/Rx properties registration\n"); + + ECM_IPA_LOG_EXIT(); + + return result; +} + +static void ecm_ipa_deregister_properties(void) +{ + int result; + + ECM_IPA_LOG_ENTRY(); + result = ipa_deregister_intf("ecm0"); + if (result) + ECM_IPA_DEBUG("Fail on Tx prop deregister\n"); + ECM_IPA_LOG_EXIT(); +} + +/** + * ecm_ipa_configure() - make IPA core end-point specific configuration + * @usb_to_ipa_hdl: handle of usb_to_ipa end-point for IPA driver + * @ipa_to_usb_hdl: handle of ipa_to_usb end-point for IPA driver + * @host_ethaddr: host Ethernet address in network order + * @device_ethaddr: device Ethernet address in network order + * + * Configure the usb_to_ipa and ipa_to_usb end-point registers + * - USB->IPA end-point: disable de-aggregation, enable link layer + * header removal (Ethernet removal), source NATing and default routing. + * - IPA->USB end-point: disable aggregation, add link layer header (Ethernet) + * - allocate Ethernet device + * - register to Linux network stack + * + * Returns negative errno, or zero on success + */ + +static void ecm_ipa_rm_notify + (void *user_data, enum ipa_rm_event event, unsigned long data) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = user_data; + + ECM_IPA_LOG_ENTRY(); + if + (event == IPA_RM_RESOURCE_GRANTED && + netif_queue_stopped(ecm_ipa_ctx->net)) { + ECM_IPA_DEBUG("Resource Granted - starting queue\n"); + netif_start_queue(ecm_ipa_ctx->net); + } else { + ECM_IPA_DEBUG("Resource released\n"); + } + ECM_IPA_LOG_EXIT(); +} + +static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net) +{ + return &net->stats; +} + +static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + struct ipa_rm_create_params create_params = {0}; + struct ipa_rm_perf_profile profile; + int result; + + ECM_IPA_LOG_ENTRY(); + create_params.name = IPA_RM_RESOURCE_STD_ECM_PROD; + create_params.reg_params.user_data = ecm_ipa_ctx; + create_params.reg_params.notify_cb = ecm_ipa_rm_notify; + result = ipa_rm_create_resource(&create_params); + if (result) { + ECM_IPA_ERROR("Fail on ipa_rm_create_resource\n"); + goto fail_rm_create; + } + ECM_IPA_DEBUG("rm client was created"); + + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ipa_rm_set_perf_profile(IPA_RM_RESOURCE_STD_ECM_PROD, &profile); + + result = ipa_rm_inactivity_timer_init + (IPA_RM_RESOURCE_STD_ECM_PROD, + INACTIVITY_MSEC_DELAY); + if (result) { + ECM_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n"); + goto fail_it; + } + ECM_IPA_DEBUG("rm_it client was created"); + + result = ipa_rm_add_dependency_sync + (IPA_RM_RESOURCE_STD_ECM_PROD, + ecm_ipa_ctx->ipa_rm_resource_name_cons); + if (result && result != -EINPROGRESS) + ECM_IPA_ERROR + ("unable to add ECM/USB dependency (%d)\n", result); + + result = ipa_rm_add_dependency_sync + (ecm_ipa_ctx->ipa_rm_resource_name_prod, + IPA_RM_RESOURCE_APPS_CONS); + if (result && result != -EINPROGRESS) + ECM_IPA_ERROR + ("unable to add USB/APPS dependency (%d)\n", result); + + ECM_IPA_DEBUG("rm dependency was set\n"); + + ECM_IPA_LOG_EXIT(); + return 0; + +fail_it: +fail_rm_create: + return result; +} + +static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + int result; + + ECM_IPA_LOG_ENTRY(); + + ipa_rm_delete_dependency(IPA_RM_RESOURCE_STD_ECM_PROD, + ecm_ipa_ctx->ipa_rm_resource_name_cons); + ipa_rm_delete_dependency(ecm_ipa_ctx->ipa_rm_resource_name_prod, + IPA_RM_RESOURCE_APPS_CONS); + 
ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_STD_ECM_PROD); + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_STD_ECM_PROD); + if (result) + ECM_IPA_ERROR("resource deletion failed\n"); + + ECM_IPA_LOG_EXIT(); +} + +static void ecm_ipa_pm_cb(void *p, enum ipa_pm_cb_event event) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = p; + + ECM_IPA_LOG_ENTRY(); + if (event != IPA_PM_CLIENT_ACTIVATED) { + ECM_IPA_ERROR("unexpected event %d\n", event); + WARN_ON(1); + return; + } + + if (netif_queue_stopped(ecm_ipa_ctx->net)) { + ECM_IPA_DEBUG("Resource Granted - starting queue\n"); + netif_start_queue(ecm_ipa_ctx->net); + } + ECM_IPA_LOG_EXIT(); +} + +static int ecm_ipa_register_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + int result; + struct ipa_pm_register_params pm_reg; + + memset(&pm_reg, 0, sizeof(pm_reg)); + pm_reg.name = ecm_ipa_ctx->net->name; + pm_reg.user_data = ecm_ipa_ctx; + pm_reg.callback = ecm_ipa_pm_cb; + pm_reg.group = IPA_PM_GROUP_APPS; + result = ipa_pm_register(&pm_reg, &ecm_ipa_ctx->pm_hdl); + if (result) { + ECM_IPA_ERROR("failed to create IPA PM client %d\n", result); + return result; + } + return 0; +} + +static void ecm_ipa_deregister_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + ipa_pm_deactivate_sync(ecm_ipa_ctx->pm_hdl); + ipa_pm_deregister(ecm_ipa_ctx->pm_hdl); + ecm_ipa_ctx->pm_hdl = ~0; +} + +static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + if (ipa_pm_is_used()) + return ipa_pm_activate(ecm_ipa_ctx->pm_hdl); + + return ipa_rm_inactivity_timer_request_resource( + IPA_RM_RESOURCE_STD_ECM_PROD); +} + +static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + if (ipa_pm_is_used()) + ipa_pm_deferred_deactivate(ecm_ipa_ctx->pm_hdl); + else + ipa_rm_inactivity_timer_release_resource( + IPA_RM_RESOURCE_STD_ECM_PROD); +} + +/** + * ecm_ipa_tx_complete_notify() - Tx complete notify + * + * @priv: ecm driver context + * @evt: event type + * @data: data provided with event + * + * Check that the packet is the one we sent and release it. + * This function will be called in deferred context in IPA wq.
+ */ +static void ecm_ipa_tx_complete_notify + (void *priv, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + struct ecm_ipa_dev *ecm_ipa_ctx = priv; + + if (!skb) { + ECM_IPA_ERROR("Bad SKB received from IPA driver\n"); + return; + } + + if (!ecm_ipa_ctx) { + ECM_IPA_ERROR("ecm_ipa_ctx is NULL pointer\n"); + return; + } + + ECM_IPA_DEBUG + ("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n", + skb->len, skb->protocol, + atomic_read(&ecm_ipa_ctx->outstanding_pkts)); + + if (evt != IPA_WRITE_DONE) { + ECM_IPA_ERROR("unsupported event on Tx callback\n"); + return; + } + + if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) { + ECM_IPA_DEBUG + ("dropping Tx-complete pkt, state=%s", + ecm_ipa_state_string(ecm_ipa_ctx->state)); + goto out; + } + + ecm_ipa_ctx->net->stats.tx_packets++; + ecm_ipa_ctx->net->stats.tx_bytes += skb->len; + + if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) > 0) + atomic_dec(&ecm_ipa_ctx->outstanding_pkts); + + if + (netif_queue_stopped(ecm_ipa_ctx->net) && + netif_carrier_ok(ecm_ipa_ctx->net) && + atomic_read(&ecm_ipa_ctx->outstanding_pkts) + < (ecm_ipa_ctx->outstanding_low)) { + ECM_IPA_DEBUG + ("outstanding low (%d) - waking up queue\n", + ecm_ipa_ctx->outstanding_low); + netif_wake_queue(ecm_ipa_ctx->net); + } + +out: + dev_kfree_skb_any(skb); +} + +static void ecm_ipa_tx_timeout(struct net_device *net) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net); + + ECM_IPA_ERROR + ("possible IPA stall was detected, %d outstanding", + atomic_read(&ecm_ipa_ctx->outstanding_pkts)); + + net->stats.tx_errors++; +} + +static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = inode->i_private; + + ECM_IPA_LOG_ENTRY(); + file->private_data = &ecm_ipa_ctx->outstanding_pkts; + ECM_IPA_LOG_EXIT(); + return 0; +} + +static ssize_t ecm_ipa_debugfs_atomic_read + (struct file *file, char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes; + u8 atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0}; + atomic_t *atomic_var = file->private_data; + + nbytes = scnprintf + (atomic_str, sizeof(atomic_str), "%d\n", + atomic_read(atomic_var)); + return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes); +} + +#ifdef CONFIG_DEBUG_FS + +static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + const mode_t flags_read_write = 0666; + const mode_t flags_read_only = 0444; + struct dentry *file; + + ECM_IPA_LOG_ENTRY(); + + if (!ecm_ipa_ctx) + return; + + ecm_ipa_ctx->directory = debugfs_create_dir("ecm_ipa", NULL); + if (!ecm_ipa_ctx->directory) { + ECM_IPA_ERROR("could not create debugfs directory entry\n"); + goto fail_directory; + } + file = debugfs_create_u8 + ("outstanding_high", flags_read_write, + ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_high); + if (!file) { + ECM_IPA_ERROR("could not create outstanding_high file\n"); + goto fail_file; + } + file = debugfs_create_u8 + ("outstanding_low", flags_read_write, + ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_low); + if (!file) { + ECM_IPA_ERROR("could not create outstanding_low file\n"); + goto fail_file; + } + file = debugfs_create_file + ("outstanding", flags_read_only, + ecm_ipa_ctx->directory, + ecm_ipa_ctx, &ecm_ipa_debugfs_atomic_ops); + if (!file) { + ECM_IPA_ERROR("could not create outstanding file\n"); + goto fail_file; + } + + file = debugfs_create_bool("is_vlan_mode", flags_read_only, + ecm_ipa_ctx->directory, &ecm_ipa_ctx->is_vlan_mode); + if (!file) { + ECM_IPA_ERROR("could not 
create is_vlan_mode file\n"); + goto fail_file; + } + + ECM_IPA_DEBUG("debugfs entries were created\n"); + ECM_IPA_LOG_EXIT(); + + return; +fail_file: + debugfs_remove_recursive(ecm_ipa_ctx->directory); +fail_directory: + return; +} + +static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + debugfs_remove_recursive(ecm_ipa_ctx->directory); +} + +#else /* !CONFIG_DEBUG_FS */ + +static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx) {} + +static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx) {} + +#endif /* CONFIG_DEBUG_FS */ + +/** + * ecm_ipa_ep_registers_cfg() - configure the USB endpoints for ECM + * + * @usb_to_ipa_hdl: handle received from ipa_connect + * @ipa_to_usb_hdl: handle received from ipa_connect + * @is_vlan_mode: should the driver work in VLAN mode? + * + * USB to IPA pipe: + * - No de-aggregation + * - Remove Ethernet header + * - SRC NAT + * - Default routing(0) + * IPA to USB Pipe: + * - No aggregation + * - Add Ethernet header + */ +static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, + bool is_vlan_mode) +{ + int result = 0; + struct ipa_ep_cfg usb_to_ipa_ep_cfg; + struct ipa_ep_cfg ipa_to_usb_ep_cfg; + uint8_t hdr_add = 0; + + + ECM_IPA_LOG_ENTRY(); + if (is_vlan_mode) + hdr_add = VLAN_HLEN; + memset(&usb_to_ipa_ep_cfg, 0, sizeof(struct ipa_ep_cfg)); + usb_to_ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR; + usb_to_ipa_ep_cfg.hdr.hdr_len = ETH_HLEN + hdr_add; + usb_to_ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT; + usb_to_ipa_ep_cfg.route.rt_tbl_hdl = 0; + usb_to_ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS; + usb_to_ipa_ep_cfg.mode.mode = IPA_BASIC; + + /* enable hdr_metadata_reg_valid */ + usb_to_ipa_ep_cfg.hdr.hdr_metadata_reg_valid = true; + + result = ipa_cfg_ep(usb_to_ipa_hdl, &usb_to_ipa_ep_cfg); + if (result) { + ECM_IPA_ERROR("failed to configure USB to IPA end-point\n"); + goto out; + } + memset(&ipa_to_usb_ep_cfg, 0, sizeof(struct ipa_ep_cfg)); + ipa_to_usb_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR; + ipa_to_usb_ep_cfg.hdr.hdr_len = ETH_HLEN + hdr_add; + ipa_to_usb_ep_cfg.nat.nat_en = IPA_BYPASS_NAT; + result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg); + if (result) { + ECM_IPA_ERROR("failed to configure IPA to USB end-point\n"); + goto out; + } + ECM_IPA_DEBUG("end-point registers successfully configured\n"); +out: + ECM_IPA_LOG_EXIT(); + return result; +} + +/** + * ecm_ipa_set_device_ethernet_addr() - set device ethernet address + * @dev_ethaddr: device ethernet address + * + * Returns 0 for success, negative otherwise + */ +static int ecm_ipa_set_device_ethernet_addr + (u8 *dev_ethaddr, u8 device_ethaddr[]) +{ + if (!is_valid_ether_addr(device_ethaddr)) + return -EINVAL; + memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN); + ECM_IPA_DEBUG("device ethernet address: %pM\n", dev_ethaddr); + return 0; +} + +/** ecm_ipa_next_state - return the next state of the driver + * @current_state: the current state of the driver + * @operation: an enum which represents the operation being performed on the driver + * by its API. + * + * This function implements the driver internal state machine. + * Its decisions are based on the driver current state and the operation + * being made. + * In case the operation is invalid, this state machine will return + * the value ECM_IPA_INVALID to inform the caller of a forbidden sequence.
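+ * For example: ECM_IPA_CONNECT in state ECM_IPA_INITIALIZED moves the driver to + * ECM_IPA_CONNECTED, ECM_IPA_OPEN in ECM_IPA_CONNECTED moves it to + * ECM_IPA_CONNECTED_AND_UP, while ECM_IPA_CONNECT in ECM_IPA_UNLOADED is + * rejected with ECM_IPA_INVALID.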
+ */ +static enum ecm_ipa_state ecm_ipa_next_state + (enum ecm_ipa_state current_state, enum ecm_ipa_operation operation) +{ + int next_state = ECM_IPA_INVALID; + + switch (current_state) { + case ECM_IPA_UNLOADED: + if (operation == ECM_IPA_INITIALIZE) + next_state = ECM_IPA_INITIALIZED; + break; + case ECM_IPA_INITIALIZED: + if (operation == ECM_IPA_CONNECT) + next_state = ECM_IPA_CONNECTED; + else if (operation == ECM_IPA_OPEN) + next_state = ECM_IPA_UP; + else if (operation == ECM_IPA_CLEANUP) + next_state = ECM_IPA_UNLOADED; + break; + case ECM_IPA_CONNECTED: + if (operation == ECM_IPA_DISCONNECT) + next_state = ECM_IPA_INITIALIZED; + else if (operation == ECM_IPA_OPEN) + next_state = ECM_IPA_CONNECTED_AND_UP; + break; + case ECM_IPA_UP: + if (operation == ECM_IPA_STOP) + next_state = ECM_IPA_INITIALIZED; + else if (operation == ECM_IPA_CONNECT) + next_state = ECM_IPA_CONNECTED_AND_UP; + else if (operation == ECM_IPA_CLEANUP) + next_state = ECM_IPA_UNLOADED; + break; + case ECM_IPA_CONNECTED_AND_UP: + if (operation == ECM_IPA_STOP) + next_state = ECM_IPA_CONNECTED; + else if (operation == ECM_IPA_DISCONNECT) + next_state = ECM_IPA_UP; + break; + default: + ECM_IPA_ERROR("State is not supported\n"); + break; + } + + ECM_IPA_DEBUG + ("state transition ( %s -> %s )- %s\n", + ecm_ipa_state_string(current_state), + ecm_ipa_state_string(next_state), + next_state == ECM_IPA_INVALID ? "Forbidden" : "Allowed"); + + return next_state; +} + +/** + * ecm_ipa_state_string - return the state string representation + * @state: enum which describe the state + */ +static const char *ecm_ipa_state_string(enum ecm_ipa_state state) +{ + switch (state) { + case ECM_IPA_UNLOADED: + return "ECM_IPA_UNLOADED"; + case ECM_IPA_INITIALIZED: + return "ECM_IPA_INITIALIZED"; + case ECM_IPA_CONNECTED: + return "ECM_IPA_CONNECTED"; + case ECM_IPA_UP: + return "ECM_IPA_UP"; + case ECM_IPA_CONNECTED_AND_UP: + return "ECM_IPA_CONNECTED_AND_UP"; + default: + return "Not supported"; + } +} + +/** + * ecm_ipa_init_module() - module initialization + * + */ +static int ecm_ipa_init_module(void) +{ + ECM_IPA_LOG_ENTRY(); + ipa_ecm_logbuf = ipc_log_context_create(IPA_ECM_IPC_LOG_PAGES, + "ipa_ecm", 0); + if (ipa_ecm_logbuf == NULL) + ECM_IPA_DEBUG("failed to create IPC log, continue...\n"); + ECM_IPA_LOG_EXIT(); + return 0; +} + +/** + * ecm_ipa_cleanup_module() - module cleanup + * + */ +static void ecm_ipa_cleanup_module(void) +{ + ECM_IPA_LOG_ENTRY(); + if (ipa_ecm_logbuf) + ipc_log_context_destroy(ipa_ecm_logbuf); + ipa_ecm_logbuf = NULL; + ECM_IPA_LOG_EXIT(); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ECM IPA network interface"); + +late_initcall(ecm_ipa_init_module); +module_exit(ecm_ipa_cleanup_module); diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c new file mode 100644 index 000000000000..4c07391297e4 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c @@ -0,0 +1,1211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_common_i.h" +#ifdef CONFIG_IPA3 +#include "../ipa_v3/ipa_pm.h" +#endif + +#define IPA_GSB_DRV_NAME "ipa_gsb" + +#define MAX_SUPPORTED_IFACE 5 + +#define IPA_GSB_DBG(fmt, args...) 
\ + do { \ + pr_debug(IPA_GSB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_GSB_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_GSB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_GSB_ERR(fmt, args...) \ + do { \ + pr_err(IPA_GSB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_GSB_MAX_MSG_LEN 512 +static char dbg_buff[IPA_GSB_MAX_MSG_LEN]; + +#define IPA_GSB_SKB_HEADROOM 256 +#define IPA_GSB_SKB_DUMMY_HEADER 42 +#define IPA_GSB_AGGR_BYTE_LIMIT 14 +#define IPA_GSB_AGGR_TIME_LIMIT 1 + +static struct dentry *dent; +static struct dentry *dfile_stats; + +/** + * struct stats - driver statistics, + * @num_ul_packets: number of uplink packets + * @num_dl_packets: number of downlink packets + * @num_insufficient_headroom_packets: number of + packets with insufficient headroom + */ +struct stats { + u64 num_ul_packets; + u64 num_dl_packets; + u64 num_insufficient_headroom_packets; +}; + +/** + * struct ipa_gsb_mux_hdr - ipa gsb mux header, + * @iface_hdl: interface handle + * @qmap_id: qmap id + * @pkt_size: packet size + */ +struct ipa_gsb_mux_hdr { + u8 iface_hdl; + u8 qmap_id; + u16 pkt_size; +}; + +/** + * struct ipa_gsb_iface_info - GSB interface information + * @netdev_name: network interface name + * @device_ethaddr: network interface ethernet address + * @priv: client's private data. to be used in client's callbacks + * @tx_dp_notify: client callback for handling IPA ODU_PROD callback + * @send_dl_skb: client callback for sending skb in downlink direction + * @iface_stats: statistics, how many packets were transmitted + * using the SW bridge. + * @partial_hdr_hdl: handle for partial header + * @wakeup_request: client callback to wakeup + * @is_conencted: is interface connected ? + * @is_resumed: is interface resumed ? 
+ * @iface_hdl: interface handle + */ +struct ipa_gsb_iface_info { + char netdev_name[IPA_RESOURCE_NAME_MAX]; + u8 device_ethaddr[ETH_ALEN]; + void *priv; + ipa_notify_cb tx_dp_notify; + int (*send_dl_skb)(void *priv, struct sk_buff *skb); + struct stats iface_stats; + uint32_t partial_hdr_hdl[IPA_IP_MAX]; + void (*wakeup_request)(void *cl_priv); + bool is_connected; + bool is_resumed; + u8 iface_hdl; +}; + +/** + * struct ipa_gsb_context - GSB driver context information + * @logbuf: buffer of ipc logging + * @logbuf_low: buffer of ipc logging (low priority) + * @lock: global mutex lock for global variables + * @prod_hdl: handle for prod pipe + * @cons_hdl: handle for cons pipe + * @ipa_sys_desc_size: sys pipe desc size + * @num_iface: number of interface + * @iface_hdl: interface handles + * @num_connected_iface: number of connected interface + * @num_resumed_iface: number of resumed interface + * @iface: interface information + * @iface_lock: interface mutex lock for control path + * @iface_spinlock: interface spinlock for data path + * @pm_hdl: IPA PM handle + */ +struct ipa_gsb_context { + void *logbuf; + void *logbuf_low; + struct mutex lock; + u32 prod_hdl; + u32 cons_hdl; + u32 ipa_sys_desc_size; + int num_iface; + bool iface_hdl[MAX_SUPPORTED_IFACE]; + int num_connected_iface; + int num_resumed_iface; + struct ipa_gsb_iface_info *iface[MAX_SUPPORTED_IFACE]; + struct mutex iface_lock[MAX_SUPPORTED_IFACE]; + spinlock_t iface_spinlock[MAX_SUPPORTED_IFACE]; + u32 pm_hdl; + atomic_t disconnect_in_progress; +}; + +static struct ipa_gsb_context *ipa_gsb_ctx; + +#ifdef CONFIG_DEBUG_FS +static ssize_t ipa_gsb_debugfs_stats(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int i, nbytes = 0; + struct ipa_gsb_iface_info *iface = NULL; + struct stats iface_stats; + + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) { + iface = ipa_gsb_ctx->iface[i]; + if (iface != NULL) { + iface_stats = iface->iface_stats; + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_GSB_MAX_MSG_LEN - nbytes, + "netdev: %s\n", + iface->netdev_name); + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_GSB_MAX_MSG_LEN - nbytes, + "UL packets: %lld\n", + iface_stats.num_ul_packets); + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_GSB_MAX_MSG_LEN - nbytes, + "DL packets: %lld\n", + iface_stats.num_dl_packets); + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_GSB_MAX_MSG_LEN - nbytes, + "packets with insufficient headroom: %lld\n", + iface_stats.num_insufficient_headroom_packets); + } + } + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static const struct file_operations ipa_gsb_stats_ops = { + .read = ipa_gsb_debugfs_stats, +}; + +static void ipa_gsb_debugfs_init(void) +{ + const mode_t read_only_mode = 00444; + + dent = debugfs_create_dir("ipa_gsb", NULL); + if (IS_ERR(dent)) { + IPA_GSB_ERR("fail to create folder ipa_gsb\n"); + return; + } + + dfile_stats = + debugfs_create_file("stats", read_only_mode, dent, + NULL, &ipa_gsb_stats_ops); + if (!dfile_stats || IS_ERR(dfile_stats)) { + IPA_GSB_ERR("fail to create file stats\n"); + goto fail; + } + + return; + +fail: + debugfs_remove_recursive(dent); +} + +static void ipa_gsb_debugfs_destroy(void) +{ + debugfs_remove_recursive(dent); +} +#else +static void ipa_gsb_debugfs_init(void) +{ +} + +static void ipa_gsb_debugfs_destroy(void) +{ +} +#endif + +static int ipa_gsb_driver_init(struct odu_bridge_params *params) +{ + int i; + + if (!ipa_is_ready()) { + IPA_GSB_ERR("IPA is not ready\n"); + return -EFAULT; + } + + ipa_gsb_ctx = 
kzalloc(sizeof(*ipa_gsb_ctx), + GFP_KERNEL); + + if (!ipa_gsb_ctx) + return -ENOMEM; + + mutex_init(&ipa_gsb_ctx->lock); + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) { + mutex_init(&ipa_gsb_ctx->iface_lock[i]); + spin_lock_init(&ipa_gsb_ctx->iface_spinlock[i]); + } + ipa_gsb_debugfs_init(); + + return 0; +} + +static int ipa_gsb_commit_partial_hdr(struct ipa_gsb_iface_info *iface_info) +{ + int i; + struct ipa_ioc_add_hdr *hdr; + + if (!iface_info) { + IPA_GSB_ERR("invalid input\n"); + return -EINVAL; + } + + hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + + 2 * sizeof(struct ipa_hdr_add), GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr->commit = 1; + hdr->num_hdrs = 2; + + snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name), + "%s_ipv4", iface_info->netdev_name); + snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name), + "%s_ipv6", iface_info->netdev_name); + /* + * partial header: + * [hdl][QMAP ID][pkt size][Dummy Header][ETH header] + */ + for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) { + /* + * Optimization: add dummy header to reserve space + * for rndis header, so we can do the skb_clone + * instead of deep copy. + */ + hdr->hdr[i].hdr_len = ETH_HLEN + + sizeof(struct ipa_gsb_mux_hdr) + + IPA_GSB_SKB_DUMMY_HEADER; + hdr->hdr[i].type = IPA_HDR_L2_ETHERNET_II; + hdr->hdr[i].is_partial = 1; + hdr->hdr[i].is_eth2_ofst_valid = 1; + hdr->hdr[i].eth2_ofst = sizeof(struct ipa_gsb_mux_hdr) + + IPA_GSB_SKB_DUMMY_HEADER; + /* populate iface handle */ + hdr->hdr[i].hdr[0] = iface_info->iface_hdl; + /* populate src ETH address */ + memcpy(&hdr->hdr[i].hdr[10 + IPA_GSB_SKB_DUMMY_HEADER], + iface_info->device_ethaddr, 6); + /* populate Ethertype */ + if (i == IPA_IP_v4) + *(u16 *)(hdr->hdr[i].hdr + 16 + + IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IP); + else + *(u16 *)(hdr->hdr[i].hdr + 16 + + IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IPV6); + } + + if (ipa_add_hdr(hdr)) { + IPA_GSB_ERR("fail to add partial headers\n"); + kfree(hdr); + return -EFAULT; + } + + for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) + iface_info->partial_hdr_hdl[i] = + hdr->hdr[i].hdr_hdl; + + IPA_GSB_DBG("added partial hdr hdl for ipv4: %d\n", + iface_info->partial_hdr_hdl[IPA_IP_v4]); + IPA_GSB_DBG("added partial hdr hdl for ipv6: %d\n", + iface_info->partial_hdr_hdl[IPA_IP_v6]); + + kfree(hdr); + return 0; +} + +static void ipa_gsb_delete_partial_hdr(struct ipa_gsb_iface_info *iface_info) +{ + struct ipa_ioc_del_hdr *del_hdr; + + del_hdr = kzalloc(sizeof(struct ipa_ioc_del_hdr) + + 2 * sizeof(struct ipa_hdr_del), GFP_KERNEL); + if (!del_hdr) + return; + + del_hdr->commit = 1; + del_hdr->num_hdls = 2; + del_hdr->hdl[IPA_IP_v4].hdl = iface_info->partial_hdr_hdl[IPA_IP_v4]; + del_hdr->hdl[IPA_IP_v6].hdl = iface_info->partial_hdr_hdl[IPA_IP_v6]; + + if (ipa_del_hdr(del_hdr) != 0) + IPA_GSB_ERR("failed to delete partial hdr\n"); + + IPA_GSB_DBG("deleted partial hdr hdl for ipv4: %d\n", + iface_info->partial_hdr_hdl[IPA_IP_v4]); + IPA_GSB_DBG("deleted partial hdr hdl for ipv6: %d\n", + iface_info->partial_hdr_hdl[IPA_IP_v6]); + + kfree(del_hdr); +} + +static int ipa_gsb_reg_intf_props(struct ipa_gsb_iface_info *iface_info) +{ + struct ipa_tx_intf tx; + struct ipa_rx_intf rx; + struct ipa_ioc_tx_intf_prop tx_prop[2]; + struct ipa_ioc_rx_intf_prop rx_prop[2]; + + /* populate tx prop */ + tx.num_props = 2; + tx.prop = tx_prop; + + memset(tx_prop, 0, sizeof(tx_prop)); + tx_prop[0].ip = IPA_IP_v4; + tx_prop[0].dst_pipe = IPA_CLIENT_ODU_EMB_CONS; + tx_prop[0].hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + snprintf(tx_prop[0].hdr_name, 
sizeof(tx_prop[0].hdr_name), + "%s_ipv4", iface_info->netdev_name); + + tx_prop[1].ip = IPA_IP_v6; + tx_prop[1].dst_pipe = IPA_CLIENT_ODU_EMB_CONS; + tx_prop[1].hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + snprintf(tx_prop[1].hdr_name, sizeof(tx_prop[1].hdr_name), + "%s_ipv6", iface_info->netdev_name); + + /* populate rx prop */ + rx.num_props = 2; + rx.prop = rx_prop; + + memset(rx_prop, 0, sizeof(rx_prop)); + rx_prop[0].ip = IPA_IP_v4; + rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD; + rx_prop[0].hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_prop[0].attrib.meta_data = iface_info->iface_hdl; + rx_prop[0].attrib.meta_data_mask = 0xFF; + + rx_prop[1].ip = IPA_IP_v6; + rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD; + rx_prop[1].hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_prop[1].attrib.meta_data = iface_info->iface_hdl; + rx_prop[1].attrib.meta_data_mask = 0xFF; + + if (ipa_register_intf(iface_info->netdev_name, &tx, &rx)) { + IPA_GSB_ERR("fail to add interface prop\n"); + return -EFAULT; + } + + return 0; +} + +static void ipa_gsb_dereg_intf_props(struct ipa_gsb_iface_info *iface_info) +{ + if (ipa_deregister_intf(iface_info->netdev_name) != 0) + IPA_GSB_ERR("fail to dereg intf props\n"); + + IPA_GSB_DBG("deregistered iface props for %s\n", + iface_info->netdev_name); +} + +static void ipa_gsb_pm_cb(void *user_data, enum ipa_pm_cb_event event) +{ + int i; + + if (event != IPA_PM_REQUEST_WAKEUP) { + IPA_GSB_ERR("Unexpected event %d\n", event); + WARN_ON(1); + return; + } + + IPA_GSB_DBG_LOW("wake up clients\n"); + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) + if (ipa_gsb_ctx->iface[i] != NULL) + ipa_gsb_ctx->iface[i]->wakeup_request( + ipa_gsb_ctx->iface[i]->priv); +} + +static int ipa_gsb_register_pm(void) +{ + struct ipa_pm_register_params reg_params; + int ret; + + memset(®_params, 0, sizeof(reg_params)); + reg_params.name = "ipa_gsb"; + reg_params.callback = ipa_gsb_pm_cb; + reg_params.user_data = NULL; + reg_params.group = IPA_PM_GROUP_DEFAULT; + + ret = ipa_pm_register(®_params, + &ipa_gsb_ctx->pm_hdl); + if (ret) { + IPA_GSB_ERR("fail to register with PM %d\n", ret); + goto fail_pm_reg; + } + IPA_GSB_DBG("ipa pm hdl: %d\n", ipa_gsb_ctx->pm_hdl); + + ret = ipa_pm_associate_ipa_cons_to_client(ipa_gsb_ctx->pm_hdl, + IPA_CLIENT_ODU_EMB_CONS); + if (ret) { + IPA_GSB_ERR("fail to associate cons with PM %d\n", ret); + goto fail_pm_cons; + } + + return 0; + +fail_pm_cons: + ipa_pm_deregister(ipa_gsb_ctx->pm_hdl); + ipa_gsb_ctx->pm_hdl = ~0; +fail_pm_reg: + return ret; +} + +int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl) +{ + int i, ret; + struct ipa_gsb_iface_info *new_intf; + + if (!params || !params->wakeup_request || !hdl || + !params->info.netdev_name || !params->info.tx_dp_notify || + !params->info.send_dl_skb) { + IPA_GSB_ERR("Invalid parameters\n"); + return -EINVAL; + } + + IPA_GSB_DBG("netdev_name: %s\n", params->info.netdev_name); + + if (ipa_gsb_ctx == NULL) { + ret = ipa_gsb_driver_init(¶ms->info); + if (ret) { + IPA_GSB_ERR("fail to init ipa gsb driver\n"); + return -EFAULT; + } + ipa_gsb_ctx->ipa_sys_desc_size = + params->info.ipa_desc_size; + IPA_GSB_DBG("desc size: %d\n", ipa_gsb_ctx->ipa_sys_desc_size); + } + + mutex_lock(&ipa_gsb_ctx->lock); + + if (params->info.ipa_desc_size != ipa_gsb_ctx->ipa_sys_desc_size) { + IPA_GSB_ERR("unmatch: orig desc size %d, new desc size %d\n", + ipa_gsb_ctx->ipa_sys_desc_size, + params->info.ipa_desc_size); + 
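/* all bridge clients must use the ipa_desc_size chosen by the first ipa_bridge_init() call, since the shared sys pipes are sized with it */ +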
mutex_unlock(&ipa_gsb_ctx->lock); + return -EFAULT; + } + + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) + if (ipa_gsb_ctx->iface[i] != NULL && + strnlen(ipa_gsb_ctx->iface[i]->netdev_name, + IPA_RESOURCE_NAME_MAX) == + strnlen(params->info.netdev_name, + IPA_RESOURCE_NAME_MAX) && + strcmp(ipa_gsb_ctx->iface[i]->netdev_name, + params->info.netdev_name) == 0) { + IPA_GSB_ERR("intf was added before.\n"); + mutex_unlock(&ipa_gsb_ctx->lock); + return -EFAULT; + } + + if (ipa_gsb_ctx->num_iface == MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("reached maximum supported interfaces"); + mutex_unlock(&ipa_gsb_ctx->lock); + return -EFAULT; + } + + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) + if (!ipa_gsb_ctx->iface_hdl[i]) { + ipa_gsb_ctx->iface_hdl[i] = true; + *hdl = i; + IPA_GSB_DBG("iface hdl: %d\n", *hdl); + break; + } + + IPA_GSB_DBG("intf was not added before, proceed.\n"); + new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL); + if (new_intf == NULL) { + ret = -ENOMEM; + goto fail_alloc_mem; + } + + strlcpy(new_intf->netdev_name, params->info.netdev_name, + sizeof(new_intf->netdev_name)); + new_intf->wakeup_request = params->wakeup_request; + new_intf->priv = params->info.priv; + new_intf->tx_dp_notify = params->info.tx_dp_notify; + new_intf->send_dl_skb = params->info.send_dl_skb; + new_intf->iface_hdl = *hdl; + memcpy(new_intf->device_ethaddr, params->info.device_ethaddr, + sizeof(new_intf->device_ethaddr)); + + if (ipa_gsb_commit_partial_hdr(new_intf) != 0) { + IPA_GSB_ERR("fail to commit partial hdrs\n"); + ret = -EFAULT; + goto fail_partial_hdr; + } + + if (ipa_gsb_reg_intf_props(new_intf) != 0) { + IPA_GSB_ERR("fail to register interface props\n"); + ret = -EFAULT; + goto fail_reg_intf_props; + } + + if (ipa_gsb_ctx->num_iface == 0) { + ret = ipa_gsb_register_pm(); + if (ret) { + IPA_GSB_ERR("fail to register with IPA PM %d\n", ret); + ret = -EFAULT; + goto fail_register_pm; + } + } + + ipa_gsb_ctx->iface[*hdl] = new_intf; + ipa_gsb_ctx->num_iface++; + IPA_GSB_DBG("num_iface %d\n", ipa_gsb_ctx->num_iface); + mutex_unlock(&ipa_gsb_ctx->lock); + return 0; + +fail_register_pm: + ipa_gsb_dereg_intf_props(new_intf); +fail_reg_intf_props: + ipa_gsb_delete_partial_hdr(new_intf); +fail_partial_hdr: + kfree(new_intf); +fail_alloc_mem: + ipa_gsb_ctx->iface_hdl[*hdl] = false; + mutex_unlock(&ipa_gsb_ctx->lock); + return ret; +} +EXPORT_SYMBOL(ipa_bridge_init); + +static void ipa_gsb_deregister_pm(void) +{ + IPA_GSB_DBG("deregister ipa pm hdl: %d\n", ipa_gsb_ctx->pm_hdl); + ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl); + ipa_pm_deregister(ipa_gsb_ctx->pm_hdl); + ipa_gsb_ctx->pm_hdl = ~0; +} + +int ipa_bridge_cleanup(u32 hdl) +{ + int i; + + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + return -EFAULT; + } + + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + IPA_GSB_DBG("client hdl: %d\n", hdl); + + if (ipa_gsb_ctx->iface[hdl]->is_connected) { + IPA_GSB_ERR("cannot cleanup when iface is connected\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + ipa_gsb_dereg_intf_props(ipa_gsb_ctx->iface[hdl]); + ipa_gsb_delete_partial_hdr(ipa_gsb_ctx->iface[hdl]); + spin_lock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]); + kfree(ipa_gsb_ctx->iface[hdl]); + ipa_gsb_ctx->iface[hdl] = NULL; + ipa_gsb_ctx->iface_hdl[hdl] = false; + 
spin_unlock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + mutex_lock(&ipa_gsb_ctx->lock); + ipa_gsb_ctx->num_iface--; + IPA_GSB_DBG("num_iface %d\n", ipa_gsb_ctx->num_iface); + if (ipa_gsb_ctx->num_iface == 0) { + ipa_gsb_deregister_pm(); + ipa_gsb_debugfs_destroy(); + ipc_log_context_destroy(ipa_gsb_ctx->logbuf); + ipc_log_context_destroy(ipa_gsb_ctx->logbuf_low); + mutex_unlock(&ipa_gsb_ctx->lock); + mutex_destroy(&ipa_gsb_ctx->lock); + for (i = 0; i < MAX_SUPPORTED_IFACE; i++) + mutex_destroy(&ipa_gsb_ctx->iface_lock[i]); + kfree(ipa_gsb_ctx); + ipa_gsb_ctx = NULL; + return 0; + } + mutex_unlock(&ipa_gsb_ctx->lock); + return 0; +} +EXPORT_SYMBOL(ipa_bridge_cleanup); + +static void ipa_gsb_cons_cb(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb; + struct sk_buff *skb2; + struct ipa_gsb_mux_hdr *mux_hdr; + u16 pkt_size, pad_byte; + u8 hdl; + + if (evt != IPA_RECEIVE) { + IPA_GSB_ERR("unexpected event\n"); + WARN_ON(1); + return; + } + + skb = (struct sk_buff *)data; + + if (skb == NULL) { + IPA_GSB_ERR("unexpected NULL data\n"); + WARN_ON(1); + return; + } + + while (skb->len) { + mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data; + pkt_size = mux_hdr->pkt_size; + /* 4-byte padding */ + pad_byte = ((pkt_size + sizeof(*mux_hdr) + ETH_HLEN + + 3 + IPA_GSB_SKB_DUMMY_HEADER) & ~3) - + (pkt_size + sizeof(*mux_hdr) + + ETH_HLEN + IPA_GSB_SKB_DUMMY_HEADER); + hdl = mux_hdr->iface_hdl; + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + break; + } + IPA_GSB_DBG_LOW("pkt_size: %d, pad_byte: %d, hdl: %d\n", + pkt_size, pad_byte, hdl); + + /* remove 4 byte mux header AND dummy header*/ + skb_pull(skb, sizeof(*mux_hdr) + IPA_GSB_SKB_DUMMY_HEADER); + + skb2 = skb_clone(skb, GFP_KERNEL); + if (!skb2) { + IPA_GSB_ERR("skb_clone failed\n"); + WARN_ON(1); + break; + } + skb_trim(skb2, pkt_size + ETH_HLEN); + spin_lock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]); + if (ipa_gsb_ctx->iface[hdl] != NULL) { + ipa_gsb_ctx->iface[hdl]->send_dl_skb( + ipa_gsb_ctx->iface[hdl]->priv, skb2); + ipa_gsb_ctx->iface[hdl]->iface_stats.num_dl_packets++; + spin_unlock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]); + skb_pull(skb, pkt_size + ETH_HLEN + pad_byte); + } else { + IPA_GSB_ERR("Invalid hdl: %d, drop the skb\n", hdl); + spin_unlock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]); + dev_kfree_skb_any(skb2); + break; + } + } + + if (skb) { + dev_kfree_skb_any(skb); + skb = NULL; + } +} + +static void ipa_gsb_tx_dp_notify(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb; + struct ipa_gsb_mux_hdr *mux_hdr; + u8 hdl; + + skb = (struct sk_buff *)data; + + if (skb == NULL) { + IPA_GSB_ERR("unexpected NULL data\n"); + WARN_ON(1); + return; + } + + if (evt != IPA_WRITE_DONE && evt != IPA_RECEIVE) { + IPA_GSB_ERR("unexpected event: %d\n", evt); + dev_kfree_skb_any(skb); + return; + } + + /* fetch iface handle from header */ + mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data; + /* change to host order */ + *(u32 *)mux_hdr = ntohl(*(u32 *)mux_hdr); + hdl = mux_hdr->iface_hdl; + if ((hdl < 0) || (hdl >= MAX_SUPPORTED_IFACE) || + !ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("invalid hdl: %d and cb, drop the skb\n", hdl); + dev_kfree_skb_any(skb); + return; + } + IPA_GSB_DBG_LOW("evt: %d, hdl in tx_dp_notify: %d\n", evt, hdl); + + /* remove 4 byte mux header */ + skb_pull(skb, sizeof(struct ipa_gsb_mux_hdr)); + ipa_gsb_ctx->iface[hdl]->tx_dp_notify( + ipa_gsb_ctx->iface[hdl]->priv, evt, + (unsigned long)skb); +} + +static 
int ipa_gsb_connect_sys_pipe(void) +{ + struct ipa_sys_connect_params prod_params; + struct ipa_sys_connect_params cons_params; + int res; + + memset(&prod_params, 0, sizeof(prod_params)); + memset(&cons_params, 0, sizeof(cons_params)); + + /* configure RX EP */ + prod_params.client = IPA_CLIENT_ODU_PROD; + prod_params.ipa_ep_cfg.hdr.hdr_len = + ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr); + prod_params.ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT; + prod_params.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1; + prod_params.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0; + prod_params.desc_fifo_sz = ipa_gsb_ctx->ipa_sys_desc_size; + prod_params.priv = NULL; + prod_params.notify = ipa_gsb_tx_dp_notify; + res = ipa_setup_sys_pipe(&prod_params, + &ipa_gsb_ctx->prod_hdl); + if (res) { + IPA_GSB_ERR("fail to setup prod sys pipe %d\n", res); + goto fail_prod; + } + + /* configure TX EP */ + cons_params.client = IPA_CLIENT_ODU_EMB_CONS; + cons_params.ipa_ep_cfg.hdr.hdr_len = + ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr) + + IPA_GSB_SKB_DUMMY_HEADER; + cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1; + cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2; + cons_params.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2; + cons_params.ipa_ep_cfg.hdr_ext.hdr_little_endian = true; + cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT; + /* setup aggregation */ + cons_params.ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR; + cons_params.ipa_ep_cfg.aggr.aggr = IPA_GENERIC; + cons_params.ipa_ep_cfg.aggr.aggr_time_limit = + IPA_GSB_AGGR_TIME_LIMIT; + cons_params.ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GSB_AGGR_BYTE_LIMIT; + cons_params.desc_fifo_sz = ipa_gsb_ctx->ipa_sys_desc_size; + cons_params.priv = NULL; + cons_params.notify = ipa_gsb_cons_cb; + res = ipa_setup_sys_pipe(&cons_params, + &ipa_gsb_ctx->cons_hdl); + if (res) { + IPA_GSB_ERR("fail to setup cons sys pipe %d\n", res); + goto fail_cons; + } + + IPA_GSB_DBG("prod_hdl = %d, cons_hdl = %d\n", + ipa_gsb_ctx->prod_hdl, ipa_gsb_ctx->cons_hdl); + + return 0; + +fail_cons: + ipa_teardown_sys_pipe(ipa_gsb_ctx->prod_hdl); + ipa_gsb_ctx->prod_hdl = 0; +fail_prod: + return res; +} + +int ipa_bridge_connect(u32 hdl) +{ + int ret; + + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + return -EFAULT; + } + + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + IPA_GSB_DBG("client hdl: %d\n", hdl); + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + if (ipa_gsb_ctx->iface[hdl]->is_connected) { + IPA_GSB_DBG("iface was already connected\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; + } + + mutex_lock(&ipa_gsb_ctx->lock); + if (ipa_gsb_ctx->num_connected_iface == 0) { + ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl); + if (ret) { + IPA_GSB_ERR("failed to activate ipa pm\n"); + mutex_unlock(&ipa_gsb_ctx->lock); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; + } + ret = ipa_gsb_connect_sys_pipe(); + if (ret) { + IPA_GSB_ERR("fail to connect pipe\n"); + mutex_unlock(&ipa_gsb_ctx->lock); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; + } + } + + /* connect = connect + resume */ + ipa_gsb_ctx->iface[hdl]->is_connected = true; + ipa_gsb_ctx->iface[hdl]->is_resumed = true; + + ipa_gsb_ctx->num_connected_iface++; + IPA_GSB_DBG("connected iface: %d\n", + ipa_gsb_ctx->num_connected_iface); + ipa_gsb_ctx->num_resumed_iface++; + 
IPA_GSB_DBG("num resumed iface: %d\n", + ipa_gsb_ctx->num_resumed_iface); + mutex_unlock(&ipa_gsb_ctx->lock); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; +} +EXPORT_SYMBOL(ipa_bridge_connect); + +static int ipa_gsb_disconnect_sys_pipe(void) +{ + int ret; + + IPA_GSB_DBG("prod_hdl = %d, cons_hdl = %d\n", + ipa_gsb_ctx->prod_hdl, ipa_gsb_ctx->cons_hdl); + + ret = ipa_teardown_sys_pipe(ipa_gsb_ctx->prod_hdl); + if (ret) { + IPA_GSB_ERR("failed to tear down prod pipe\n"); + return -EFAULT; + } + ipa_gsb_ctx->prod_hdl = 0; + + ret = ipa_teardown_sys_pipe(ipa_gsb_ctx->cons_hdl); + if (ret) { + IPA_GSB_ERR("failed to tear down cons pipe\n"); + return -EFAULT; + } + ipa_gsb_ctx->cons_hdl = 0; + + return 0; +} + +int ipa_bridge_disconnect(u32 hdl) +{ + int ret = 0; + + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + return -EFAULT; + } + + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + IPA_GSB_DBG("client hdl: %d\n", hdl); + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + atomic_set(&ipa_gsb_ctx->disconnect_in_progress, 1); + + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + ret = -EFAULT; + goto fail; + } + + if (!ipa_gsb_ctx->iface[hdl]->is_connected) { + IPA_GSB_DBG("iface was not connected\n"); + ret = 0; + goto fail; + } + + mutex_lock(&ipa_gsb_ctx->lock); + if (ipa_gsb_ctx->num_connected_iface == 1) { + ret = ipa_gsb_disconnect_sys_pipe(); + if (ret) { + IPA_GSB_ERR("fail to discon pipes\n"); + ret = -EFAULT; + goto fail; + } + + ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl); + if (ret) { + IPA_GSB_ERR("failed to deactivate ipa pm\n"); + ret = -EFAULT; + goto fail; + } + } + + /* disconnect = suspend + disconnect */ + ipa_gsb_ctx->iface[hdl]->is_connected = false; + ipa_gsb_ctx->num_connected_iface--; + IPA_GSB_DBG("connected iface: %d\n", + ipa_gsb_ctx->num_connected_iface); + + if (ipa_gsb_ctx->iface[hdl]->is_resumed) { + ipa_gsb_ctx->iface[hdl]->is_resumed = false; + ipa_gsb_ctx->num_resumed_iface--; + IPA_GSB_DBG("num resumed iface: %d\n", + ipa_gsb_ctx->num_resumed_iface); + } + +fail: + mutex_unlock(&ipa_gsb_ctx->lock); + atomic_set(&ipa_gsb_ctx->disconnect_in_progress, 0); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; +} +EXPORT_SYMBOL(ipa_bridge_disconnect); + +int ipa_bridge_resume(u32 hdl) +{ + int ret; + + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + return -EFAULT; + } + + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + IPA_GSB_DBG_LOW("client hdl: %d\n", hdl); + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + if (!ipa_gsb_ctx->iface[hdl]->is_connected) { + IPA_GSB_ERR("iface is not connected\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + if (ipa_gsb_ctx->iface[hdl]->is_resumed) { + IPA_GSB_DBG_LOW("iface was already resumed\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; + } + + mutex_lock(&ipa_gsb_ctx->lock); + if (ipa_gsb_ctx->num_resumed_iface == 0) { + ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl); + if (ret) { + IPA_GSB_ERR("fail to activate ipa pm\n"); + mutex_unlock(&ipa_gsb_ctx->lock); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; + } + + ret = ipa_start_gsi_channel( + ipa_gsb_ctx->cons_hdl); + if (ret) { + 
IPA_GSB_ERR( + "fail to start con ep %d\n", + ret); + mutex_unlock(&ipa_gsb_ctx->lock); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; + } + } + + ipa_gsb_ctx->iface[hdl]->is_resumed = true; + ipa_gsb_ctx->num_resumed_iface++; + IPA_GSB_DBG_LOW("num resumed iface: %d\n", + ipa_gsb_ctx->num_resumed_iface); + + mutex_unlock(&ipa_gsb_ctx->lock); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; +} +EXPORT_SYMBOL(ipa_bridge_resume); + +int ipa_bridge_suspend(u32 hdl) +{ + int ret; + + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + return -EFAULT; + } + + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + IPA_GSB_DBG_LOW("client hdl: %d\n", hdl); + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + if (!ipa_gsb_ctx->iface[hdl]->is_connected) { + IPA_GSB_ERR("iface is not connected\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return -EFAULT; + } + + if (!ipa_gsb_ctx->iface[hdl]->is_resumed) { + IPA_GSB_DBG_LOW("iface was already suspended\n"); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; + } + + mutex_lock(&ipa_gsb_ctx->lock); + if (ipa_gsb_ctx->num_resumed_iface == 1) { + ret = ipa_stop_gsi_channel( + ipa_gsb_ctx->cons_hdl); + if (ret) { + IPA_GSB_ERR( + "fail to stop cons ep %d\n", + ret); + mutex_unlock(&ipa_gsb_ctx->lock); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; + } + + ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl); + if (ret) { + IPA_GSB_ERR("fail to deactivate ipa pm\n"); + ipa_start_gsi_channel(ipa_gsb_ctx->cons_hdl); + mutex_unlock(&ipa_gsb_ctx->lock); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; + } + } + + ipa_gsb_ctx->iface[hdl]->is_resumed = false; + ipa_gsb_ctx->num_resumed_iface--; + IPA_GSB_DBG_LOW("num resumed iface: %d\n", + ipa_gsb_ctx->num_resumed_iface); + + mutex_unlock(&ipa_gsb_ctx->lock); + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return 0; +} +EXPORT_SYMBOL(ipa_bridge_suspend); + +int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth) +{ + int ret; + + if (!ipa_gsb_ctx) { + IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); + return -EFAULT; + } + + if (hdl >= MAX_SUPPORTED_IFACE) { + IPA_GSB_ERR("invalid hdl: %d\n", hdl); + return -EINVAL; + } + + IPA_GSB_DBG("client hdl: %d, BW: %d\n", hdl, bandwidth); + + mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + + ret = ipa_pm_set_throughput(ipa_gsb_ctx->pm_hdl, + bandwidth); + if (ret) + IPA_GSB_ERR("fail to set perf profile\n"); + + mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); + return ret; +} +EXPORT_SYMBOL(ipa_bridge_set_perf_profile); + +int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb, + struct ipa_tx_meta *metadata) +{ + struct ipa_gsb_mux_hdr *mux_hdr; + struct sk_buff *skb2; + struct stats iface_stats; + int ret; + + IPA_GSB_DBG_LOW("client hdl: %d\n", hdl); + + iface_stats = ipa_gsb_ctx->iface[hdl]->iface_stats; + if (!ipa_gsb_ctx->iface[hdl]) { + IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); + return -EFAULT; + } + + if (unlikely(atomic_read(&ipa_gsb_ctx->disconnect_in_progress))) { + IPA_GSB_ERR("ipa bridge disconnect_in_progress\n"); + return -EFAULT; + } + + /* make sure skb has enough headroom */ + if (unlikely(skb_headroom(skb) < sizeof(struct ipa_gsb_mux_hdr))) { + IPA_GSB_DBG_LOW("skb doesn't have enough headroom\n"); + skb2 = skb_copy_expand(skb, sizeof(struct ipa_gsb_mux_hdr), + 
0, GFP_ATOMIC); + if (!skb2) { + dev_kfree_skb_any(skb); + return -ENOMEM; + } + dev_kfree_skb_any(skb); + skb = skb2; + iface_stats.num_insufficient_headroom_packets++; + } + + /* add 4 byte header for mux */ + mux_hdr = (struct ipa_gsb_mux_hdr *)skb_push(skb, + sizeof(struct ipa_gsb_mux_hdr)); + mux_hdr->iface_hdl = (u8)hdl; + /* change to network order */ + *(u32 *)mux_hdr = htonl(*(u32 *)mux_hdr); + + ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata); + if (ret) { + IPA_GSB_ERR("tx dp failed %d\n", ret); + return -EFAULT; + } + ipa_gsb_ctx->iface[hdl]->iface_stats.num_ul_packets++; + + return 0; +} +EXPORT_SYMBOL(ipa_bridge_tx_dp); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ipa gsb driver"); diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c new file mode 100644 index 000000000000..c8bb33064570 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c @@ -0,0 +1,2852 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_common_i.h" +#include "../ipa_v3/ipa_pm.h" + +#define IPA_MHI_DRV_NAME "ipa_mhi_client" + +#define IPA_MHI_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_MHI_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +#define IPA_MHI_ERR(fmt, args...) \ + do { \ + pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_MHI_FUNC_ENTRY() \ + IPA_MHI_DBG("ENTRY\n") +#define IPA_MHI_FUNC_EXIT() \ + IPA_MHI_DBG("EXIT\n") + +#define IPA_MHI_RM_TIMEOUT_MSEC 10000 +#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10 + +#define IPA_MHI_SUSPEND_SLEEP_MIN 900 +#define IPA_MHI_SUSPEND_SLEEP_MAX 1100 + +#define IPA_MHI_MAX_UL_CHANNELS 1 +#define IPA_MHI_MAX_DL_CHANNELS 1 + +/* bit #40 in address should be asserted for MHI transfers over pcie */ +#define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \ + ((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr)) + +enum ipa_mhi_rm_state { + IPA_MHI_RM_STATE_RELEASED, + IPA_MHI_RM_STATE_REQUESTED, + IPA_MHI_RM_STATE_GRANTED, + IPA_MHI_RM_STATE_MAX +}; + +enum ipa_mhi_state { + IPA_MHI_STATE_INITIALIZED, + IPA_MHI_STATE_READY, + IPA_MHI_STATE_STARTED, + IPA_MHI_STATE_SUSPEND_IN_PROGRESS, + IPA_MHI_STATE_SUSPENDED, + IPA_MHI_STATE_RESUME_IN_PROGRESS, + IPA_MHI_STATE_MAX +}; + +static char *ipa_mhi_state_str[] = { + __stringify(IPA_MHI_STATE_INITIALIZED), + __stringify(IPA_MHI_STATE_READY), + __stringify(IPA_MHI_STATE_STARTED), + __stringify(IPA_MHI_STATE_SUSPEND_IN_PROGRESS), + __stringify(IPA_MHI_STATE_SUSPENDED), + __stringify(IPA_MHI_STATE_RESUME_IN_PROGRESS), +}; + +#define MHI_STATE_STR(state) \ + (((state) >= 0 && (state) < IPA_MHI_STATE_MAX) ? 
\ + ipa_mhi_state_str[(state)] : \ + "INVALID") + +enum ipa_mhi_dma_dir { + IPA_MHI_DMA_TO_HOST, + IPA_MHI_DMA_FROM_HOST, +}; + +/** + * struct ipa_mhi_channel_ctx - MHI Channel context + * @valid: entry is valid + * @id: MHI channel ID + * @hdl: channel handle for uC + * @client: IPA Client + * @state: Channel state + */ +struct ipa_mhi_channel_ctx { + bool valid; + u8 id; + u8 index; + enum ipa_client_type client; + enum ipa_hw_mhi_channel_states state; + bool stop_in_proc; + struct gsi_chan_info ch_info; + u64 channel_context_addr; + struct ipa_mhi_ch_ctx ch_ctx_host; + u64 event_context_addr; + struct ipa_mhi_ev_ctx ev_ctx_host; + bool brstmode_enabled; + union __packed gsi_channel_scratch ch_scratch; + unsigned long cached_gsi_evt_ring_hdl; +}; + +struct ipa_mhi_client_ctx { + enum ipa_mhi_state state; + spinlock_t state_lock; + mhi_client_cb cb_notify; + void *cb_priv; + struct completion rm_prod_granted_comp; + enum ipa_mhi_rm_state rm_cons_state; + struct completion rm_cons_comp; + bool trigger_wakeup; + bool wakeup_notified; + struct workqueue_struct *wq; + struct ipa_mhi_channel_ctx ul_channels[IPA_MHI_MAX_UL_CHANNELS]; + struct ipa_mhi_channel_ctx dl_channels[IPA_MHI_MAX_DL_CHANNELS]; + u32 total_channels; + struct ipa_mhi_msi_info msi; + u32 mmio_addr; + u32 first_ch_idx; + u32 first_er_idx; + u32 host_ctrl_addr; + u32 host_data_addr; + u64 channel_context_array_addr; + u64 event_context_array_addr; + u32 qmi_req_id; + u32 use_ipadma; + bool assert_bit40; + bool test_mode; + u32 pm_hdl; + u32 modem_pm_hdl; +}; + +static struct ipa_mhi_client_ctx *ipa_mhi_client_ctx; + +#ifdef CONFIG_DEBUG_FS +#define IPA_MHI_MAX_MSG_LEN 512 +static char dbg_buff[IPA_MHI_MAX_MSG_LEN]; +static struct dentry *dent; + +static char *ipa_mhi_channel_state_str[] = { + __stringify(IPA_HW_MHI_CHANNEL_STATE_DISABLE), + __stringify(IPA_HW_MHI_CHANNEL_STATE_ENABLE), + __stringify(IPA_HW_MHI_CHANNEL_STATE_RUN), + __stringify(IPA_HW_MHI_CHANNEL_STATE_SUSPEND), + __stringify(IPA_HW_MHI_CHANNEL_STATE_STOP), + __stringify(IPA_HW_MHI_CHANNEL_STATE_ERROR), +}; + +#define MHI_CH_STATE_STR(state) \ + (((state) >= 0 && (state) <= IPA_HW_MHI_CHANNEL_STATE_ERROR) ? 
\ + ipa_mhi_channel_state_str[(state)] : \ + "INVALID") + +static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr, + u64 host_addr, int size) +{ + struct ipa_mem_buffer mem; + int res; + struct device *pdev; + + IPA_MHI_FUNC_ENTRY(); + + if (ipa_mhi_client_ctx->use_ipadma) { + pdev = ipa_get_dma_dev(); + host_addr = IPA_MHI_CLIENT_HOST_ADDR_COND(host_addr); + + mem.size = size; + mem.base = dma_alloc_coherent(pdev, mem.size, + &mem.phys_base, GFP_KERNEL); + if (!mem.base) { + IPA_MHI_ERR( + "dma_alloc_coherent failed, DMA buff size %d\n" + , mem.size); + return -ENOMEM; + } + + res = ipa_dma_enable(); + if (res) { + IPA_MHI_ERR("failed to enable IPA DMA rc=%d\n", res); + goto fail_dma_enable; + } + + if (dir == IPA_MHI_DMA_FROM_HOST) { + res = ipa_dma_sync_memcpy(mem.phys_base, host_addr, + size); + if (res) { + IPA_MHI_ERR( + "ipa_dma_sync_memcpy from host fail%d\n" + , res); + goto fail_memcopy; + } + memcpy(dev_addr, mem.base, size); + } else { + memcpy(mem.base, dev_addr, size); + res = ipa_dma_sync_memcpy(host_addr, mem.phys_base, + size); + if (res) { + IPA_MHI_ERR( + "ipa_dma_sync_memcpy to host fail %d\n" + , res); + goto fail_memcopy; + } + } + goto dma_succeed; + } else { + void *host_ptr; + + if (!ipa_mhi_client_ctx->test_mode) + host_ptr = ioremap(host_addr, size); + else + host_ptr = phys_to_virt(host_addr); + if (!host_ptr) { + IPA_MHI_ERR("ioremap failed for 0x%llx\n", host_addr); + return -EFAULT; + } + if (dir == IPA_MHI_DMA_FROM_HOST) + memcpy(dev_addr, host_ptr, size); + else + memcpy(host_ptr, dev_addr, size); + if (!ipa_mhi_client_ctx->test_mode) + iounmap(host_ptr); + } + + IPA_MHI_FUNC_EXIT(); + return 0; + +dma_succeed: + IPA_MHI_FUNC_EXIT(); + res = 0; +fail_memcopy: + if (ipa_dma_disable()) + IPA_MHI_ERR("failed to disable IPA DMA\n"); +fail_dma_enable: + dma_free_coherent(pdev, mem.size, mem.base, mem.phys_base); + return res; +} + +static int ipa_mhi_print_channel_info(struct ipa_mhi_channel_ctx *channel, + char *buff, int len) +{ + int nbytes = 0; + + if (channel->valid) { + nbytes += scnprintf(&buff[nbytes], + len - nbytes, + "channel idx=%d ch_id=%d client=%d state=%s\n", + channel->index, channel->id, channel->client, + MHI_CH_STATE_STR(channel->state)); + + nbytes += scnprintf(&buff[nbytes], + len - nbytes, + " ch_ctx=%llx\n", + channel->channel_context_addr); + + nbytes += scnprintf(&buff[nbytes], + len - nbytes, + " gsi_evt_ring_hdl=%ld ev_ctx=%llx\n", + channel->cached_gsi_evt_ring_hdl, + channel->event_context_addr); + } + return nbytes; +} + +static int ipa_mhi_print_host_channel_ctx_info( + struct ipa_mhi_channel_ctx *channel, char *buff, int len) +{ + int res, nbytes = 0; + struct ipa_mhi_ch_ctx ch_ctx_host; + + memset(&ch_ctx_host, 0, sizeof(ch_ctx_host)); + + /* reading ch context from host */ + res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST, + &ch_ctx_host, channel->channel_context_addr, + sizeof(ch_ctx_host)); + if (res) { + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "Failed to read from host %d\n", res); + return nbytes; + } + + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "ch_id: %d\n", channel->id); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "chstate: 0x%x\n", ch_ctx_host.chstate); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "brstmode: 0x%x\n", ch_ctx_host.brstmode); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "chtype: 0x%x\n", ch_ctx_host.chtype); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "erindex: 0x%x\n", ch_ctx_host.erindex); + nbytes += scnprintf(&buff[nbytes], 
len - nbytes, + "rbase: 0x%llx\n", ch_ctx_host.rbase); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "rlen: 0x%llx\n", ch_ctx_host.rlen); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "rp: 0x%llx\n", ch_ctx_host.rp); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "wp: 0x%llx\n", ch_ctx_host.wp); + + return nbytes; +} + +static ssize_t ipa_mhi_debugfs_stats(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int nbytes = 0; + int i; + struct ipa_mhi_channel_ctx *channel; + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "IPA MHI state: %s\n", + MHI_STATE_STR(ipa_mhi_client_ctx->state)); + + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->ul_channels[i]; + nbytes += ipa_mhi_print_channel_info(channel, + &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes); + } + + for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->dl_channels[i]; + nbytes += ipa_mhi_print_channel_info(channel, + &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes); + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_mhi_debugfs_uc_stats(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int nbytes = 0; + + nbytes += ipa_uc_mhi_print_stats(dbg_buff, IPA_MHI_MAX_MSG_LEN); + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_mhi_debugfs_dump_host_ch_ctx_arr(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int i, nbytes = 0; + struct ipa_mhi_channel_ctx *channel; + + if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_INITIALIZED || + ipa_mhi_client_ctx->state == IPA_MHI_STATE_READY) { + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "Cannot dump host channel context "); + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "before IPA MHI was STARTED\n"); + return simple_read_from_buffer(ubuf, count, ppos, + dbg_buff, nbytes); + } + if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) { + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "IPA MHI is suspended, cannot dump channel ctx array"); + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + " from host -PCIe can be in D3 state\n"); + return simple_read_from_buffer(ubuf, count, ppos, + dbg_buff, nbytes); + } + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "channel contex array - dump from host\n"); + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "***** UL channels *******\n"); + + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->ul_channels[i]; + if (!channel->valid) + continue; + nbytes += ipa_mhi_print_host_channel_ctx_info(channel, + &dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes); + } + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "\n***** DL channels *******\n"); + + for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->dl_channels[i]; + if (!channel->valid) + continue; + nbytes += ipa_mhi_print_host_channel_ctx_info(channel, + &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes); + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +const struct file_operations ipa_mhi_stats_ops = { + .read = ipa_mhi_debugfs_stats, +}; + +const struct file_operations ipa_mhi_uc_stats_ops = { + .read = ipa_mhi_debugfs_uc_stats, +}; + +const struct 
file_operations ipa_mhi_dump_host_ch_ctx_ops = { + .read = ipa_mhi_debugfs_dump_host_ch_ctx_arr, +}; + + +static void ipa_mhi_debugfs_init(void) +{ + const mode_t read_only_mode = 0444; + const mode_t read_write_mode = 0664; + struct dentry *file; + + IPA_MHI_FUNC_ENTRY(); + + dent = debugfs_create_dir("ipa_mhi", 0); + if (IS_ERR(dent)) { + IPA_MHI_ERR("fail to create folder ipa_mhi\n"); + return; + } + + file = debugfs_create_file("stats", read_only_mode, dent, + 0, &ipa_mhi_stats_ops); + if (!file || IS_ERR(file)) { + IPA_MHI_ERR("fail to create file stats\n"); + goto fail; + } + + file = debugfs_create_file("uc_stats", read_only_mode, dent, + 0, &ipa_mhi_uc_stats_ops); + if (!file || IS_ERR(file)) { + IPA_MHI_ERR("fail to create file uc_stats\n"); + goto fail; + } + + file = debugfs_create_u32("use_ipadma", read_write_mode, dent, + &ipa_mhi_client_ctx->use_ipadma); + if (!file || IS_ERR(file)) { + IPA_MHI_ERR("fail to create file use_ipadma\n"); + goto fail; + } + + file = debugfs_create_file("dump_host_channel_ctx_array", + read_only_mode, dent, 0, &ipa_mhi_dump_host_ch_ctx_ops); + if (!file || IS_ERR(file)) { + IPA_MHI_ERR("fail to create file dump_host_channel_ctx_arr\n"); + goto fail; + } + + IPA_MHI_FUNC_EXIT(); + return; +fail: + debugfs_remove_recursive(dent); +} + +#else +static void ipa_mhi_debugfs_init(void) {} +static void ipa_mhi_debugfs_destroy(void) {} +#endif /* CONFIG_DEBUG_FS */ + +static union IpaHwMhiDlUlSyncCmdData_t ipa_cached_dl_ul_sync_info; + +static void ipa_mhi_wq_notify_wakeup(struct work_struct *work); +static DECLARE_WORK(ipa_mhi_notify_wakeup_work, ipa_mhi_wq_notify_wakeup); + +static void ipa_mhi_wq_notify_ready(struct work_struct *work); +static DECLARE_WORK(ipa_mhi_notify_ready_work, ipa_mhi_wq_notify_ready); + +/** + * ipa_mhi_notify_wakeup() - Schedule work to notify data available + * + * This function will schedule a work to notify data available event. + * In case this function is called more than once, only one notification will + * be sent to MHI client driver. No further notifications will be sent until + * IPA MHI state will become STARTED. + */ +static void ipa_mhi_notify_wakeup(void) +{ + IPA_MHI_FUNC_ENTRY(); + if (ipa_mhi_client_ctx->wakeup_notified) { + IPA_MHI_DBG("wakeup already called\n"); + return; + } + queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_wakeup_work); + ipa_mhi_client_ctx->wakeup_notified = true; + IPA_MHI_FUNC_EXIT(); +} + +/** + * ipa_mhi_rm_cons_request() - callback function for IPA RM request resource + * + * In case IPA MHI is not suspended, MHI CONS will be granted immediately. + * In case IPA MHI is suspended, MHI CONS will be granted after resume. 
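+ * Returns 0 when MHI CONS is granted immediately (IPA MHI is in STARTED state), + * or -EINPROGRESS when the grant is deferred until IPA MHI is resumed.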
+ */ +static int ipa_mhi_rm_cons_request(void) +{ + unsigned long flags; + int res; + + IPA_MHI_FUNC_ENTRY(); + + IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state)); + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_REQUESTED; + if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_STARTED) { + ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED; + res = 0; + } else if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) { + ipa_mhi_notify_wakeup(); + res = -EINPROGRESS; + } else if (ipa_mhi_client_ctx->state == + IPA_MHI_STATE_SUSPEND_IN_PROGRESS) { + /* wakeup event will be trigger after suspend finishes */ + ipa_mhi_client_ctx->trigger_wakeup = true; + res = -EINPROGRESS; + } else { + res = -EINPROGRESS; + } + + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + IPA_MHI_DBG("EXIT with %d\n", res); + return res; +} + +static int ipa_mhi_rm_cons_release(void) +{ + unsigned long flags; + + IPA_MHI_FUNC_ENTRY(); + + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED; + complete_all(&ipa_mhi_client_ctx->rm_cons_comp); + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static void ipa_mhi_rm_prod_notify(void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + IPA_MHI_FUNC_ENTRY(); + + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + IPA_MHI_DBG("IPA_RM_RESOURCE_GRANTED\n"); + complete_all(&ipa_mhi_client_ctx->rm_prod_granted_comp); + break; + + case IPA_RM_RESOURCE_RELEASED: + IPA_MHI_DBG("IPA_RM_RESOURCE_RELEASED\n"); + break; + + default: + IPA_MHI_ERR("unexpected event %d\n", event); + WARN_ON(1); + break; + } + + IPA_MHI_FUNC_EXIT(); +} + +/** + * ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available + * + * This function is called from IPA MHI workqueue to notify + * MHI client driver on data available event. + */ +static void ipa_mhi_wq_notify_wakeup(struct work_struct *work) +{ + IPA_MHI_FUNC_ENTRY(); + ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv, + IPA_MHI_EVENT_DATA_AVAILABLE, 0); + IPA_MHI_FUNC_EXIT(); +} + +/** + * ipa_mhi_wq_notify_ready() - Notify MHI client on ready + * + * This function is called from IPA MHI workqueue to notify + * MHI client driver on ready event when IPA uC is loaded + */ +static void ipa_mhi_wq_notify_ready(struct work_struct *work) +{ + IPA_MHI_FUNC_ENTRY(); + ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv, + IPA_MHI_EVENT_READY, 0); + IPA_MHI_FUNC_EXIT(); +} + +/** + * ipa_mhi_notify_ready() - Schedule work to notify ready + * + * This function will schedule a work to notify ready event. + */ +static void ipa_mhi_notify_ready(void) +{ + IPA_MHI_FUNC_ENTRY(); + queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_ready_work); + IPA_MHI_FUNC_EXIT(); +} + +/** + * ipa_mhi_set_state() - Set new state to IPA MHI + * @state: new state + * + * Sets a new state to IPA MHI if possible according to IPA MHI state machine. + * In some state transitions a wakeup request will be triggered. 
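+ * For example, moving from IPA_MHI_STATE_SUSPEND_IN_PROGRESS or + * IPA_MHI_STATE_RESUME_IN_PROGRESS to IPA_MHI_STATE_SUSPENDED schedules a + * wakeup notification if one was requested while the transition was in progress.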
+ * + * Returns: 0 on success, -1 otherwise + */ +static int ipa_mhi_set_state(enum ipa_mhi_state new_state) +{ + unsigned long flags; + int res = -EPERM; + + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + IPA_MHI_DBG("Current state: %s\n", + MHI_STATE_STR(ipa_mhi_client_ctx->state)); + + switch (ipa_mhi_client_ctx->state) { + case IPA_MHI_STATE_INITIALIZED: + if (new_state == IPA_MHI_STATE_READY) { + ipa_mhi_notify_ready(); + res = 0; + } + break; + + case IPA_MHI_STATE_READY: + if (new_state == IPA_MHI_STATE_READY) + res = 0; + if (new_state == IPA_MHI_STATE_STARTED) + res = 0; + break; + + case IPA_MHI_STATE_STARTED: + if (new_state == IPA_MHI_STATE_INITIALIZED) + res = 0; + else if (new_state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS) + res = 0; + break; + + case IPA_MHI_STATE_SUSPEND_IN_PROGRESS: + if (new_state == IPA_MHI_STATE_SUSPENDED) { + if (ipa_mhi_client_ctx->trigger_wakeup) { + ipa_mhi_client_ctx->trigger_wakeup = false; + ipa_mhi_notify_wakeup(); + } + res = 0; + } else if (new_state == IPA_MHI_STATE_STARTED) { + ipa_mhi_client_ctx->wakeup_notified = false; + ipa_mhi_client_ctx->trigger_wakeup = false; + if (ipa_mhi_client_ctx->rm_cons_state == + IPA_MHI_RM_STATE_REQUESTED) { + ipa_rm_notify_completion( + IPA_RM_RESOURCE_GRANTED, + IPA_RM_RESOURCE_MHI_CONS); + ipa_mhi_client_ctx->rm_cons_state = + IPA_MHI_RM_STATE_GRANTED; + } + res = 0; + } + break; + + case IPA_MHI_STATE_SUSPENDED: + if (new_state == IPA_MHI_STATE_RESUME_IN_PROGRESS) + res = 0; + break; + + case IPA_MHI_STATE_RESUME_IN_PROGRESS: + if (new_state == IPA_MHI_STATE_SUSPENDED) { + if (ipa_mhi_client_ctx->trigger_wakeup) { + ipa_mhi_client_ctx->trigger_wakeup = false; + ipa_mhi_notify_wakeup(); + } + res = 0; + } else if (new_state == IPA_MHI_STATE_STARTED) { + ipa_mhi_client_ctx->trigger_wakeup = false; + ipa_mhi_client_ctx->wakeup_notified = false; + if (ipa_mhi_client_ctx->rm_cons_state == + IPA_MHI_RM_STATE_REQUESTED) { + ipa_rm_notify_completion( + IPA_RM_RESOURCE_GRANTED, + IPA_RM_RESOURCE_MHI_CONS); + ipa_mhi_client_ctx->rm_cons_state = + IPA_MHI_RM_STATE_GRANTED; + } + res = 0; + } + break; + + default: + IPA_MHI_ERR("Invalid state %d\n", ipa_mhi_client_ctx->state); + WARN_ON(1); + } + + if (res) + IPA_MHI_ERR("Invalid state change to %s\n", + MHI_STATE_STR(new_state)); + else { + IPA_MHI_DBG("New state change to %s\n", + MHI_STATE_STR(new_state)); + ipa_mhi_client_ctx->state = new_state; + } + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + return res; +} + +static void ipa_mhi_uc_ready_cb(void) +{ + IPA_MHI_FUNC_ENTRY(); + ipa_mhi_set_state(IPA_MHI_STATE_READY); + IPA_MHI_FUNC_EXIT(); +} + +static void ipa_mhi_uc_wakeup_request_cb(void) +{ + unsigned long flags; + + IPA_MHI_FUNC_ENTRY(); + IPA_MHI_DBG("MHI state: %s\n", + MHI_STATE_STR(ipa_mhi_client_ctx->state)); + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) + ipa_mhi_notify_wakeup(); + else if (ipa_mhi_client_ctx->state == + IPA_MHI_STATE_SUSPEND_IN_PROGRESS) + /* wakeup event will be triggered after suspend finishes */ + ipa_mhi_client_ctx->trigger_wakeup = true; + + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + IPA_MHI_FUNC_EXIT(); +} + +static int ipa_mhi_request_prod(void) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + reinit_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp); + IPA_MHI_DBG("requesting mhi prod\n"); + res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD); + if (res) { + if (res != -EINPROGRESS) { + 
IPA_MHI_ERR("failed to request mhi prod %d\n", res); + return res; + } + res = wait_for_completion_timeout( + &ipa_mhi_client_ctx->rm_prod_granted_comp, + msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC)); + if (res == 0) { + IPA_MHI_ERR("timeout request mhi prod\n"); + return -ETIME; + } + } + + IPA_MHI_DBG("mhi prod granted\n"); + IPA_MHI_FUNC_EXIT(); + return 0; + +} + +static int ipa_mhi_release_prod(void) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD); + + IPA_MHI_FUNC_EXIT(); + return res; + +} + +/** + * ipa_mhi_start() - Start IPA MHI engine + * @params: pcie addresses for MHI + * + * This function is called by MHI client driver on MHI engine start for + * handling MHI accelerated channels. This function is called after + * ipa_mhi_init() was called and can be called after MHI reset to restart MHI + * engine. When this function returns device can move to M0 state. + * + * Return codes: 0 : success + * negative : error + */ +int ipa_mhi_start(struct ipa_mhi_start_params *params) +{ + int res; + struct ipa_mhi_init_engine init_params; + + IPA_MHI_FUNC_ENTRY(); + + if (!params) { + IPA_MHI_ERR("null args\n"); + return -EINVAL; + } + + if (!ipa_mhi_client_ctx) { + IPA_MHI_ERR("not initialized\n"); + return -EPERM; + } + + res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED); + if (res) { + IPA_MHI_ERR("ipa_mhi_set_state %d\n", res); + return res; + } + + ipa_mhi_client_ctx->host_ctrl_addr = params->host_ctrl_addr; + ipa_mhi_client_ctx->host_data_addr = params->host_data_addr; + ipa_mhi_client_ctx->channel_context_array_addr = + params->channel_context_array_addr; + ipa_mhi_client_ctx->event_context_array_addr = + params->event_context_array_addr; + IPA_MHI_DBG("host_ctrl_addr 0x%x\n", + ipa_mhi_client_ctx->host_ctrl_addr); + IPA_MHI_DBG("host_data_addr 0x%x\n", + ipa_mhi_client_ctx->host_data_addr); + IPA_MHI_DBG("channel_context_array_addr 0x%llx\n", + ipa_mhi_client_ctx->channel_context_array_addr); + IPA_MHI_DBG("event_context_array_addr 0x%llx\n", + ipa_mhi_client_ctx->event_context_array_addr); + + if (ipa_pm_is_used()) { + res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl); + if (res) { + IPA_MHI_ERR("failed activate client %d\n", res); + goto fail_pm_activate; + } + res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl); + if (res) { + IPA_MHI_ERR("failed activate modem client %d\n", res); + goto fail_pm_activate_modem; + } + } else { + /* Add MHI <-> Q6 dependencies to IPA RM */ + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res && res != -EINPROGRESS) { + IPA_MHI_ERR("failed to add dependency %d\n", res); + goto fail_add_mhi_q6_dep; + } + + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_MHI_CONS); + if (res && res != -EINPROGRESS) { + IPA_MHI_ERR("failed to add dependency %d\n", res); + goto fail_add_q6_mhi_dep; + } + + res = ipa_mhi_request_prod(); + if (res) { + IPA_MHI_ERR("failed request prod %d\n", res); + goto fail_request_prod; + } + } + + /* gsi params */ + init_params.gsi.first_ch_idx = + ipa_mhi_client_ctx->first_ch_idx; + /* uC params */ + init_params.uC.first_ch_idx = + ipa_mhi_client_ctx->first_ch_idx; + init_params.uC.first_er_idx = + ipa_mhi_client_ctx->first_er_idx; + init_params.uC.host_ctrl_addr = params->host_ctrl_addr; + init_params.uC.host_data_addr = params->host_data_addr; + init_params.uC.mmio_addr = ipa_mhi_client_ctx->mmio_addr; + init_params.uC.msi = &ipa_mhi_client_ctx->msi; + init_params.uC.ipa_cached_dl_ul_sync_info = + 
&ipa_cached_dl_ul_sync_info; + + res = ipa_mhi_init_engine(&init_params); + if (res) { + IPA_MHI_ERR("IPA core failed to start MHI %d\n", res); + goto fail_init_engine; + } + + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_init_engine: + if (!ipa_pm_is_used()) + ipa_mhi_release_prod(); +fail_request_prod: + if (!ipa_pm_is_used()) + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_MHI_CONS); +fail_add_q6_mhi_dep: + if (!ipa_pm_is_used()) + ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD, + IPA_RM_RESOURCE_Q6_CONS); +fail_add_mhi_q6_dep: + if (ipa_pm_is_used()) + ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl); +fail_pm_activate_modem: + if (ipa_pm_is_used()) + ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl); +fail_pm_activate: + ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED); + return res; +} + +/** + * ipa_mhi_get_channel_context() - Get corresponding channel context + * @ep: IPA ep + * @channel_id: Channel ID + * + * This function will return the corresponding channel context or allocate new + * one in case channel context for channel does not exist. + */ +static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context( + enum ipa_client_type client, u8 channel_id) +{ + int ch_idx; + struct ipa_mhi_channel_ctx *channels; + int max_channels; + + if (IPA_CLIENT_IS_PROD(client)) { + channels = ipa_mhi_client_ctx->ul_channels; + max_channels = IPA_MHI_MAX_UL_CHANNELS; + } else { + channels = ipa_mhi_client_ctx->dl_channels; + max_channels = IPA_MHI_MAX_DL_CHANNELS; + } + + /* find the channel context according to channel id */ + for (ch_idx = 0; ch_idx < max_channels; ch_idx++) { + if (channels[ch_idx].valid && + channels[ch_idx].id == channel_id) + return &channels[ch_idx]; + } + + /* channel context does not exists, allocate a new one */ + for (ch_idx = 0; ch_idx < max_channels; ch_idx++) { + if (!channels[ch_idx].valid) + break; + } + + if (ch_idx == max_channels) { + IPA_MHI_ERR("no more channels available\n"); + return NULL; + } + + channels[ch_idx].valid = true; + channels[ch_idx].id = channel_id; + channels[ch_idx].index = ipa_mhi_client_ctx->total_channels++; + channels[ch_idx].client = client; + channels[ch_idx].state = IPA_HW_MHI_CHANNEL_STATE_INVALID; + + return &channels[ch_idx]; +} + +/** + * ipa_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel + * context + * @clnt_hdl: client handle as provided in ipa_mhi_connect_pipe() + * + * This function will return the corresponding channel context or NULL in case + * that channel does not exist. 
+ */ +static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context_by_clnt_hdl( + u32 clnt_hdl) +{ + int ch_idx; + + for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) { + if (ipa_mhi_client_ctx->ul_channels[ch_idx].valid && + ipa_get_ep_mapping( + ipa_mhi_client_ctx->ul_channels[ch_idx].client) + == clnt_hdl) + return &ipa_mhi_client_ctx->ul_channels[ch_idx]; + } + + for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) { + if (ipa_mhi_client_ctx->dl_channels[ch_idx].valid && + ipa_get_ep_mapping( + ipa_mhi_client_ctx->dl_channels[ch_idx].client) + == clnt_hdl) + return &ipa_mhi_client_ctx->dl_channels[ch_idx]; + } + + return NULL; +} + +static void ipa_mhi_dump_ch_ctx(struct ipa_mhi_channel_ctx *channel) +{ + IPA_MHI_DBG("ch_id %d\n", channel->id); + IPA_MHI_DBG("chstate 0x%x\n", channel->ch_ctx_host.chstate); + IPA_MHI_DBG("brstmode 0x%x\n", channel->ch_ctx_host.brstmode); + IPA_MHI_DBG("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg); + IPA_MHI_DBG("chtype 0x%x\n", channel->ch_ctx_host.chtype); + IPA_MHI_DBG("erindex 0x%x\n", channel->ch_ctx_host.erindex); + IPA_MHI_DBG("rbase 0x%llx\n", channel->ch_ctx_host.rbase); + IPA_MHI_DBG("rlen 0x%llx\n", channel->ch_ctx_host.rlen); + IPA_MHI_DBG("rp 0x%llx\n", channel->ch_ctx_host.rp); + IPA_MHI_DBG("wp 0x%llx\n", channel->ch_ctx_host.wp); +} + +static void ipa_mhi_dump_ev_ctx(struct ipa_mhi_channel_ctx *channel) +{ + IPA_MHI_DBG("ch_id %d event id %d\n", channel->id, + channel->ch_ctx_host.erindex); + + IPA_MHI_DBG("intmodc 0x%x\n", channel->ev_ctx_host.intmodc); + IPA_MHI_DBG("intmodt 0x%x\n", channel->ev_ctx_host.intmodt); + IPA_MHI_DBG("ertype 0x%x\n", channel->ev_ctx_host.ertype); + IPA_MHI_DBG("msivec 0x%x\n", channel->ev_ctx_host.msivec); + IPA_MHI_DBG("rbase 0x%llx\n", channel->ev_ctx_host.rbase); + IPA_MHI_DBG("rlen 0x%llx\n", channel->ev_ctx_host.rlen); + IPA_MHI_DBG("rp 0x%llx\n", channel->ev_ctx_host.rp); + IPA_MHI_DBG("wp 0x%llx\n", channel->ev_ctx_host.wp); +} + +static int ipa_mhi_read_ch_ctx(struct ipa_mhi_channel_ctx *channel) +{ + int res; + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST, + &channel->ch_ctx_host, channel->channel_context_addr, + sizeof(channel->ch_ctx_host)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res); + return res; + + } + ipa_mhi_dump_ch_ctx(channel); + + channel->event_context_addr = + ipa_mhi_client_ctx->event_context_array_addr + + channel->ch_ctx_host.erindex * sizeof(struct ipa_mhi_ev_ctx); + IPA_MHI_DBG("ch %d event_context_addr 0x%llx\n", channel->id, + channel->event_context_addr); + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST, + &channel->ev_ctx_host, channel->event_context_addr, + sizeof(channel->ev_ctx_host)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res); + return res; + + } + ipa_mhi_dump_ev_ctx(channel); + + return 0; +} + +static void ipa_mhi_gsi_ev_err_cb(struct gsi_evt_err_notify *notify) +{ + struct ipa_mhi_channel_ctx *channel = notify->user_data; + + IPA_MHI_ERR("channel id=%d client=%d state=%d\n", + channel->id, channel->client, channel->state); + switch (notify->evt_id) { + case GSI_EVT_OUT_OF_BUFFERS_ERR: + IPA_MHI_ERR("Received GSI_EVT_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_EVT_OUT_OF_RESOURCES_ERR: + IPA_MHI_ERR("Received GSI_EVT_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR: + IPA_MHI_ERR("Received GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n"); + break; + case GSI_EVT_EVT_RING_EMPTY_ERR: + IPA_MHI_ERR("Received GSI_EVT_EVT_RING_EMPTY_ERR\n"); + break; + default: + 
IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id); + } + IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc); + ipa_assert(); +} + +static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify) +{ + struct ipa_mhi_channel_ctx *channel = notify->chan_user_data; + + IPA_MHI_ERR("channel id=%d client=%d state=%d\n", + channel->id, channel->client, channel->state); + switch (notify->evt_id) { + case GSI_CHAN_INVALID_TRE_ERR: + IPA_MHI_ERR("Received GSI_CHAN_INVALID_TRE_ERR\n"); + break; + case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR: + IPA_MHI_ERR("Received GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_BUFFERS_ERR: + IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_RESOURCES_ERR: + IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR: + IPA_MHI_ERR("Received GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n"); + break; + case GSI_CHAN_HWO_1_ERR: + IPA_MHI_ERR("Received GSI_CHAN_HWO_1_ERR\n"); + break; + default: + IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id); + } + IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc); + ipa_assert(); +} + + +static bool ipa_mhi_gsi_channel_empty(struct ipa_mhi_channel_ctx *channel) +{ + IPA_MHI_FUNC_ENTRY(); + + if (!channel->stop_in_proc) { + IPA_MHI_DBG("Channel is not in STOP_IN_PROC\n"); + return true; + } + + if (ipa_mhi_stop_gsi_channel(channel->client)) { + channel->stop_in_proc = false; + return true; + } + + return false; +} + +/** + * ipa_mhi_wait_for_ul_empty_timeout() - wait for pending packets in uplink + * @msecs: timeout to wait + * + * This function will poll until there are no packets pending in uplink channels + * or timeout occurred. + * + * Return code: true - no pending packets in uplink channels + * false - timeout occurred + */ +static bool ipa_mhi_wait_for_ul_empty_timeout(unsigned int msecs) +{ + unsigned long jiffies_timeout = msecs_to_jiffies(msecs); + unsigned long jiffies_start = jiffies; + bool empty = false; + int i; + + IPA_MHI_FUNC_ENTRY(); + while (!empty) { + empty = true; + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + if (!ipa_mhi_client_ctx->ul_channels[i].valid) + continue; + if (ipa_get_transport_type() == + IPA_TRANSPORT_TYPE_GSI) + empty &= ipa_mhi_gsi_channel_empty( + &ipa_mhi_client_ctx->ul_channels[i]); + else + empty &= ipa_mhi_sps_channel_empty( + ipa_mhi_client_ctx->ul_channels[i].client); + } + + if (time_after(jiffies, jiffies_start + jiffies_timeout)) { + IPA_MHI_DBG("finished waiting for UL empty\n"); + break; + } + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI && + IPA_MHI_MAX_UL_CHANNELS == 1) + usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC, + IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC); + } + + IPA_MHI_DBG("IPA UL is %s\n", (empty) ? 
"empty" : "not empty"); + + IPA_MHI_FUNC_EXIT(); + return empty; +} + +static int ipa_mhi_enable_force_clear(u32 request_id, bool throttle_source) +{ + struct ipa_enable_force_clear_datapath_req_msg_v01 req; + int i; + int res; + + IPA_MHI_FUNC_ENTRY(); + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + req.source_pipe_bitmask = 0; + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + if (!ipa_mhi_client_ctx->ul_channels[i].valid) + continue; + req.source_pipe_bitmask |= 1 << ipa_get_ep_mapping( + ipa_mhi_client_ctx->ul_channels[i].client); + } + if (throttle_source) { + req.throttle_source_valid = 1; + req.throttle_source = 1; + } + IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n", + req.request_id, req.source_pipe_bitmask, + req.throttle_source); + res = ipa_qmi_enable_force_clear_datapath_send(&req); + if (res) { + IPA_MHI_ERR( + "ipa_qmi_enable_force_clear_datapath_send failed %d\n" + , res); + return res; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static int ipa_mhi_disable_force_clear(u32 request_id) +{ + struct ipa_disable_force_clear_datapath_req_msg_v01 req; + int res; + + IPA_MHI_FUNC_ENTRY(); + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + IPA_MHI_DBG("req_id=0x%x\n", req.request_id); + res = ipa_qmi_disable_force_clear_datapath_send(&req); + if (res) { + IPA_MHI_ERR( + "ipa_qmi_disable_force_clear_datapath_send failed %d\n" + , res); + return res; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static void ipa_mhi_set_holb_on_dl_channels(bool enable, + struct ipa_ep_cfg_holb old_holb[]) +{ + int i; + struct ipa_ep_cfg_holb ep_holb; + int ep_idx; + int res; + + for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { + if (!ipa_mhi_client_ctx->dl_channels[i].valid) + continue; + if (ipa_mhi_client_ctx->dl_channels[i].state == + IPA_HW_MHI_CHANNEL_STATE_INVALID) + continue; + ep_idx = ipa_get_ep_mapping( + ipa_mhi_client_ctx->dl_channels[i].client); + if (-1 == ep_idx) { + IPA_MHI_ERR("Client %u is not mapped\n", + ipa_mhi_client_ctx->dl_channels[i].client); + ipa_assert(); + return; + } + memset(&ep_holb, 0, sizeof(ep_holb)); + if (enable) { + ipa_get_holb(ep_idx, &old_holb[i]); + ep_holb.en = 1; + ep_holb.tmr_val = 0; + } else { + ep_holb = old_holb[i]; + } + res = ipa_cfg_ep_holb(ep_idx, &ep_holb); + if (res) { + IPA_MHI_ERR("ipa_cfg_ep_holb failed %d\n", res); + ipa_assert(); + return; + } + } +} + +static int ipa_mhi_suspend_gsi_channel(struct ipa_mhi_channel_ctx *channel) +{ + int clnt_hdl; + int res; + + IPA_MHI_FUNC_ENTRY(); + clnt_hdl = ipa_get_ep_mapping(channel->client); + if (clnt_hdl < 0) + return -EFAULT; + + res = ipa_stop_gsi_channel(clnt_hdl); + if (res != 0 && res != -GSI_STATUS_AGAIN && + res != -GSI_STATUS_TIMED_OUT) { + IPA_MHI_ERR("GSI stop channel failed %d\n", res); + return -EFAULT; + } + + /* check if channel was stopped completely */ + if (res) + channel->stop_in_proc = true; + + IPA_MHI_DBG("GSI channel is %s\n", (channel->stop_in_proc) ? 
+ "STOP_IN_PROC" : "STOP"); + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel) +{ + int res; + bool empty; + struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS]; + + IPA_MHI_FUNC_ENTRY(); + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + res = ipa_mhi_suspend_gsi_channel(channel); + if (res) { + IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n", + res); + return res; + } + } else { + res = ipa_uc_mhi_reset_channel(channel->index); + if (res) { + IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n", + res); + return res; + } + } + + empty = ipa_mhi_wait_for_ul_empty_timeout( + IPA_MHI_CH_EMPTY_TIMEOUT_MSEC); + if (!empty) { + IPA_MHI_DBG("%s not empty\n", + (ipa_get_transport_type() == + IPA_TRANSPORT_TYPE_GSI) ? "GSI" : "BAM"); + res = ipa_mhi_enable_force_clear( + ipa_mhi_client_ctx->qmi_req_id, false); + if (res) { + IPA_MHI_ERR("ipa_mhi_enable_force_clear failed %d\n", + res); + ipa_assert(); + return res; + } + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + empty = ipa_mhi_wait_for_ul_empty_timeout( + IPA_MHI_CH_EMPTY_TIMEOUT_MSEC); + + IPA_MHI_DBG("empty=%d\n", empty); + } else { + /* enable packet drop on all DL channels */ + ipa_mhi_set_holb_on_dl_channels(true, old_ep_holb); + ipa_generate_tag_process(); + /* disable packet drop on all DL channels */ + ipa_mhi_set_holb_on_dl_channels(false, old_ep_holb); + + res = ipa_disable_sps_pipe(channel->client); + if (res) { + IPA_MHI_ERR("sps_pipe_disable fail %d\n", res); + ipa_assert(); + return res; + } + } + + res = + ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id); + if (res) { + IPA_MHI_ERR("ipa_mhi_disable_force_clear failed %d\n", + res); + ipa_assert(); + return res; + } + ipa_mhi_client_ctx->qmi_req_id++; + } + + res = ipa_mhi_reset_channel_internal(channel->client); + if (res) { + IPA_MHI_ERR("ipa_mhi_reset_ul_channel_internal failed %d\n" + , res); + return res; + } + + IPA_MHI_FUNC_EXIT(); + + return 0; +} + +static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + res = ipa_mhi_suspend_gsi_channel(channel); + if (res) { + IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n" + , res); + return res; + } + + res = ipa_mhi_reset_channel_internal(channel->client); + if (res) { + IPA_MHI_ERR( + "ipa_mhi_reset_ul_channel_internal failed %d\n" + , res); + return res; + } + } else { + res = ipa_mhi_reset_channel_internal(channel->client); + if (res) { + IPA_MHI_ERR( + "ipa_mhi_reset_ul_channel_internal failed %d\n" + , res); + return res; + } + + res = ipa_uc_mhi_reset_channel(channel->index); + if (res) { + IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n", + res); + ipa_mhi_start_channel_internal(channel->client); + return res; + } + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + if (IPA_CLIENT_IS_PROD(channel->client)) + res = ipa_mhi_reset_ul_channel(channel); + else + res = ipa_mhi_reset_dl_channel(channel); + if (res) { + IPA_MHI_ERR("failed to reset channel error %d\n", res); + return res; + } + + channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE; + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->state, channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, chstate), + sizeof(((struct ipa_mhi_ch_ctx 
*)0)->chstate)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res); + return res; + } + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +/** + * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding + * MHI channel + * @in: connect parameters + * @clnt_hdl: [out] client handle for this pipe + * + * This function is called by MHI client driver on MHI channel start. + * This function is called after MHI engine was started. + * + * Return codes: 0 : success + * negative : error + */ +int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl) +{ + int res; + unsigned long flags; + struct ipa_mhi_channel_ctx *channel = NULL; + + IPA_MHI_FUNC_ENTRY(); + + if (!in || !clnt_hdl) { + IPA_MHI_ERR("NULL args\n"); + return -EINVAL; + } + + if (in->sys.client >= IPA_CLIENT_MAX) { + IPA_MHI_ERR("bad param client:%d\n", in->sys.client); + return -EINVAL; + } + + if (!IPA_CLIENT_IS_MHI(in->sys.client)) { + IPA_MHI_ERR( + "Invalid MHI client, client: %d\n", in->sys.client); + return -EINVAL; + } + + IPA_MHI_DBG("channel=%d\n", in->channel_id); + + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + if (!ipa_mhi_client_ctx || + ipa_mhi_client_ctx->state != IPA_MHI_STATE_STARTED) { + IPA_MHI_ERR("IPA MHI was not started\n"); + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + + channel = ipa_mhi_get_channel_context(in->sys.client, in->channel_id); + if (!channel) { + IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n"); + return -EINVAL; + } + + if (channel->state != IPA_HW_MHI_CHANNEL_STATE_INVALID && + channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) { + IPA_MHI_ERR("Invalid channel state %d\n", channel->state); + return -EFAULT; + } + + channel->channel_context_addr = + ipa_mhi_client_ctx->channel_context_array_addr + + channel->id * sizeof(struct ipa_mhi_ch_ctx); + + /* for event context address index needs to read from host */ + + IPA_MHI_DBG("client %d channelIndex %d channelID %d, state %d\n", + channel->client, channel->index, channel->id, channel->state); + IPA_MHI_DBG("channel_context_addr 0x%llx cached_gsi_evt_ring_hdl %lu\n", + channel->channel_context_addr, + channel->cached_gsi_evt_ring_hdl); + + IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client); + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + struct ipa_mhi_connect_params_internal internal; + + IPA_MHI_DBG("reading ch/ev context from host\n"); + res = ipa_mhi_read_ch_ctx(channel); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res); + goto fail_start_channel; + } + + internal.channel_id = in->channel_id; + internal.sys = &in->sys; + internal.start.gsi.state = channel->state; + internal.start.gsi.msi = &ipa_mhi_client_ctx->msi; + internal.start.gsi.ev_ctx_host = &channel->ev_ctx_host; + internal.start.gsi.event_context_addr = + channel->event_context_addr; + internal.start.gsi.ch_ctx_host = &channel->ch_ctx_host; + internal.start.gsi.channel_context_addr = + channel->channel_context_addr; + internal.start.gsi.ch_err_cb = ipa_mhi_gsi_ch_err_cb; + internal.start.gsi.channel = (void *)channel; + internal.start.gsi.ev_err_cb = ipa_mhi_gsi_ev_err_cb; + internal.start.gsi.assert_bit40 = + ipa_mhi_client_ctx->assert_bit40; + internal.start.gsi.mhi = &channel->ch_scratch.mhi; + internal.start.gsi.cached_gsi_evt_ring_hdl = + &channel->cached_gsi_evt_ring_hdl; + internal.start.gsi.evchid = channel->index; + + res = ipa_connect_mhi_pipe(&internal, clnt_hdl); + if (res) { + 
IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res); + goto fail_connect_pipe; + } + channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN; + channel->brstmode_enabled = + channel->ch_scratch.mhi.burst_mode_enabled; + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->state, channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, chstate), + sizeof(channel->state)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed\n"); + return res; + + } + } else { + struct ipa_mhi_connect_params_internal internal; + + internal.channel_id = in->channel_id; + internal.sys = &in->sys; + internal.start.uC.index = channel->index; + internal.start.uC.id = channel->id; + internal.start.uC.state = channel->state; + res = ipa_connect_mhi_pipe(&internal, clnt_hdl); + if (res) { + IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res); + goto fail_connect_pipe; + } + channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN; + } + + if (!in->sys.keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + + IPA_MHI_FUNC_EXIT(); + + return 0; +fail_connect_pipe: + ipa_mhi_reset_channel(channel); +fail_start_channel: + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + return -EPERM; +} + +/** + * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding + * MHI channel + * @clnt_hdl: client handle for this pipe + * + * This function is called by MHI client driver on MHI channel reset. + * This function is called after MHI channel was started. + * This function is doing the following: + * - Send command to uC/GSI to reset corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa_mhi_disconnect_pipe(u32 clnt_hdl) +{ + int res; + enum ipa_client_type client; + static struct ipa_mhi_channel_ctx *channel; + + IPA_MHI_FUNC_ENTRY(); + + if (!ipa_mhi_client_ctx) { + IPA_MHI_ERR("IPA MHI was not initialized\n"); + return -EINVAL; + } + + client = ipa_get_client_mapping(clnt_hdl); + + if (!IPA_CLIENT_IS_MHI(client)) { + IPA_MHI_ERR("invalid IPA MHI client, client: %d\n", client); + return -EINVAL; + } + + channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl); + if (!channel) { + IPA_MHI_ERR("invalid clnt index\n"); + return -EINVAL; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa_get_client_mapping(clnt_hdl)); + + res = ipa_mhi_reset_channel(channel); + if (res) { + IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res); + goto fail_reset_channel; + } + + res = ipa_disconnect_mhi_pipe(clnt_hdl); + if (res) { + IPA_MHI_ERR( + "IPA core driver failed to disconnect the pipe hdl %d, res %d" + , clnt_hdl, res); + return res; + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl)); + + IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl); + IPA_MHI_FUNC_EXIT(); + return 0; +fail_reset_channel: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl)); + return res; +} + +static int ipa_mhi_wait_for_cons_release(void) +{ + unsigned long flags; + int res; + + IPA_MHI_FUNC_ENTRY(); + reinit_completion(&ipa_mhi_client_ctx->rm_cons_comp); + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + if (ipa_mhi_client_ctx->rm_cons_state != IPA_MHI_RM_STATE_GRANTED) { + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + + res = wait_for_completion_timeout( + &ipa_mhi_client_ctx->rm_cons_comp, + msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC)); + if (res == 0) { + IPA_MHI_ERR("timeout release mhi cons\n"); + return -ETIME; + } + 
IPA_MHI_FUNC_EXIT(); + return 0; +} + +static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels) +{ + int i; + int res; + + IPA_MHI_FUNC_ENTRY(); + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + if (!channels[i].valid) + continue; + if (channels[i].state != + IPA_HW_MHI_CHANNEL_STATE_RUN) + continue; + IPA_MHI_DBG("suspending channel %d\n", + channels[i].id); + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) + res = ipa_mhi_suspend_gsi_channel( + &channels[i]); + else + res = ipa_uc_mhi_suspend_channel( + channels[i].index); + + if (res) { + IPA_MHI_ERR("failed to suspend channel %d error %d\n", + i, res); + return res; + } + channels[i].state = + IPA_HW_MHI_CHANNEL_STATE_SUSPEND; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static int ipa_mhi_stop_event_update_channels( + struct ipa_mhi_channel_ctx *channels) +{ + int i; + int res; + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) + return 0; + + IPA_MHI_FUNC_ENTRY(); + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + if (!channels[i].valid) + continue; + if (channels[i].state != + IPA_HW_MHI_CHANNEL_STATE_SUSPEND) + continue; + IPA_MHI_DBG("stop update event channel %d\n", + channels[i].id); + res = ipa_uc_mhi_stop_event_update_channel( + channels[i].index); + if (res) { + IPA_MHI_ERR("failed stop event channel %d error %d\n", + i, res); + return res; + } + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static bool ipa_mhi_check_pending_packets_from_host(void) +{ + int i; + int res; + struct ipa_mhi_channel_ctx *channel; + + IPA_MHI_FUNC_ENTRY(); + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->ul_channels[i]; + if (!channel->valid) + continue; + + res = ipa_mhi_query_ch_info(channel->client, + &channel->ch_info); + if (res) { + IPA_MHI_ERR("gsi_query_channel_info failed\n"); + return true; + } + res = ipa_mhi_read_ch_ctx(channel); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res); + return true; + } + + if (channel->ch_info.rp != channel->ch_ctx_host.wp) { + IPA_MHI_DBG("There are pending packets from host\n"); + IPA_MHI_DBG("device rp 0x%llx host 0x%llx\n", + channel->ch_info.rp, channel->ch_ctx_host.wp); + + return true; + } + } + + IPA_MHI_FUNC_EXIT(); + return false; +} + +static int ipa_mhi_resume_channels(bool LPTransitionRejected, + struct ipa_mhi_channel_ctx *channels) +{ + int i; + int res; + struct ipa_mhi_channel_ctx *channel; + + IPA_MHI_FUNC_ENTRY(); + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + if (!channels[i].valid) + continue; + if (channels[i].state != + IPA_HW_MHI_CHANNEL_STATE_SUSPEND) + continue; + channel = &channels[i]; + IPA_MHI_DBG("resuming channel %d\n", channel->id); + + res = ipa_mhi_resume_channels_internal(channel->client, + LPTransitionRejected, channel->brstmode_enabled, + channel->ch_scratch, channel->index); + + if (res) { + IPA_MHI_ERR("failed to resume channel %d error %d\n", + i, res); + return res; + } + + channel->stop_in_proc = false; + channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +/** + * ipa_mhi_suspend_ul() - Suspend MHI accelerated up link channels + * @force: + * false: in case of data pending in IPA, MHI channels will not be + * suspended and function will fail. + * true: in case of data pending in IPA, make sure no further access from + * IPA to PCIe is possible. In this case suspend cannot fail. + * + * + * This function is called by MHI client driver on MHI suspend. + * This function is called after MHI channel was started. 
+ * When this function returns device can move to M1/M2/M3/D3cold state. + * + * Return codes: 0 : success + * negative : error + */ +static int ipa_mhi_suspend_ul(bool force, bool *empty, bool *force_clear) +{ + int res; + + *force_clear = false; + + res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels); + if (res) { + IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res); + goto fail_suspend_ul_channel; + } + + *empty = ipa_mhi_wait_for_ul_empty_timeout( + IPA_MHI_CH_EMPTY_TIMEOUT_MSEC); + + if (!*empty) { + if (force) { + res = ipa_mhi_enable_force_clear( + ipa_mhi_client_ctx->qmi_req_id, false); + if (res) { + IPA_MHI_ERR("failed to enable force clear\n"); + ipa_assert(); + return res; + } + *force_clear = true; + IPA_MHI_DBG("force clear datapath enabled\n"); + + *empty = ipa_mhi_wait_for_ul_empty_timeout( + IPA_MHI_CH_EMPTY_TIMEOUT_MSEC); + IPA_MHI_DBG("empty=%d\n", *empty); + if (!*empty && ipa_get_transport_type() + == IPA_TRANSPORT_TYPE_GSI) { + IPA_MHI_ERR("Failed to suspend UL channels\n"); + if (ipa_mhi_client_ctx->test_mode) { + res = -EAGAIN; + goto fail_suspend_ul_channel; + } + + ipa_assert(); + } + } else { + IPA_MHI_DBG("IPA not empty\n"); + res = -EAGAIN; + goto fail_suspend_ul_channel; + } + } + + if (*force_clear) { + res = + ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id); + if (res) { + IPA_MHI_ERR("failed to disable force clear\n"); + ipa_assert(); + return res; + } + IPA_MHI_DBG("force clear datapath disabled\n"); + ipa_mhi_client_ctx->qmi_req_id++; + } + + if (!force && ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + if (ipa_mhi_check_pending_packets_from_host()) { + res = -EAGAIN; + goto fail_suspend_ul_channel; + } + } + + res = ipa_mhi_stop_event_update_channels( + ipa_mhi_client_ctx->ul_channels); + if (res) { + IPA_MHI_ERR( + "ipa_mhi_stop_event_update_ul_channels failed %d\n", + res); + goto fail_suspend_ul_channel; + } + + return 0; + +fail_suspend_ul_channel: + return res; +} + +static bool ipa_mhi_has_open_aggr_frame(void) +{ + struct ipa_mhi_channel_ctx *channel; + int i; + + for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->dl_channels[i]; + + if (!channel->valid) + continue; + + if (ipa_has_open_aggr_frame(channel->client)) + return true; + } + + return false; +} + +static void ipa_mhi_update_host_ch_state(bool update_rp) +{ + int i; + int res; + struct ipa_mhi_channel_ctx *channel; + + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->ul_channels[i]; + if (!channel->valid) + continue; + + if (update_rp) { + res = ipa_mhi_query_ch_info(channel->client, + &channel->ch_info); + if (res) { + IPA_MHI_ERR("gsi_query_channel_info failed\n"); + ipa_assert(); + return; + } + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->ch_info.rp, + channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, rp), + sizeof(channel->ch_info.rp)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed\n"); + ipa_assert(); + return; + } + } + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->state, channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, chstate), + sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed\n"); + ipa_assert(); + return; + } + IPA_MHI_DBG("Updated UL CH=%d state to %s on host\n", + i, MHI_CH_STATE_STR(channel->state)); + } + + for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->dl_channels[i]; + if (!channel->valid) + 
continue; + + if (update_rp) { + res = ipa_mhi_query_ch_info(channel->client, + &channel->ch_info); + if (res) { + IPA_MHI_ERR("gsi_query_channel_info failed\n"); + ipa_assert(); + return; + } + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->ch_info.rp, + channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, rp), + sizeof(channel->ch_info.rp)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed\n"); + ipa_assert(); + return; + } + } + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->state, channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, chstate), + sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed\n"); + ipa_assert(); + return; + } + IPA_MHI_DBG("Updated DL CH=%d state to %s on host\n", + i, MHI_CH_STATE_STR(channel->state)); + } +} + +static int ipa_mhi_suspend_dl(bool force) +{ + int res; + + res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels); + if (res) { + IPA_MHI_ERR( + "ipa_mhi_suspend_channels for dl failed %d\n", res); + goto fail_suspend_dl_channel; + } + + res = ipa_mhi_stop_event_update_channels + (ipa_mhi_client_ctx->dl_channels); + if (res) { + IPA_MHI_ERR("failed to stop event update on DL %d\n", res); + goto fail_stop_event_update_dl_channel; + } + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + if (ipa_mhi_has_open_aggr_frame()) { + IPA_MHI_DBG("There is an open aggr frame\n"); + if (force) { + ipa_mhi_client_ctx->trigger_wakeup = true; + } else { + res = -EAGAIN; + goto fail_stop_event_update_dl_channel; + } + } + } + + return 0; + +fail_stop_event_update_dl_channel: + ipa_mhi_resume_channels(true, + ipa_mhi_client_ctx->dl_channels); +fail_suspend_dl_channel: + return res; +} + +/** + * ipa_mhi_suspend() - Suspend MHI accelerated channels + * @force: + * false: in case of data pending in IPA, MHI channels will not be + * suspended and function will fail. + * true: in case of data pending in IPA, make sure no further access from + * IPA to PCIe is possible. In this case suspend cannot fail. + * + * This function is called by MHI client driver on MHI suspend. + * This function is called after MHI channel was started. + * When this function returns device can move to M1/M2/M3/D3cold state. 
+ * + * Return codes: 0 : success + * negative : error + */ +int ipa_mhi_suspend(bool force) +{ + int res; + bool empty; + bool force_clear; + + IPA_MHI_FUNC_ENTRY(); + + res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS); + if (res) { + IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res); + return res; + } + + res = ipa_mhi_suspend_dl(force); + if (res) { + IPA_MHI_ERR("ipa_mhi_suspend_dl failed %d\n", res); + goto fail_suspend_dl_channel; + } + + usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX); + + res = ipa_mhi_suspend_ul(force, &empty, &force_clear); + if (res) { + IPA_MHI_ERR("ipa_mhi_suspend_ul failed %d\n", res); + goto fail_suspend_ul_channel; + } + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) + ipa_mhi_update_host_ch_state(true); + + /* + * hold IPA clocks and release them after all + * IPA RM resource are released to make sure tag process will not start + */ + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (ipa_pm_is_used()) { + res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl); + if (res) { + IPA_MHI_ERR("fail to deactivate client %d\n", res); + goto fail_deactivate_pm; + } + res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl); + if (res) { + IPA_MHI_ERR("fail to deactivate client %d\n", res); + goto fail_deactivate_modem_pm; + } + } else { + IPA_MHI_DBG("release prod\n"); + res = ipa_mhi_release_prod(); + if (res) { + IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res); + goto fail_release_prod; + } + + IPA_MHI_DBG("wait for cons release\n"); + res = ipa_mhi_wait_for_cons_release(); + if (res) { + IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed\n"); + goto fail_release_cons; + } + } + usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX); + + if (!empty) + ipa_set_tag_process_before_gating(false); + + res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED); + if (res) { + IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res); + goto fail_release_cons; + } + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_release_cons: + if (!ipa_pm_is_used()) + ipa_mhi_request_prod(); +fail_release_prod: + if (ipa_pm_is_used()) + ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl); +fail_deactivate_modem_pm: + if (ipa_pm_is_used()) + ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl); +fail_deactivate_pm: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +fail_suspend_ul_channel: + ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels); + if (force_clear) { + if ( + ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id)) { + IPA_MHI_ERR("failed to disable force clear\n"); + ipa_assert(); + } + IPA_MHI_DBG("force clear datapath disabled\n"); + ipa_mhi_client_ctx->qmi_req_id++; + } +fail_suspend_dl_channel: + ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->dl_channels); + ipa_mhi_set_state(IPA_MHI_STATE_STARTED); + return res; +} + +/** + * ipa_mhi_resume() - Resume MHI accelerated channels + * + * This function is called by MHI client driver on MHI resume. + * This function is called after MHI channel was suspended. + * When this function returns device can move to M0 state. 
+ * This function is doing the following:
+ *	- Send command to uC/GSI to resume corresponding MHI channel
+ *	- Request MHI_PROD in IPA RM
+ *	- Resume data to IPA
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_resume(void)
+{
+	int res;
+	bool dl_channel_resumed = false;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_RESUME_IN_PROGRESS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+		return res;
+	}
+
+	if (ipa_mhi_client_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) {
+		/* resume all DL channels */
+		res = ipa_mhi_resume_channels(false,
+			ipa_mhi_client_ctx->dl_channels);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
+				res);
+			goto fail_resume_dl_channels;
+		}
+		dl_channel_resumed = true;
+
+		ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
+			IPA_RM_RESOURCE_MHI_CONS);
+		ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
+	}
+
+	if (ipa_pm_is_used()) {
+		res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
+		if (res) {
+			IPA_MHI_ERR("fail to activate client %d\n", res);
+			goto fail_pm_activate;
+		}
+		res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+		if (res) {
+			IPA_MHI_ERR("fail to activate client %d\n", res);
+			goto fail_pm_activate_modem;
+		}
+	} else {
+		res = ipa_mhi_request_prod();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res);
+			goto fail_request_prod;
+		}
+	}
+
+	/* resume all UL channels */
+	res = ipa_mhi_resume_channels(false,
+		ipa_mhi_client_ctx->ul_channels);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_resume_ul_channels failed %d\n", res);
+		goto fail_resume_ul_channels;
+	}
+
+	if (!dl_channel_resumed) {
+		res = ipa_mhi_resume_channels(false,
+			ipa_mhi_client_ctx->dl_channels);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
+				res);
+			goto fail_resume_dl_channels2;
+		}
+	}
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+		ipa_mhi_update_host_ch_state(false);
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+		goto fail_set_state;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_set_state:
+	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+fail_resume_dl_channels2:
+	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels);
+fail_resume_ul_channels:
+	if (!ipa_pm_is_used())
+		ipa_mhi_release_prod();
+fail_request_prod:
+	if (ipa_pm_is_used())
+		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+fail_pm_activate_modem:
+	if (ipa_pm_is_used())
+		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+fail_pm_activate:
+	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+fail_resume_dl_channels:
+	ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
+	return res;
+}
+
+
+static int ipa_mhi_destroy_channels(struct ipa_mhi_channel_ctx *channels,
+	int num_of_channels)
+{
+	struct ipa_mhi_channel_ctx *channel;
+	int i, res;
+	u32 clnt_hdl;
+
+	for (i = 0; i < num_of_channels; i++) {
+		channel = &channels[i];
+		if (!channel->valid)
+			continue;
+		if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID)
+			continue;
+		if (channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+			clnt_hdl = ipa_get_ep_mapping(channel->client);
+			IPA_MHI_DBG("disconnect pipe (ep: %d)\n", clnt_hdl);
+			res = ipa_mhi_disconnect_pipe(clnt_hdl);
+			if (res) {
+				IPA_MHI_ERR(
+					"failed to disconnect pipe %d, err %d\n"
+					, clnt_hdl, res);
+				goto fail;
+			}
+		}
+		res = ipa_mhi_destroy_channel(channel->client);
+		if (res) {
+			IPA_MHI_ERR(
+				"ipa_mhi_destroy_channel failed %d"
+				, res);
+			goto fail;
+		}
+	}
+	return 0;
+fail:
+	return res;
+}
+
+/**
+ * ipa_mhi_destroy_all_channels() - Destroy MHI IPA channels
+ *
+ * This function is called by IPA MHI client driver on MHI reset to destroy all
+ * IPA MHI channels.
+ */
+int ipa_mhi_destroy_all_channels(void)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	/* reset all UL and DL acc channels and their associated event rings */
+	res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->ul_channels,
+		IPA_MHI_MAX_UL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_destroy_channels(ul_channels) failed %d\n",
+			res);
+		return -EPERM;
+	}
+	IPA_MHI_DBG("All UL channels are disconnected\n");
+
+	res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->dl_channels,
+		IPA_MHI_MAX_DL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_destroy_channels(dl_channels) failed %d\n",
+			res);
+		return -EPERM;
+	}
+	IPA_MHI_DBG("All DL channels are disconnected\n");
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static void ipa_mhi_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+
+static void ipa_mhi_delete_rm_resources(void)
+{
+	int res;
+
+	if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_INITIALIZED &&
+		ipa_mhi_client_ctx->state != IPA_MHI_STATE_READY) {
+
+		IPA_MHI_DBG("release prod\n");
+		res = ipa_mhi_release_prod();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n",
+				res);
+			goto fail;
+		}
+		IPA_MHI_DBG("wait for cons release\n");
+		res = ipa_mhi_wait_for_cons_release();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n",
+				res);
+			goto fail;
+		}
+
+		usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN,
+			IPA_MHI_SUSPEND_SLEEP_MAX);
+
+		IPA_MHI_DBG("delete dependency Q6_PROD->MHI_CONS\n");
+		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_MHI_CONS);
+		if (res) {
+			IPA_MHI_ERR(
+				"Error deleting dependency %d->%d, res=%d\n",
+				IPA_RM_RESOURCE_Q6_PROD,
+				IPA_RM_RESOURCE_MHI_CONS,
+				res);
+			goto fail;
+		}
+		IPA_MHI_DBG("delete dependency MHI_PROD->Q6_CONS\n");
+		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+			IPA_RM_RESOURCE_Q6_CONS);
+		if (res) {
+			IPA_MHI_ERR(
+				"Error deleting dependency %d->%d, res=%d\n",
+				IPA_RM_RESOURCE_MHI_PROD,
+				IPA_RM_RESOURCE_Q6_CONS,
+				res);
+			goto fail;
+		}
+	}
+
+	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+	if (res) {
+		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+			IPA_RM_RESOURCE_MHI_PROD, res);
+		goto fail;
+	}
+
+	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+	if (res) {
+		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+			IPA_RM_RESOURCE_MHI_CONS, res);
+		goto fail;
+	}
+
+	return;
+fail:
+	ipa_assert();
+}
+
+static void ipa_mhi_deregister_pm(void)
+{
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+	ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl);
+	ipa_mhi_client_ctx->pm_hdl = ~0;
+
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	ipa_pm_deregister(ipa_mhi_client_ctx->modem_pm_hdl);
+	ipa_mhi_client_ctx->modem_pm_hdl = ~0;
+}
+
+/**
+ * ipa_mhi_destroy() - Destroy MHI IPA
+ *
+ * This function is called by MHI client driver on MHI reset to destroy all IPA
+ * MHI resources.
+ * When this function returns ipa_mhi can re-initialize.
+ */ +void ipa_mhi_destroy(void) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + if (!ipa_mhi_client_ctx) { + IPA_MHI_DBG("IPA MHI was not initialized, already destroyed\n"); + return; + } + /* reset all UL and DL acc channels and its accociated event rings */ + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + res = ipa_mhi_destroy_all_channels(); + if (res) { + IPA_MHI_ERR("ipa_mhi_destroy_all_channels failed %d\n", + res); + goto fail; + } + } + IPA_MHI_DBG("All channels are disconnected\n"); + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_SPS) { + IPA_MHI_DBG("cleanup uC MHI\n"); + ipa_uc_mhi_cleanup(); + } + + if (ipa_pm_is_used()) + ipa_mhi_deregister_pm(); + else + ipa_mhi_delete_rm_resources(); + + ipa_dma_destroy(); + ipa_mhi_debugfs_destroy(); + destroy_workqueue(ipa_mhi_client_ctx->wq); + kfree(ipa_mhi_client_ctx); + ipa_mhi_client_ctx = NULL; + IPA_MHI_DBG("IPA MHI was reset, ready for re-init\n"); + + IPA_MHI_FUNC_EXIT(); + return; +fail: + ipa_assert(); +} + +static void ipa_mhi_pm_cb(void *p, enum ipa_pm_cb_event event) +{ + unsigned long flags; + + IPA_MHI_FUNC_ENTRY(); + + if (event != IPA_PM_REQUEST_WAKEUP) { + IPA_MHI_ERR("Unexpected event %d\n", event); + WARN_ON(1); + return; + } + + IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state)); + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) { + ipa_mhi_notify_wakeup(); + } else if (ipa_mhi_client_ctx->state == + IPA_MHI_STATE_SUSPEND_IN_PROGRESS) { + /* wakeup event will be trigger after suspend finishes */ + ipa_mhi_client_ctx->trigger_wakeup = true; + } + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + IPA_MHI_DBG("EXIT"); +} + +static int ipa_mhi_register_pm(void) +{ + int res; + struct ipa_pm_register_params params; + + memset(¶ms, 0, sizeof(params)); + params.name = "MHI"; + params.callback = ipa_mhi_pm_cb; + params.group = IPA_PM_GROUP_DEFAULT; + res = ipa_pm_register(¶ms, &ipa_mhi_client_ctx->pm_hdl); + if (res) { + IPA_MHI_ERR("fail to register with PM %d\n", res); + return res; + } + + res = ipa_pm_associate_ipa_cons_to_client(ipa_mhi_client_ctx->pm_hdl, + IPA_CLIENT_MHI_CONS); + if (res) { + IPA_MHI_ERR("fail to associate cons with PM %d\n", res); + goto fail_pm_cons; + } + + res = ipa_pm_set_throughput(ipa_mhi_client_ctx->pm_hdl, 1000); + if (res) { + IPA_MHI_ERR("fail to set perf profile to PM %d\n", res); + goto fail_pm_cons; + } + + /* create a modem client for clock scaling */ + memset(¶ms, 0, sizeof(params)); + params.name = "MODEM (MHI)"; + params.group = IPA_PM_GROUP_MODEM; + params.skip_clk_vote = true; + res = ipa_pm_register(¶ms, &ipa_mhi_client_ctx->modem_pm_hdl); + if (res) { + IPA_MHI_ERR("fail to register with PM %d\n", res); + goto fail_pm_cons; + } + + return 0; + +fail_pm_cons: + ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl); + ipa_mhi_client_ctx->pm_hdl = ~0; + return res; +} + +static int ipa_mhi_create_rm_resources(void) +{ + int res; + struct ipa_rm_create_params mhi_prod_params; + struct ipa_rm_create_params mhi_cons_params; + struct ipa_rm_perf_profile profile; + + /* Create PROD in IPA RM */ + memset(&mhi_prod_params, 0, sizeof(mhi_prod_params)); + mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD; + mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS; + mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify; + res = ipa_rm_create_resource(&mhi_prod_params); + if (res) { + IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n"); + goto fail_create_rm_prod; + } + + memset(&profile, 
		0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 1000;
+	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_PROD, &profile);
+	if (res) {
+		IPA_MHI_ERR("fail to set profile to MHI_PROD\n");
+		goto fail_perf_rm_prod;
+	}
+
+	/* Create CONS in IPA RM */
+	memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
+	mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
+	mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
+	mhi_cons_params.request_resource = ipa_mhi_rm_cons_request;
+	mhi_cons_params.release_resource = ipa_mhi_rm_cons_release;
+	res = ipa_rm_create_resource(&mhi_cons_params);
+	if (res) {
+		IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
+		goto fail_create_rm_cons;
+	}
+
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 1000;
+	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_CONS, &profile);
+	if (res) {
+		IPA_MHI_ERR("fail to set profile to MHI_CONS\n");
+		goto fail_perf_rm_cons;
+	}
+
+	return 0;
+
+fail_perf_rm_cons:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+fail_create_rm_cons:
+fail_perf_rm_prod:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+fail_create_rm_prod:
+	return res;
+}
+
+/**
+ * ipa_mhi_init() - Initialize IPA MHI driver
+ * @params: initialization params
+ *
+ * This function is called by MHI client driver on boot to initialize IPA MHI
+ * Driver. When this function returns device can move to READY state.
+ * This function is doing the following:
+ *	- Initialize MHI IPA internal data structures
+ *	- Create IPA RM resources
+ *	- Initialize debugfs
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_init(struct ipa_mhi_init_params *params)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!params) {
+		IPA_MHI_ERR("null args\n");
+		return -EINVAL;
+	}
+
+	if (!params->notify) {
+		IPA_MHI_ERR("null notify function\n");
+		return -EINVAL;
+	}
+
+	if (ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("already initialized\n");
+		return -EPERM;
+	}
+
+	IPA_MHI_DBG("notify = %pS priv = %pK\n", params->notify, params->priv);
+	IPA_MHI_DBG("msi: addr_lo = 0x%x addr_hi = 0x%x\n",
+		params->msi.addr_low, params->msi.addr_hi);
+	IPA_MHI_DBG("msi: data = 0x%x mask = 0x%x\n",
+		params->msi.data, params->msi.mask);
+	IPA_MHI_DBG("mmio_addr = 0x%x\n", params->mmio_addr);
+	IPA_MHI_DBG("first_ch_idx = 0x%x\n", params->first_ch_idx);
+	IPA_MHI_DBG("first_er_idx = 0x%x\n", params->first_er_idx);
+	IPA_MHI_DBG("assert_bit40=%d\n", params->assert_bit40);
+	IPA_MHI_DBG("test_mode=%d\n", params->test_mode);
+
+	/* Initialize context */
+	ipa_mhi_client_ctx = kzalloc(sizeof(*ipa_mhi_client_ctx), GFP_KERNEL);
+	if (!ipa_mhi_client_ctx) {
+		res = -EFAULT;
+		goto fail_alloc_ctx;
+	}
+
+	ipa_mhi_client_ctx->state = IPA_MHI_STATE_INITIALIZED;
+	ipa_mhi_client_ctx->cb_notify = params->notify;
+	ipa_mhi_client_ctx->cb_priv = params->priv;
+	ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
+	init_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
+	spin_lock_init(&ipa_mhi_client_ctx->state_lock);
+	init_completion(&ipa_mhi_client_ctx->rm_cons_comp);
+	ipa_mhi_client_ctx->msi = params->msi;
+	ipa_mhi_client_ctx->mmio_addr = params->mmio_addr;
+	ipa_mhi_client_ctx->first_ch_idx = params->first_ch_idx;
+	ipa_mhi_client_ctx->first_er_idx = params->first_er_idx;
+	ipa_mhi_client_ctx->qmi_req_id = 0;
+	ipa_mhi_client_ctx->use_ipadma = true;
+	ipa_mhi_client_ctx->assert_bit40 = !!params->assert_bit40;
+	ipa_mhi_client_ctx->test_mode = params->test_mode;
+
+	ipa_mhi_client_ctx->wq = create_singlethread_workqueue("ipa_mhi_wq");
+	if (!ipa_mhi_client_ctx->wq) {
+		IPA_MHI_ERR("failed to create workqueue\n");
+		res = -EFAULT;
+		goto fail_create_wq;
+	}
+
+	res = ipa_dma_init();
+	if (res) {
+		IPA_MHI_ERR("failed to init ipa dma %d\n", res);
+		goto fail_dma_init;
+	}
+
+	if (ipa_pm_is_used())
+		res = ipa_mhi_register_pm();
+	else
+		res = ipa_mhi_create_rm_resources();
+	if (res) {
+		IPA_MHI_ERR("failed to create RM resources\n");
+		res = -EFAULT;
+		goto fail_rm;
+	}
+
+	/* Initialize uC interface */
+	ipa_uc_mhi_init(ipa_mhi_uc_ready_cb,
+		ipa_mhi_uc_wakeup_request_cb);
+	if (ipa_uc_state_check() == 0)
+		ipa_mhi_set_state(IPA_MHI_STATE_READY);
+
+	/* Initialize debugfs */
+	ipa_mhi_debugfs_init();
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_rm:
+	ipa_dma_destroy();
+fail_dma_init:
+	destroy_workqueue(ipa_mhi_client_ctx->wq);
+fail_create_wq:
+	kfree(ipa_mhi_client_ctx);
+	ipa_mhi_client_ctx = NULL;
+fail_alloc_ctx:
+	return res;
+}
+
+static void ipa_mhi_cache_dl_ul_sync_info(
+	struct ipa_config_req_msg_v01 *config_req)
+{
+	ipa_cached_dl_ul_sync_info.params.isDlUlSyncEnabled = true;
+	ipa_cached_dl_ul_sync_info.params.UlAccmVal =
+		(config_req->ul_accumulation_time_limit_valid) ?
+		config_req->ul_accumulation_time_limit : 0;
+	ipa_cached_dl_ul_sync_info.params.ulMsiEventThreshold =
+		(config_req->ul_msi_event_threshold_valid) ?
+		config_req->ul_msi_event_threshold : 0;
+	ipa_cached_dl_ul_sync_info.params.dlMsiEventThreshold =
+		(config_req->dl_msi_event_threshold_valid) ?
+		config_req->dl_msi_event_threshold : 0;
+}
+
+/**
+ * ipa_mhi_handle_ipa_config_req() - handle IPA CONFIG QMI message
+ *
+ * This function is called by IPA QMI service to indicate that IPA CONFIG
+ * message was sent from modem. IPA MHI will update this information to IPA uC
+ * or will cache it until IPA MHI is initialized.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	if (ipa_get_transport_type() != IPA_TRANSPORT_TYPE_GSI) {
+		ipa_mhi_cache_dl_ul_sync_info(config_req);
+		if (ipa_mhi_client_ctx &&
+				ipa_mhi_client_ctx->state !=
+				IPA_MHI_STATE_INITIALIZED)
+			ipa_uc_mhi_send_dl_ul_sync_info(
+				&ipa_cached_dl_ul_sync_info);
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+int ipa_mhi_is_using_dma(bool *flag)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("not initialized\n");
+		return -EPERM;
+	}
+
+	*flag = ipa_mhi_client_ctx->use_ipadma ? true : false;
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+EXPORT_SYMBOL(ipa_mhi_is_using_dma);
+
+const char *ipa_mhi_get_state_str(int state)
+{
+	return MHI_STATE_STR(state);
+}
+EXPORT_SYMBOL(ipa_mhi_get_state_str);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI client driver");
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
new file mode 100644
index 000000000000..c18f79bd998b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include
+#include
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
+
+#define IPA_NTN_DMA_POOL_ALIGNMENT 8
+#define OFFLOAD_DRV_NAME "ipa_uc_offload"
+#define IPA_UC_OFFLOAD_DBG(fmt, args...)
\ + do { \ + pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UC_OFFLOAD_LOW(fmt, args...) \ + do { \ + pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UC_OFFLOAD_ERR(fmt, args...) \ + do { \ + pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UC_OFFLOAD_INFO(fmt, args...) \ + do { \ + pr_info(OFFLOAD_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +enum ipa_uc_offload_state { + IPA_UC_OFFLOAD_STATE_INVALID, + IPA_UC_OFFLOAD_STATE_INITIALIZED, + IPA_UC_OFFLOAD_STATE_UP, +}; + +struct ipa_uc_offload_ctx { + enum ipa_uc_offload_proto proto; + enum ipa_uc_offload_state state; + void *priv; + u8 hdr_len; + u32 partial_hdr_hdl[IPA_IP_MAX]; + char netdev_name[IPA_RESOURCE_NAME_MAX]; + ipa_notify_cb notify; + struct completion ntn_completion; + u32 pm_hdl; + struct ipa_ntn_conn_in_params conn; +}; + +static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE]; + +static int ipa_uc_ntn_cons_release(void); +static int ipa_uc_ntn_cons_request(void); +static void ipa_uc_offload_rm_notify(void *, enum ipa_rm_event, unsigned long); + +static int ipa_commit_partial_hdr( + struct ipa_ioc_add_hdr *hdr, + const char *netdev_name, + struct ipa_hdr_info *hdr_info) +{ + int i; + + if (hdr == NULL || hdr_info == NULL) { + IPA_UC_OFFLOAD_ERR("Invalid input\n"); + return -EINVAL; + } + + hdr->commit = 1; + hdr->num_hdrs = 2; + + snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name), + "%s_ipv4", netdev_name); + snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name), + "%s_ipv6", netdev_name); + for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) { + hdr->hdr[i].hdr_len = hdr_info[i].hdr_len; + memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len); + hdr->hdr[i].type = hdr_info[i].hdr_type; + hdr->hdr[i].is_partial = 1; + hdr->hdr[i].is_eth2_ofst_valid = 1; + hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset; + } + + if (ipa_add_hdr(hdr)) { + IPA_UC_OFFLOAD_ERR("fail to add partial headers\n"); + return -EFAULT; + } + + return 0; +} + +static void ipa_uc_offload_ntn_pm_cb(void *p, enum ipa_pm_cb_event event) +{ + /* suspend/resume is not supported */ + IPA_UC_OFFLOAD_DBG("event = %d\n", event); +} + +static int ipa_uc_offload_ntn_register_pm_client( + struct ipa_uc_offload_ctx *ntn_ctx) +{ + int res; + struct ipa_pm_register_params params; + + memset(¶ms, 0, sizeof(params)); + params.name = "ETH"; + params.callback = ipa_uc_offload_ntn_pm_cb; + params.user_data = ntn_ctx; + params.group = IPA_PM_GROUP_DEFAULT; + res = ipa_pm_register(¶ms, &ntn_ctx->pm_hdl); + if (res) { + IPA_UC_OFFLOAD_ERR("fail to register with PM %d\n", res); + return res; + } + + res = ipa_pm_associate_ipa_cons_to_client(ntn_ctx->pm_hdl, + IPA_CLIENT_ETHERNET_CONS); + if (res) { + IPA_UC_OFFLOAD_ERR("fail to 
associate cons with PM %d\n", res); + ipa_pm_deregister(ntn_ctx->pm_hdl); + ntn_ctx->pm_hdl = ~0; + return res; + } + + return 0; +} + +static void ipa_uc_offload_ntn_deregister_pm_client( + struct ipa_uc_offload_ctx *ntn_ctx) +{ + ipa_pm_deactivate_sync(ntn_ctx->pm_hdl); + ipa_pm_deregister(ntn_ctx->pm_hdl); +} +static int ipa_uc_offload_ntn_create_rm_resources( + struct ipa_uc_offload_ctx *ntn_ctx) +{ + int ret; + struct ipa_rm_create_params param; + + memset(¶m, 0, sizeof(param)); + param.name = IPA_RM_RESOURCE_ETHERNET_PROD; + param.reg_params.user_data = ntn_ctx; + param.reg_params.notify_cb = ipa_uc_offload_rm_notify; + param.floor_voltage = IPA_VOLTAGE_SVS; + ret = ipa_rm_create_resource(¶m); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_PROD resource\n"); + return -EFAULT; + } + + memset(¶m, 0, sizeof(param)); + param.name = IPA_RM_RESOURCE_ETHERNET_CONS; + param.request_resource = ipa_uc_ntn_cons_request; + param.release_resource = ipa_uc_ntn_cons_release; + ret = ipa_rm_create_resource(¶m); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_CONS resource\n"); + ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD); + return -EFAULT; + } + + return 0; +} + +static int ipa_uc_offload_ntn_reg_intf( + struct ipa_uc_offload_intf_params *inp, + struct ipa_uc_offload_out_params *outp, + struct ipa_uc_offload_ctx *ntn_ctx) +{ + struct ipa_ioc_add_hdr *hdr = NULL; + struct ipa_tx_intf tx; + struct ipa_rx_intf rx; + struct ipa_ioc_tx_intf_prop tx_prop[2]; + struct ipa_ioc_rx_intf_prop rx_prop[2]; + int ret = 0; + u32 len; + + + IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n", + inp->netdev_name); + if (ipa_pm_is_used()) + ret = ipa_uc_offload_ntn_register_pm_client(ntn_ctx); + else + ret = ipa_uc_offload_ntn_create_rm_resources(ntn_ctx); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail to create rm resource\n"); + return -EFAULT; + } + memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX); + ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len; + ntn_ctx->notify = inp->notify; + ntn_ctx->priv = inp->priv; + + /* add partial header */ + len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add); + hdr = kzalloc(len, GFP_KERNEL); + if (hdr == NULL) { + ret = -ENOMEM; + goto fail_alloc; + } + + if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) { + IPA_UC_OFFLOAD_ERR("fail to commit partial headers\n"); + ret = -EFAULT; + goto fail; + } + + /* populate tx prop */ + tx.num_props = 2; + tx.prop = tx_prop; + + memset(tx_prop, 0, sizeof(tx_prop)); + tx_prop[0].ip = IPA_IP_v4; + tx_prop[0].dst_pipe = IPA_CLIENT_ETHERNET_CONS; + tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type; + memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name, + sizeof(tx_prop[0].hdr_name)); + + tx_prop[1].ip = IPA_IP_v6; + tx_prop[1].dst_pipe = IPA_CLIENT_ETHERNET_CONS; + tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type; + memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name, + sizeof(tx_prop[1].hdr_name)); + + /* populate rx prop */ + rx.num_props = 2; + rx.prop = rx_prop; + + memset(rx_prop, 0, sizeof(rx_prop)); + rx_prop[0].ip = IPA_IP_v4; + rx_prop[0].src_pipe = IPA_CLIENT_ETHERNET_PROD; + rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type; + if (inp->is_meta_data_valid) { + rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_prop[0].attrib.meta_data = inp->meta_data; + rx_prop[0].attrib.meta_data_mask = inp->meta_data_mask; + } + + rx_prop[1].ip = IPA_IP_v6; + rx_prop[1].src_pipe = IPA_CLIENT_ETHERNET_PROD; + rx_prop[1].hdr_l2_type = 
inp->hdr_info[1].hdr_type; + if (inp->is_meta_data_valid) { + rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_prop[1].attrib.meta_data = inp->meta_data; + rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask; + } + + if (ipa_register_intf(inp->netdev_name, &tx, &rx)) { + IPA_UC_OFFLOAD_ERR("fail to add interface prop\n"); + memset(ntn_ctx, 0, sizeof(*ntn_ctx)); + ret = -EFAULT; + goto fail; + } + + ntn_ctx->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl; + ntn_ctx->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl; + init_completion(&ntn_ctx->ntn_completion); + ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED; + + kfree(hdr); + return ret; + +fail: + kfree(hdr); +fail_alloc: + if (ipa_pm_is_used()) { + ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx); + } else { + ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS); + ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD); + } + return ret; +} + +int ipa_uc_offload_reg_intf( + struct ipa_uc_offload_intf_params *inp, + struct ipa_uc_offload_out_params *outp) +{ + struct ipa_uc_offload_ctx *ctx; + int ret = 0; + + if (inp == NULL || outp == NULL) { + IPA_UC_OFFLOAD_ERR("invalid params in=%pK out=%pK\n", + inp, outp); + return -EINVAL; + } + + if (inp->proto <= IPA_UC_INVALID || + inp->proto >= IPA_UC_MAX_PROT_SIZE) { + IPA_UC_OFFLOAD_ERR("invalid proto %d\n", inp->proto); + return -EINVAL; + } + + if (!ipa_uc_offload_ctx[inp->proto]) { + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (ctx == NULL) { + IPA_UC_OFFLOAD_ERR("fail to alloc uc offload ctx\n"); + return -EFAULT; + } + ipa_uc_offload_ctx[inp->proto] = ctx; + ctx->proto = inp->proto; + } else + ctx = ipa_uc_offload_ctx[inp->proto]; + + if (ctx->state != IPA_UC_OFFLOAD_STATE_INVALID) { + IPA_UC_OFFLOAD_ERR("Already Initialized\n"); + return -EINVAL; + } + + if (ctx->proto == IPA_UC_NTN) { + ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx); + if (!ret) + outp->clnt_hndl = IPA_UC_NTN; + } + + return ret; +} +EXPORT_SYMBOL(ipa_uc_offload_reg_intf); + +static int ipa_uc_ntn_cons_release(void) +{ + return 0; +} + +static int ipa_uc_ntn_cons_request(void) +{ + int ret = 0; + struct ipa_uc_offload_ctx *ntn_ctx; + + ntn_ctx = ipa_uc_offload_ctx[IPA_UC_NTN]; + if (!ntn_ctx) { + IPA_UC_OFFLOAD_ERR("NTN is not initialized\n"); + ret = -EFAULT; + } else if (ntn_ctx->state != IPA_UC_OFFLOAD_STATE_UP) { + IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", ntn_ctx->state); + ret = -EFAULT; + } + + return ret; +} + +static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + struct ipa_uc_offload_ctx *offload_ctx; + + offload_ctx = (struct ipa_uc_offload_ctx *)user_data; + if (!(offload_ctx && offload_ctx->proto > IPA_UC_INVALID && + offload_ctx->proto < IPA_UC_MAX_PROT_SIZE)) { + IPA_UC_OFFLOAD_ERR("Invalid user data\n"); + return; + } + + if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) + IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", offload_ctx->state); + + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + complete_all(&offload_ctx->ntn_completion); + break; + + case IPA_RM_RESOURCE_RELEASED: + break; + + default: + IPA_UC_OFFLOAD_ERR("Invalid RM Evt: %d", event); + break; + } +} + +static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest, + struct ipa_ntn_setup_info *source) +{ + int result; + + IPA_UC_OFFLOAD_DBG("Allocating smmu info\n"); + + memcpy(dest, source, sizeof(struct ipa_ntn_setup_info)); + + dest->data_buff_list = + kcalloc(dest->num_buffers, sizeof(struct ntn_buff_smmu_map), + GFP_KERNEL); + 
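+ /*
+ * The buffer list and the two SG tables copied below are released by
+ * ipa_uc_ntn_free_conn_smmu_info() at disconnect; on any failure here
+ * the partial copies are freed before returning.
+ */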
if (dest->data_buff_list == NULL) { + IPA_UC_OFFLOAD_ERR("failed to alloc smmu info\n"); + return -ENOMEM; + } + + memcpy(dest->data_buff_list, source->data_buff_list, + sizeof(struct ntn_buff_smmu_map) * dest->num_buffers); + + result = ipa_smmu_store_sgt(&dest->buff_pool_base_sgt, + source->buff_pool_base_sgt); + if (result) { + kfree(dest->data_buff_list); + return result; + } + + result = ipa_smmu_store_sgt(&dest->ring_base_sgt, + source->ring_base_sgt); + if (result) { + kfree(dest->data_buff_list); + ipa_smmu_free_sgt(&dest->buff_pool_base_sgt); + return result; + } + + return 0; +} + +static void ipa_uc_ntn_free_conn_smmu_info(struct ipa_ntn_setup_info *params) +{ + kfree(params->data_buff_list); + ipa_smmu_free_sgt(¶ms->buff_pool_base_sgt); + ipa_smmu_free_sgt(¶ms->ring_base_sgt); +} + +int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp, + struct ipa_ntn_conn_out_params *outp, + struct ipa_uc_offload_ctx *ntn_ctx) +{ + int result = 0; + enum ipa_uc_offload_state prev_state; + + if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) { + IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n"); + return -EINVAL; + } + + prev_state = ntn_ctx->state; + if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT || + inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) { + IPA_UC_OFFLOAD_ERR("alignment failure on TX\n"); + return -EINVAL; + } + if (inp->ul.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT || + inp->ul.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) { + IPA_UC_OFFLOAD_ERR("alignment failure on RX\n"); + return -EINVAL; + } + + if (ipa_pm_is_used()) { + result = ipa_pm_activate_sync(ntn_ctx->pm_hdl); + if (result) { + IPA_UC_OFFLOAD_ERR("fail to activate: %d\n", result); + return result; + } + } else { + result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result) { + IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n", + result); + return result; + } + + result = ipa_rm_request_resource(IPA_RM_RESOURCE_ETHERNET_PROD); + if (result == -EINPROGRESS) { + if (wait_for_completion_timeout(&ntn_ctx->ntn_completion + , 10*HZ) == 0) { + IPA_UC_OFFLOAD_ERR("ETH_PROD req timeout\n"); + result = -EFAULT; + goto fail; + } + } else if (result != 0) { + IPA_UC_OFFLOAD_ERR("fail to request resource\n"); + result = -EFAULT; + goto fail; + } + } + + ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP; + result = ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify, + ntn_ctx->priv, ntn_ctx->hdr_len, outp); + if (result) { + IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes: %d\n", + result); + ntn_ctx->state = prev_state; + result = -EFAULT; + goto fail; + } + + if (ntn_ctx->conn.dl.smmu_enabled) { + result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.dl, + &inp->dl); + if (result) { + IPA_UC_OFFLOAD_ERR("alloc failure on TX\n"); + goto fail; + } + result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.ul, + &inp->ul); + if (result) { + ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl); + IPA_UC_OFFLOAD_ERR("alloc failure on RX\n"); + goto fail; + } + } + +fail: + if (!ipa_pm_is_used()) + ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD, + IPA_RM_RESOURCE_APPS_CONS); + return result; +} + +int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp, + struct ipa_uc_offload_conn_out_params *outp) +{ + int ret = 0; + struct ipa_uc_offload_ctx *offload_ctx; + + if (!(inp && outp)) { + IPA_UC_OFFLOAD_ERR("bad parm. 
in=%pK out=%pK\n", inp, outp); + return -EINVAL; + } + + if (inp->clnt_hndl <= IPA_UC_INVALID || + inp->clnt_hndl >= IPA_UC_MAX_PROT_SIZE) { + IPA_UC_OFFLOAD_ERR("invalid client handle %d\n", + inp->clnt_hndl); + return -EINVAL; + } + + offload_ctx = ipa_uc_offload_ctx[inp->clnt_hndl]; + if (!offload_ctx) { + IPA_UC_OFFLOAD_ERR("Invalid Handle\n"); + return -EINVAL; + } + + if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) { + IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state); + return -EPERM; + } + + switch (offload_ctx->proto) { + case IPA_UC_NTN: + ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn, + offload_ctx); + break; + + default: + IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", offload_ctx->proto); + ret = -EINVAL; + break; + } + + return ret; +} +EXPORT_SYMBOL(ipa_uc_offload_conn_pipes); + +int ipa_set_perf_profile(struct ipa_perf_profile *profile) +{ + struct ipa_rm_perf_profile rm_profile; + enum ipa_rm_resource_name resource_name; + + if (profile == NULL) { + IPA_UC_OFFLOAD_ERR("Invalid input\n"); + return -EINVAL; + } + + rm_profile.max_supported_bandwidth_mbps = + profile->max_supported_bw_mbps; + + if (profile->client == IPA_CLIENT_ETHERNET_PROD) { + resource_name = IPA_RM_RESOURCE_ETHERNET_PROD; + } else if (profile->client == IPA_CLIENT_ETHERNET_CONS) { + resource_name = IPA_RM_RESOURCE_ETHERNET_CONS; + } else { + IPA_UC_OFFLOAD_ERR("not supported\n"); + return -EINVAL; + } + + if (ipa_pm_is_used()) + return ipa_pm_set_throughput( + ipa_uc_offload_ctx[IPA_UC_NTN]->pm_hdl, + profile->max_supported_bw_mbps); + + if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) { + IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n"); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(ipa_set_perf_profile); + +static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx) +{ + int ipa_ep_idx_ul, ipa_ep_idx_dl; + int ret = 0; + + if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) { + IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n"); + return -EINVAL; + } + + ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED; + + if (ipa_pm_is_used()) { + ret = ipa_pm_deactivate_sync(ntn_ctx->pm_hdl); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail to deactivate res: %d\n", + ret); + return -EFAULT; + } + } else { + ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail release ETHERNET_PROD: %d\n", + ret); + return -EFAULT; + } + + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail del dep ETH->APPS, %d\n", ret); + return -EFAULT; + } + } + + ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD); + ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS); + ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl, + &ntn_ctx->conn); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail to tear down ntn offload pipes, %d\n", + ret); + return -EFAULT; + } + if (ntn_ctx->conn.dl.smmu_enabled) { + ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl); + ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.ul); + } + + return ret; +} + +int ipa_uc_offload_disconn_pipes(u32 clnt_hdl) +{ + struct ipa_uc_offload_ctx *offload_ctx; + int ret = 0; + + if (clnt_hdl <= IPA_UC_INVALID || + clnt_hdl >= IPA_UC_MAX_PROT_SIZE) { + IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl); + return -EINVAL; + } + + offload_ctx = ipa_uc_offload_ctx[clnt_hdl]; + if (!offload_ctx) { + IPA_UC_OFFLOAD_ERR("Invalid client Handle\n"); + return 
-EINVAL; + } + + if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_UP) { + IPA_UC_OFFLOAD_ERR("Invalid state\n"); + return -EINVAL; + } + + switch (offload_ctx->proto) { + case IPA_UC_NTN: + ret = ipa_uc_ntn_disconn_pipes(offload_ctx); + break; + + default: + IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl); + ret = -EINVAL; + break; + } + + return ret; +} +EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes); + +static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx) +{ + int len, result = 0; + struct ipa_ioc_del_hdr *hdr; + + if (ipa_pm_is_used()) { + ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx); + } else { + if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD)) { + IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_PROD\n"); + return -EFAULT; + } + + if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS)) { + IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_CONS\n"); + return -EFAULT; + } + } + + len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del); + hdr = kzalloc(len, GFP_KERNEL); + if (hdr == NULL) + return -ENOMEM; + + hdr->commit = 1; + hdr->num_hdls = 2; + hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0]; + hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1]; + + if (ipa_del_hdr(hdr)) { + IPA_UC_OFFLOAD_ERR("fail to delete partial header\n"); + result = -EFAULT; + goto fail; + } + + if (ipa_deregister_intf(ntn_ctx->netdev_name)) { + IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n"); + result = -EFAULT; + goto fail; + } + +fail: + kfree(hdr); + return result; +} + +int ipa_uc_offload_cleanup(u32 clnt_hdl) +{ + struct ipa_uc_offload_ctx *offload_ctx; + int ret = 0; + + if (clnt_hdl <= IPA_UC_INVALID || + clnt_hdl >= IPA_UC_MAX_PROT_SIZE) { + IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl); + return -EINVAL; + } + + offload_ctx = ipa_uc_offload_ctx[clnt_hdl]; + if (!offload_ctx) { + IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl); + return -EINVAL; + } + + if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) { + IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state); + return -EINVAL; + } + + switch (offload_ctx->proto) { + case IPA_UC_NTN: + ret = ipa_uc_ntn_cleanup(offload_ctx); + break; + + default: + IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl); + ret = -EINVAL; + break; + } + + if (!ret) { + kfree(offload_ctx); + offload_ctx = NULL; + ipa_uc_offload_ctx[clnt_hdl] = NULL; + } + + return ret; +} +EXPORT_SYMBOL(ipa_uc_offload_cleanup); + +/** + * ipa_uc_offload_reg_rdyCB() - register the uC ready callback if the uC is + * not ready yet + * @inp: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *inp) +{ + int ret = 0; + + if (!inp) { + IPA_UC_OFFLOAD_ERR("Invalid input\n"); + return -EINVAL; + } + + if (inp->proto == IPA_UC_NTN) + ret = ipa_ntn_uc_reg_rdyCB(inp->notify, inp->priv); + + if (ret == -EEXIST) { + inp->is_uC_ready = true; + ret = 0; + } else + inp->is_uC_ready = false; + + return ret; +} +EXPORT_SYMBOL(ipa_uc_offload_reg_rdyCB); + +void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto) +{ + if (proto == IPA_UC_NTN) + ipa_ntn_uc_dereg_rdyCB(); +} +EXPORT_SYMBOL(ipa_uc_offload_dereg_rdyCB); diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c new file mode 100644 index 000000000000..6269b763e8d5 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c @@ -0,0 +1,3015 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2018, The
Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_v3/ipa_i.h" +#include "../ipa_rm_i.h" + +#define IPA_USB_RM_TIMEOUT_MSEC 10000 +#define IPA_USB_DEV_READY_TIMEOUT_MSEC 10000 + +#define IPA_HOLB_TMR_EN 0x1 + +/* GSI channels weights */ +#define IPA_USB_DL_CHAN_LOW_WEIGHT 0x5 +#define IPA_USB_UL_CHAN_LOW_WEIGHT 0x4 + +#define IPA_USB_MAX_MSG_LEN 4096 + +#define IPA_USB_DRV_NAME "ipa_usb" + +#define IPA_USB_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_USB_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_USB_ERR(fmt, args...) \ + do { \ + pr_err(IPA_USB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_USB_INFO(fmt, args...) \ + do { \ + pr_info(IPA_USB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +enum ipa_usb_direction { + IPA_USB_DIR_UL, + IPA_USB_DIR_DL, +}; + +struct ipa_usb_xdci_connect_params_internal { + enum ipa_usb_max_usb_packet_size max_pkt_size; + u32 ipa_to_usb_clnt_hdl; + u8 ipa_to_usb_xferrscidx; + bool ipa_to_usb_xferrscidx_valid; + u32 usb_to_ipa_clnt_hdl; + u8 usb_to_ipa_xferrscidx; + bool usb_to_ipa_xferrscidx_valid; + enum ipa_usb_teth_prot teth_prot; + struct ipa_usb_teth_prot_params teth_prot_params; + u32 max_supported_bandwidth_mbps; +}; + +enum ipa3_usb_teth_prot_state { + IPA_USB_TETH_PROT_INITIALIZED, + IPA_USB_TETH_PROT_CONNECTED, + IPA_USB_TETH_PROT_INVALID +}; + +struct ipa3_usb_teth_prot_context { + union { + struct ipa_usb_init_params rndis; + struct ecm_ipa_params ecm; + struct teth_bridge_init_params teth_bridge; + } teth_prot_params; + enum ipa3_usb_teth_prot_state state; + void *user_data; +}; + +enum ipa3_usb_cons_state { + IPA_USB_CONS_GRANTED, + IPA_USB_CONS_RELEASED +}; + +struct ipa3_usb_rm_context { + struct ipa_rm_create_params prod_params; + struct ipa_rm_create_params cons_params; + bool prod_valid; + bool cons_valid; + struct completion prod_comp; + enum ipa3_usb_cons_state cons_state; + /* consumer was requested*/ + bool cons_requested; + /* consumer was requested and released before it was granted*/ + bool cons_requested_released; +}; + +struct ipa3_usb_pm_context { + struct ipa_pm_register_params reg_params; + struct work_struct *remote_wakeup_work; + u32 hdl; +}; + +enum ipa3_usb_state { + IPA_USB_INVALID, + IPA_USB_INITIALIZED, + IPA_USB_CONNECTED, + IPA_USB_STOPPED, + IPA_USB_SUSPEND_REQUESTED, + IPA_USB_SUSPENDED, + IPA_USB_SUSPENDED_NO_RWAKEUP, + IPA_USB_RESUME_IN_PROGRESS +}; + +enum ipa3_usb_transport_type { + IPA_USB_TRANSPORT_TETH, + IPA_USB_TRANSPORT_DPL, + IPA_USB_TRANSPORT_MAX +}; + +/* Get transport type from tethering protocol */ +#define 
IPA3_USB_GET_TTYPE(__teth_prot) \ + (((__teth_prot) == IPA_USB_DIAG) ? \ + IPA_USB_TRANSPORT_DPL : IPA_USB_TRANSPORT_TETH) + +/* Is the given transport type DPL? */ +#define IPA3_USB_IS_TTYPE_DPL(__ttype) \ + ((__ttype) == IPA_USB_TRANSPORT_DPL) + +struct ipa3_usb_teth_prot_conn_params { + u32 usb_to_ipa_clnt_hdl; + u32 ipa_to_usb_clnt_hdl; + struct ipa_usb_teth_prot_params params; +}; + +/** + * Transport type - could be either data tethering or DPL + * Each transport has its own RM resources and statuses + */ +struct ipa3_usb_transport_type_ctx { + struct ipa3_usb_rm_context rm_ctx; + struct ipa3_usb_pm_context pm_ctx; + int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data); + void *user_data; + enum ipa3_usb_state state; + struct ipa_usb_xdci_chan_params ul_ch_params; + struct ipa_usb_xdci_chan_params dl_ch_params; + struct ipa3_usb_teth_prot_conn_params teth_conn_params; +}; + +struct ipa3_usb_smmu_reg_map { + int cnt; + phys_addr_t addr; +}; + +struct ipa3_usb_context { + struct ipa3_usb_teth_prot_context + teth_prot_ctx[IPA_USB_MAX_TETH_PROT_SIZE]; + int num_init_prot; /* without dpl */ + struct teth_bridge_init_params teth_bridge_params; + struct completion dev_ready_comp; + u32 qmi_req_id; + spinlock_t state_lock; + bool dl_data_pending; + struct workqueue_struct *wq; + struct mutex general_mutex; + struct ipa3_usb_transport_type_ctx + ttype_ctx[IPA_USB_TRANSPORT_MAX]; + struct dentry *dfile_state_info; + struct dentry *dent; + struct ipa3_usb_smmu_reg_map smmu_reg_map; +}; + +enum ipa3_usb_op { + IPA_USB_OP_INIT_TETH_PROT, + IPA_USB_OP_REQUEST_CHANNEL, + IPA_USB_OP_CONNECT, + IPA_USB_OP_DISCONNECT, + IPA_USB_OP_RELEASE_CHANNEL, + IPA_USB_OP_DEINIT_TETH_PROT, + IPA_USB_OP_SUSPEND, + IPA_USB_OP_SUSPEND_NO_RWAKEUP, + IPA_USB_OP_RESUME +}; + +struct ipa3_usb_status_dbg_info { + const char *teth_state; + const char *dpl_state; + int num_init_prot; + const char *inited_prots[IPA_USB_MAX_TETH_PROT_SIZE]; + const char *teth_connected_prot; + const char *dpl_connected_prot; + const char *teth_cons_state; + const char *dpl_cons_state; +}; + +static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work); +static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work); +static DECLARE_WORK(ipa3_usb_notify_remote_wakeup_work, + ipa3_usb_wq_notify_remote_wakeup); +static DECLARE_WORK(ipa3_usb_dpl_notify_remote_wakeup_work, + ipa3_usb_wq_dpl_notify_remote_wakeup); + +struct ipa3_usb_context *ipa3_usb_ctx; + +static char *ipa3_usb_op_to_string(enum ipa3_usb_op op) +{ + switch (op) { + case IPA_USB_OP_INIT_TETH_PROT: + return "IPA_USB_OP_INIT_TETH_PROT"; + case IPA_USB_OP_REQUEST_CHANNEL: + return "IPA_USB_OP_REQUEST_CHANNEL"; + case IPA_USB_OP_CONNECT: + return "IPA_USB_OP_CONNECT"; + case IPA_USB_OP_DISCONNECT: + return "IPA_USB_OP_DISCONNECT"; + case IPA_USB_OP_RELEASE_CHANNEL: + return "IPA_USB_OP_RELEASE_CHANNEL"; + case IPA_USB_OP_DEINIT_TETH_PROT: + return "IPA_USB_OP_DEINIT_TETH_PROT"; + case IPA_USB_OP_SUSPEND: + return "IPA_USB_OP_SUSPEND"; + case IPA_USB_OP_SUSPEND_NO_RWAKEUP: + return "IPA_USB_OP_SUSPEND_NO_RWAKEUP"; + case IPA_USB_OP_RESUME: + return "IPA_USB_OP_RESUME"; + } + + return "UNSUPPORTED"; +} + +static char *ipa3_usb_state_to_string(enum ipa3_usb_state state) +{ + switch (state) { + case IPA_USB_INVALID: + return "IPA_USB_INVALID"; + case IPA_USB_INITIALIZED: + return "IPA_USB_INITIALIZED"; + case IPA_USB_CONNECTED: + return "IPA_USB_CONNECTED"; + case IPA_USB_STOPPED: + return "IPA_USB_STOPPED"; + case IPA_USB_SUSPEND_REQUESTED: +
return "IPA_USB_SUSPEND_REQUESTED"; + case IPA_USB_SUSPENDED: + return "IPA_USB_SUSPENDED"; + case IPA_USB_SUSPENDED_NO_RWAKEUP: + return "IPA_USB_SUSPENDED_NO_RWAKEUP"; + case IPA_USB_RESUME_IN_PROGRESS: + return "IPA_USB_RESUME_IN_PROGRESS"; + } + + return "UNSUPPORTED"; +} + +static char *ipa3_usb_notify_event_to_string(enum ipa_usb_notify_event event) +{ + switch (event) { + case IPA_USB_DEVICE_READY: + return "IPA_USB_DEVICE_READY"; + case IPA_USB_REMOTE_WAKEUP: + return "IPA_USB_REMOTE_WAKEUP"; + case IPA_USB_SUSPEND_COMPLETED: + return "IPA_USB_SUSPEND_COMPLETED"; + } + + return "UNSUPPORTED"; +} + +static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit, + enum ipa3_usb_transport_type ttype) +{ + unsigned long flags; + int state_legal = false; + enum ipa3_usb_state state; + struct ipa3_usb_rm_context *rm_ctx; + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + state = ipa3_usb_ctx->ttype_ctx[ttype].state; + switch (new_state) { + case IPA_USB_INVALID: + if (state == IPA_USB_INITIALIZED) + state_legal = true; + break; + case IPA_USB_INITIALIZED: + if (state == IPA_USB_STOPPED || state == IPA_USB_INVALID || + ((!IPA3_USB_IS_TTYPE_DPL(ttype)) && + (state == IPA_USB_INITIALIZED))) + state_legal = true; + break; + case IPA_USB_CONNECTED: + if (state == IPA_USB_INITIALIZED || + state == IPA_USB_STOPPED || + state == IPA_USB_RESUME_IN_PROGRESS || + state == IPA_USB_SUSPENDED_NO_RWAKEUP || + /* + * In case of failure during suspend request + * handling, state is reverted to connected. + */ + (err_permit && state == IPA_USB_SUSPEND_REQUESTED)) + state_legal = true; + break; + case IPA_USB_STOPPED: + if (state == IPA_USB_CONNECTED || + state == IPA_USB_SUSPENDED || + state == IPA_USB_SUSPENDED_NO_RWAKEUP) + state_legal = true; + break; + case IPA_USB_SUSPEND_REQUESTED: + if (state == IPA_USB_CONNECTED) + state_legal = true; + break; + case IPA_USB_SUSPENDED: + if (state == IPA_USB_SUSPEND_REQUESTED || + /* + * In case of failure during resume, state is reverted + * to original, which could be suspended. Allow it + */ + (err_permit && state == IPA_USB_RESUME_IN_PROGRESS)) + state_legal = true; + break; + case IPA_USB_SUSPENDED_NO_RWAKEUP: + if (state == IPA_USB_CONNECTED) + state_legal = true; + break; + case IPA_USB_RESUME_IN_PROGRESS: + if (state == IPA_USB_SUSPENDED) + state_legal = true; + break; + default: + state_legal = false; + break; + + } + if (state_legal) { + if (state != new_state) { + IPA_USB_DBG("ipa_usb %s state changed %s -> %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
"DPL" : "", + ipa3_usb_state_to_string(state), + ipa3_usb_state_to_string(new_state)); + ipa3_usb_ctx->ttype_ctx[ttype].state = new_state; + } + } else { + IPA_USB_ERR("invalid state change %s -> %s\n", + ipa3_usb_state_to_string(state), + ipa3_usb_state_to_string(new_state)); + } + + if (!ipa_pm_is_used() && + state_legal && (new_state == IPA_USB_CONNECTED)) { + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + if ((rm_ctx->cons_state == IPA_USB_CONS_GRANTED) || + rm_ctx->cons_requested_released) { + rm_ctx->cons_requested = false; + rm_ctx->cons_requested_released = + false; + } + /* Notify RM that consumer is granted */ + if (rm_ctx->cons_requested) { + ipa_rm_notify_completion( + IPA_RM_RESOURCE_GRANTED, + rm_ctx->cons_params.name); + rm_ctx->cons_state = IPA_USB_CONS_GRANTED; + rm_ctx->cons_requested = false; + } + } + + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + return state_legal; +} + +static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op, + enum ipa3_usb_transport_type ttype) +{ + unsigned long flags; + bool is_legal = false; + enum ipa3_usb_state state; + bool is_dpl; + + if (ipa3_usb_ctx == NULL) { + IPA_USB_ERR("ipa_usb_ctx is not initialized!\n"); + return false; + } + + is_dpl = IPA3_USB_IS_TTYPE_DPL(ttype); + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + state = ipa3_usb_ctx->ttype_ctx[ttype].state; + switch (op) { + case IPA_USB_OP_INIT_TETH_PROT: + if (state == IPA_USB_INVALID || + (!is_dpl && state == IPA_USB_INITIALIZED)) + is_legal = true; + break; + case IPA_USB_OP_REQUEST_CHANNEL: + if (state == IPA_USB_INITIALIZED) + is_legal = true; + break; + case IPA_USB_OP_CONNECT: + if (state == IPA_USB_INITIALIZED || state == IPA_USB_STOPPED) + is_legal = true; + break; + case IPA_USB_OP_DISCONNECT: + if (state == IPA_USB_CONNECTED || + state == IPA_USB_SUSPENDED || + state == IPA_USB_SUSPENDED_NO_RWAKEUP) + is_legal = true; + break; + case IPA_USB_OP_RELEASE_CHANNEL: + /* when releasing 1st channel state will be changed already */ + if (state == IPA_USB_STOPPED || + (!is_dpl && state == IPA_USB_INITIALIZED)) + is_legal = true; + break; + case IPA_USB_OP_DEINIT_TETH_PROT: + /* + * For data tethering we should allow deinit an inited protocol + * always. E.g. rmnet is inited and rndis is connected. + * USB can deinit rmnet first and then disconnect rndis + * on cable disconnect. + */ + if (!is_dpl || state == IPA_USB_INITIALIZED) + is_legal = true; + break; + case IPA_USB_OP_SUSPEND: + if (state == IPA_USB_CONNECTED) + is_legal = true; + break; + case IPA_USB_OP_SUSPEND_NO_RWAKEUP: + if (state == IPA_USB_CONNECTED) + is_legal = true; + break; + case IPA_USB_OP_RESUME: + if (state == IPA_USB_SUSPENDED || + state == IPA_USB_SUSPENDED_NO_RWAKEUP) + is_legal = true; + break; + default: + is_legal = false; + break; + } + + if (!is_legal) { + IPA_USB_ERR("Illegal %s operation: state=%s operation=%s\n", + is_dpl ? "DPL" : "", + ipa3_usb_state_to_string(state), + ipa3_usb_op_to_string(op)); + } + + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + return is_legal; +} + +static void ipa3_usb_notify_do(enum ipa3_usb_transport_type ttype, + enum ipa_usb_notify_event event) +{ + int (*cb)(enum ipa_usb_notify_event, void *user_data); + void *user_data; + int res; + + IPA_USB_DBG("Trying to notify USB with %s\n", + ipa3_usb_notify_event_to_string(event)); + + cb = ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb; + user_data = ipa3_usb_ctx->ttype_ctx[ttype].user_data; + + if (cb) { + res = cb(event, user_data); + IPA_USB_DBG("Notified USB with %s. 
is_dpl=%d result=%d\n", + ipa3_usb_notify_event_to_string(event), + IPA3_USB_IS_TTYPE_DPL(ttype), res); + } +} + +/* + * This call-back is called from ECM or RNDIS drivers. + * Both drivers are data tethering drivers and not DPL + */ +void ipa3_usb_device_ready_notify_cb(void) +{ + IPA_USB_DBG_LOW("entry\n"); + ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, + IPA_USB_DEVICE_READY); + IPA_USB_DBG_LOW("exit\n"); +} + +static void ipa3_usb_prod_notify_cb_do(enum ipa_rm_event event, + enum ipa3_usb_transport_type ttype) +{ + struct ipa3_usb_rm_context *rm_ctx; + + IPA_USB_DBG_LOW("entry\n"); + + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + IPA_USB_DBG(":%s granted\n", + ipa_rm_resource_str(rm_ctx->prod_params.name)); + complete_all(&rm_ctx->prod_comp); + break; + case IPA_RM_RESOURCE_RELEASED: + IPA_USB_DBG(":%s released\n", + ipa_rm_resource_str(rm_ctx->prod_params.name)); + complete_all(&rm_ctx->prod_comp); + break; + } + IPA_USB_DBG_LOW("exit\n"); +} + +static void ipa3_usb_prod_notify_cb(void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH); +} + +static void ipa3_usb_dpl_dummy_prod_notify_cb(void *user_data, + enum ipa_rm_event event, unsigned long data) +{ + ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH); +} + +static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work) +{ + ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_REMOTE_WAKEUP); +} + +static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work) +{ + ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_REMOTE_WAKEUP); +} + +static int ipa3_usb_cons_request_resource_cb_do( + enum ipa3_usb_transport_type ttype, + struct work_struct *remote_wakeup_work) +{ + struct ipa3_usb_rm_context *rm_ctx; + unsigned long flags; + int result; + + IPA_USB_DBG_LOW("entry\n"); + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + IPA_USB_DBG("state is %s\n", + ipa3_usb_state_to_string( + ipa3_usb_ctx->ttype_ctx[ttype].state)); + switch (ipa3_usb_ctx->ttype_ctx[ttype].state) { + case IPA_USB_CONNECTED: + case IPA_USB_SUSPENDED_NO_RWAKEUP: + rm_ctx->cons_state = IPA_USB_CONS_GRANTED; + result = 0; + break; + case IPA_USB_SUSPEND_REQUESTED: + rm_ctx->cons_requested = true; + if (rm_ctx->cons_state == IPA_USB_CONS_GRANTED) + result = 0; + else + result = -EINPROGRESS; + break; + case IPA_USB_SUSPENDED: + if (!rm_ctx->cons_requested) { + rm_ctx->cons_requested = true; + queue_work(ipa3_usb_ctx->wq, remote_wakeup_work); + } + result = -EINPROGRESS; + break; + default: + rm_ctx->cons_requested = true; + result = -EINPROGRESS; + break; + } + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + IPA_USB_DBG_LOW("exit with %d\n", result); + return result; +} + +static int ipa3_usb_cons_request_resource_cb(void) +{ + return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_TETH, + &ipa3_usb_notify_remote_wakeup_work); +} + +static int ipa3_usb_dpl_cons_request_resource_cb(void) +{ + return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_DPL, + &ipa3_usb_dpl_notify_remote_wakeup_work); +} + +static int ipa3_usb_cons_release_resource_cb_do( + enum ipa3_usb_transport_type ttype) +{ + unsigned long flags; + struct ipa3_usb_rm_context *rm_ctx; + + IPA_USB_DBG_LOW("entry\n"); + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + IPA_USB_DBG("state is %s\n", + 
ipa3_usb_state_to_string( + ipa3_usb_ctx->ttype_ctx[ttype].state)); + switch (ipa3_usb_ctx->ttype_ctx[ttype].state) { + case IPA_USB_SUSPENDED: + /* Proceed with the suspend if no DL/DPL data */ + if (rm_ctx->cons_requested) + rm_ctx->cons_requested_released = true; + break; + case IPA_USB_SUSPEND_REQUESTED: + if (rm_ctx->cons_requested) + rm_ctx->cons_requested_released = true; + break; + case IPA_USB_STOPPED: + case IPA_USB_RESUME_IN_PROGRESS: + case IPA_USB_SUSPENDED_NO_RWAKEUP: + if (rm_ctx->cons_requested) + rm_ctx->cons_requested = false; + break; + case IPA_USB_CONNECTED: + case IPA_USB_INITIALIZED: + break; + default: + IPA_USB_ERR("received cons_release_cb in bad state: %s!\n", + ipa3_usb_state_to_string( + ipa3_usb_ctx->ttype_ctx[ttype].state)); + WARN_ON(1); + break; + } + + rm_ctx->cons_state = IPA_USB_CONS_RELEASED; + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + IPA_USB_DBG_LOW("exit\n"); + return 0; +} + +static int ipa3_usb_cons_release_resource_cb(void) +{ + return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_TETH); +} + +static int ipa3_usb_dpl_cons_release_resource_cb(void) +{ + return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_DPL); +} + +static void ipa3_usb_pm_cb(void *p, enum ipa_pm_cb_event event) +{ + struct ipa3_usb_transport_type_ctx *ttype_ctx = + (struct ipa3_usb_transport_type_ctx *)p; + unsigned long flags; + + IPA_USB_DBG_LOW("entry\n"); + + if (event != IPA_PM_REQUEST_WAKEUP) { + IPA_USB_ERR("Unexpected event %d\n", event); + WARN_ON(1); + return; + } + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + IPA_USB_DBG("state is %s\n", + ipa3_usb_state_to_string(ttype_ctx->state)); + if (ttype_ctx->state == IPA_USB_SUSPENDED) + queue_work(ipa3_usb_ctx->wq, + ttype_ctx->pm_ctx.remote_wakeup_work); + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + IPA_USB_DBG_LOW("exit\n"); +} + +static char *ipa3_usb_teth_prot_to_string(enum ipa_usb_teth_prot teth_prot) +{ + switch (teth_prot) { + case IPA_USB_RNDIS: + return "rndis_ipa"; + case IPA_USB_ECM: + return "ecm_ipa"; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + return "teth_bridge"; + case IPA_USB_DIAG: + return "dpl"; + default: + break; + } + + return "unsupported"; +} + +static char *ipa3_usb_teth_bridge_prot_to_string( + enum ipa_usb_teth_prot teth_prot) +{ + switch (teth_prot) { + case IPA_USB_RMNET: + return "rmnet"; + case IPA_USB_MBIM: + return "mbim"; + default: + break; + } + + return "unsupported"; +} + +static int ipa3_usb_init_teth_bridge(void) +{ + int result; + + result = teth_bridge_init(&ipa3_usb_ctx->teth_bridge_params); + if (result) { + IPA_USB_ERR("Failed to initialize teth_bridge\n"); + return result; + } + + return 0; +} + +static int ipa3_usb_register_pm(enum ipa3_usb_transport_type ttype) +{ + struct ipa3_usb_transport_type_ctx *ttype_ctx = + &ipa3_usb_ctx->ttype_ctx[ttype]; + int result; + + /* there is one PM resource for teth and one for DPL */ + if (!IPA3_USB_IS_TTYPE_DPL(ttype) && ipa3_usb_ctx->num_init_prot > 0) + return 0; + + memset(&ttype_ctx->pm_ctx.reg_params, 0, + sizeof(ttype_ctx->pm_ctx.reg_params)); + ttype_ctx->pm_ctx.reg_params.name = (ttype == IPA_USB_TRANSPORT_DPL) ? 
+ "USB DPL" : "USB"; + ttype_ctx->pm_ctx.reg_params.callback = ipa3_usb_pm_cb; + ttype_ctx->pm_ctx.reg_params.user_data = ttype_ctx; + ttype_ctx->pm_ctx.reg_params.group = IPA_PM_GROUP_DEFAULT; + + result = ipa_pm_register(&ttype_ctx->pm_ctx.reg_params, + &ttype_ctx->pm_ctx.hdl); + if (result) { + IPA_USB_ERR("fail to register with PM %d\n", result); + goto fail_pm_reg; + } + + result = ipa_pm_associate_ipa_cons_to_client(ttype_ctx->pm_ctx.hdl, + (ttype == IPA_USB_TRANSPORT_DPL) ? + IPA_CLIENT_USB_DPL_CONS : IPA_CLIENT_USB_CONS); + if (result) { + IPA_USB_ERR("fail to associate cons with PM %d\n", result); + goto fail_pm_cons; + } + + return 0; + +fail_pm_cons: + ipa_pm_deregister(ttype_ctx->pm_ctx.hdl); +fail_pm_reg: + memset(&ttype_ctx->pm_ctx.reg_params, 0, + sizeof(ttype_ctx->pm_ctx.reg_params)); + return result; +} + +static int ipa3_usb_deregister_pm(enum ipa3_usb_transport_type ttype) +{ + struct ipa3_usb_pm_context *pm_ctx = + &ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx; + int result; + + result = ipa_pm_deregister(pm_ctx->hdl); + if (result) + return result; + + memset(&pm_ctx->reg_params, 0, sizeof(pm_ctx->reg_params)); + return 0; +} + +static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype) +{ + struct ipa3_usb_rm_context *rm_ctx; + int result = -EFAULT; + bool created = false; + + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + + /* create PROD */ + if (!rm_ctx->prod_valid) { + rm_ctx->prod_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ? + IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD : + IPA_RM_RESOURCE_USB_PROD; + rm_ctx->prod_params.floor_voltage = IPA_VOLTAGE_SVS2; + rm_ctx->prod_params.reg_params.user_data = NULL; + rm_ctx->prod_params.reg_params.notify_cb = + IPA3_USB_IS_TTYPE_DPL(ttype) ? + ipa3_usb_dpl_dummy_prod_notify_cb : + ipa3_usb_prod_notify_cb; + rm_ctx->prod_params.request_resource = NULL; + rm_ctx->prod_params.release_resource = NULL; + result = ipa_rm_create_resource(&rm_ctx->prod_params); + if (result) { + IPA_USB_ERR("Failed to create %s RM resource\n", + ipa_rm_resource_str(rm_ctx->prod_params.name)); + return result; + } + rm_ctx->prod_valid = true; + created = true; + IPA_USB_DBG("Created %s RM resource\n", + ipa_rm_resource_str(rm_ctx->prod_params.name)); + } + + /* Create CONS */ + if (!rm_ctx->cons_valid) { + rm_ctx->cons_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ? + IPA_RM_RESOURCE_USB_DPL_CONS : + IPA_RM_RESOURCE_USB_CONS; + rm_ctx->cons_params.floor_voltage = IPA_VOLTAGE_SVS2; + rm_ctx->cons_params.reg_params.user_data = NULL; + rm_ctx->cons_params.reg_params.notify_cb = NULL; + rm_ctx->cons_params.request_resource = + IPA3_USB_IS_TTYPE_DPL(ttype) ? + ipa3_usb_dpl_cons_request_resource_cb : + ipa3_usb_cons_request_resource_cb; + rm_ctx->cons_params.release_resource = + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
+ ipa3_usb_dpl_cons_release_resource_cb : + ipa3_usb_cons_release_resource_cb; + result = ipa_rm_create_resource(&rm_ctx->cons_params); + if (result) { + IPA_USB_ERR("Failed to create %s RM resource\n", + ipa_rm_resource_str(rm_ctx->cons_params.name)); + goto create_cons_rsc_fail; + } + rm_ctx->cons_valid = true; + IPA_USB_DBG("Created %s RM resource\n", + ipa_rm_resource_str(rm_ctx->cons_params.name)); + } + + return 0; + +create_cons_rsc_fail: + if (created) { + rm_ctx->prod_valid = false; + ipa_rm_delete_resource(rm_ctx->prod_params.name); + } + return result; +} + +int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot, + struct ipa_usb_teth_params *teth_params, + int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, + void *), + void *user_data) +{ + int result = -EFAULT; + enum ipa3_usb_transport_type ttype; + struct ipa3_usb_teth_prot_context *teth_prot_ptr; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE || + ((teth_prot == IPA_USB_RNDIS || teth_prot == IPA_USB_ECM) && + teth_params == NULL) || ipa_usb_notify_cb == NULL || + user_data == NULL) { + IPA_USB_ERR("bad parameters\n"); + result = -EINVAL; + goto bad_params; + } + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_INIT_TETH_PROT, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + result = -EPERM; + goto bad_params; + } + + /* Create IPA RM USB resources */ + teth_prot_ptr = &ipa3_usb_ctx->teth_prot_ctx[teth_prot]; + if (ipa_pm_is_used()) + result = ipa3_usb_register_pm(ttype); + else + result = ipa3_usb_create_rm_resources(ttype); + if (result) { + IPA_USB_ERR("Failed creating IPA RM USB resources\n"); + goto bad_params; + } + + if (!ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb) { + ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = + ipa_usb_notify_cb; + } else if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + if (ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb != + ipa_usb_notify_cb) { + IPA_USB_ERR("Got different notify_cb\n"); + result = -EINVAL; + goto bad_params; + } + } else { + IPA_USB_ERR("Already has dpl_notify_cb\n"); + result = -EINVAL; + goto bad_params; + } + + /* Initialize tethering protocol */ + switch (teth_prot) { + case IPA_USB_RNDIS: + case IPA_USB_ECM: + if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state != + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_DBG("%s already initialized\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + result = -EPERM; + goto bad_params; + } + ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data; + if (teth_prot == IPA_USB_RNDIS) { + struct ipa_usb_init_params *rndis_ptr = + &teth_prot_ptr->teth_prot_params.rndis; + + rndis_ptr->device_ready_notify = + ipa3_usb_device_ready_notify_cb; + memcpy(rndis_ptr->host_ethaddr, + teth_params->host_ethaddr, + sizeof(teth_params->host_ethaddr)); + memcpy(rndis_ptr->device_ethaddr, + teth_params->device_ethaddr, + sizeof(teth_params->device_ethaddr)); + + result = rndis_ipa_init(rndis_ptr); + if (result) { + IPA_USB_ERR("Failed to initialize %s\n", + ipa3_usb_teth_prot_to_string( + teth_prot)); + goto teth_prot_init_fail; + } + } else { + struct ecm_ipa_params *ecm_ptr = + &teth_prot_ptr->teth_prot_params.ecm; + + ecm_ptr->device_ready_notify = + ipa3_usb_device_ready_notify_cb; + memcpy(ecm_ptr->host_ethaddr, + teth_params->host_ethaddr, + sizeof(teth_params->host_ethaddr)); + memcpy(ecm_ptr->device_ethaddr, + teth_params->device_ethaddr, + sizeof(teth_params->device_ethaddr)); + + result = ecm_ipa_init(ecm_ptr); + 
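+ /*
+ * As with the rndis_ipa_init() branch above, a failure here jumps to
+ * teth_prot_init_fail, which releases the PM/RM resources created
+ * earlier when no other data-tethering protocol is still initialized.
+ */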
if (result) { + IPA_USB_ERR("Failed to initialize %s\n", + ipa3_usb_teth_prot_to_string( + teth_prot)); + goto teth_prot_init_fail; + } + } + teth_prot_ptr->state = + IPA_USB_TETH_PROT_INITIALIZED; + ipa3_usb_ctx->num_init_prot++; + IPA_USB_DBG("initialized %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state != + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_DBG("%s already initialized\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + result = -EPERM; + goto bad_params; + } + ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data; + result = ipa3_usb_init_teth_bridge(); + if (result) + goto teth_prot_init_fail; + ipa3_usb_ctx->teth_prot_ctx[teth_prot].state = + IPA_USB_TETH_PROT_INITIALIZED; + ipa3_usb_ctx->num_init_prot++; + IPA_USB_DBG("initialized %s %s\n", + ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + break; + case IPA_USB_DIAG: + if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state != + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_DBG("DPL already initialized\n"); + result = -EPERM; + goto bad_params; + } + ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data; + ipa3_usb_ctx->teth_prot_ctx[teth_prot].state = + IPA_USB_TETH_PROT_INITIALIZED; + IPA_USB_DBG("initialized DPL\n"); + break; + default: + IPA_USB_ERR("unexpected tethering protocol\n"); + result = -EINVAL; + goto bad_params; + } + + if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype)) + IPA_USB_ERR("failed to change state to initialized\n"); + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +teth_prot_init_fail: + if ((IPA3_USB_IS_TTYPE_DPL(ttype)) + || (ipa3_usb_ctx->num_init_prot == 0)) { + if (ipa_pm_is_used()) { + ipa3_usb_deregister_pm(ttype); + } else { + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = + false; + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = + false; + ipa_rm_delete_resource( + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name); + ipa_rm_delete_resource( + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name); + } + } +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; +} +EXPORT_SYMBOL(ipa_usb_init_teth_prot); + +void ipa3_usb_gsi_evt_err_cb(struct gsi_evt_err_notify *notify) +{ + IPA_USB_DBG_LOW("entry\n"); + if (!notify) + return; + IPA_USB_ERR("Received event error %d, description: %d\n", + notify->evt_id, notify->err_desc); + IPA_USB_DBG_LOW("exit\n"); +} + +void ipa3_usb_gsi_chan_err_cb(struct gsi_chan_err_notify *notify) +{ + IPA_USB_DBG_LOW("entry\n"); + if (!notify) + return; + IPA_USB_ERR("Received channel error %d, description: %d\n", + notify->evt_id, notify->err_desc); + IPA_USB_DBG_LOW("exit\n"); +} + +static bool ipa3_usb_check_chan_params(struct ipa_usb_xdci_chan_params *params) +{ + IPA_USB_DBG_LOW("gevntcount_low_addr = %x\n", + params->gevntcount_low_addr); + IPA_USB_DBG_LOW("gevntcount_hi_addr = %x\n", + params->gevntcount_hi_addr); + IPA_USB_DBG_LOW("dir = %d\n", params->dir); + IPA_USB_DBG_LOW("xfer_ring_len = %d\n", params->xfer_ring_len); + IPA_USB_DBG_LOW("last_trb_addr_iova = %x\n", + params->xfer_scratch.last_trb_addr_iova); + IPA_USB_DBG_LOW("const_buffer_size = %d\n", + params->xfer_scratch.const_buffer_size); + IPA_USB_DBG_LOW("depcmd_low_addr = %x\n", + params->xfer_scratch.depcmd_low_addr); + IPA_USB_DBG_LOW("depcmd_hi_addr = %x\n", + params->xfer_scratch.depcmd_hi_addr); + + if (params->client >= IPA_CLIENT_MAX || + params->teth_prot < 0 || 
+ params->teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE || + params->xfer_ring_len % GSI_CHAN_RE_SIZE_16B || + params->xfer_scratch.const_buffer_size < 1 || + params->xfer_scratch.const_buffer_size > 31) { + IPA_USB_ERR("Invalid params\n"); + return false; + } + switch (params->teth_prot) { + case IPA_USB_DIAG: + if (!IPA_CLIENT_IS_CONS(params->client)) { + IPA_USB_ERR("DPL supports only DL channel\n"); + return false; + } + case IPA_USB_RNDIS: + case IPA_USB_ECM: + if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state == + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_ERR("%s is not initialized\n", + ipa3_usb_teth_prot_to_string( + params->teth_prot)); + return false; + } + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state == + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_ERR("%s is not initialized\n", + ipa3_usb_teth_bridge_prot_to_string( + params->teth_prot)); + return false; + } + break; + default: + IPA_USB_ERR("Unknown tethering protocol (%d)\n", + params->teth_prot); + return false; + } + return true; +} + +static int ipa3_usb_smmu_map_xdci_channel( + struct ipa_usb_xdci_chan_params *params, bool map) +{ + int result; + u32 gevntcount_r = rounddown(params->gevntcount_low_addr, PAGE_SIZE); + u32 xfer_scratch_r = + rounddown(params->xfer_scratch.depcmd_low_addr, PAGE_SIZE); + + if (gevntcount_r != xfer_scratch_r) { + IPA_USB_ERR("No support more than 1 page map for USB regs\n"); + WARN_ON(1); + return -EINVAL; + } + + if (map) { + if (ipa3_usb_ctx->smmu_reg_map.cnt == 0) { + ipa3_usb_ctx->smmu_reg_map.addr = gevntcount_r; + result = ipa3_smmu_map_peer_reg( + ipa3_usb_ctx->smmu_reg_map.addr, true, + IPA_SMMU_CB_AP); + if (result) { + IPA_USB_ERR("failed to map USB regs %d\n", + result); + return result; + } + } else { + if (gevntcount_r != ipa3_usb_ctx->smmu_reg_map.addr) { + IPA_USB_ERR( + "No support for map different reg\n"); + return -EINVAL; + } + } + ipa3_usb_ctx->smmu_reg_map.cnt++; + } else { + if (gevntcount_r != ipa3_usb_ctx->smmu_reg_map.addr) { + IPA_USB_ERR( + "No support for map different reg\n"); + return -EINVAL; + } + + if (ipa3_usb_ctx->smmu_reg_map.cnt == 1) { + result = ipa3_smmu_map_peer_reg( + ipa3_usb_ctx->smmu_reg_map.addr, false, + IPA_SMMU_CB_AP); + if (result) { + IPA_USB_ERR("failed to unmap USB regs %d\n", + result); + return result; + } + } + ipa3_usb_ctx->smmu_reg_map.cnt--; + } + + + result = ipa3_smmu_map_peer_buff(params->xfer_ring_base_addr_iova, + params->xfer_ring_len, map, params->sgt_xfer_rings, + IPA_SMMU_CB_AP); + if (result) { + IPA_USB_ERR("failed to map Xfer ring %d\n", result); + return result; + } + + result = ipa3_smmu_map_peer_buff(params->data_buff_base_addr_iova, + params->data_buff_base_len, map, params->sgt_data_buff, + IPA_SMMU_CB_AP); + if (result) { + IPA_USB_ERR("failed to map TRBs buff %d\n", result); + return result; + } + + return 0; +} + +static int ipa3_usb_request_xdci_channel( + struct ipa_usb_xdci_chan_params *params, + enum ipa_usb_direction dir, + struct ipa_req_chan_out_params *out_params) +{ + int result = -EFAULT; + struct ipa_request_gsi_channel_params chan_params; + enum ipa3_usb_transport_type ttype; + enum ipa_usb_teth_prot teth_prot; + struct ipa_usb_init_params *rndis_ptr; + struct ecm_ipa_params *ecm_ptr; + struct ipa_usb_xdci_chan_params *xdci_ch_params; + + IPA_USB_DBG_LOW("entry\n"); + if (params == NULL || out_params == NULL || + !ipa3_usb_check_chan_params(params)) { + IPA_USB_ERR("bad parameters\n"); + return -EINVAL; + } + + ttype = 
IPA3_USB_GET_TTYPE(params->teth_prot); + teth_prot = params->teth_prot; + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_REQUEST_CHANNEL, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + return -EPERM; + } + + rndis_ptr = + &ipa3_usb_ctx->teth_prot_ctx[teth_prot].teth_prot_params.rndis; + ecm_ptr = + &ipa3_usb_ctx->teth_prot_ctx[teth_prot].teth_prot_params.ecm; + + memset(&chan_params, 0, sizeof(struct ipa_request_gsi_channel_params)); + memcpy(&chan_params.ipa_ep_cfg, ¶ms->ipa_ep_cfg, + sizeof(struct ipa_ep_cfg)); + chan_params.client = params->client; + switch (params->teth_prot) { + case IPA_USB_RNDIS: + chan_params.priv = rndis_ptr->private; + if (params->dir == GSI_CHAN_DIR_FROM_GSI) + chan_params.notify = rndis_ptr->ipa_tx_notify; + else + chan_params.notify = rndis_ptr->ipa_rx_notify; + chan_params.skip_ep_cfg = rndis_ptr->skip_ep_cfg; + break; + case IPA_USB_ECM: + chan_params.priv = ecm_ptr->private; + if (params->dir == GSI_CHAN_DIR_FROM_GSI) + chan_params.notify = ecm_ptr->ecm_ipa_tx_dp_notify; + else + chan_params.notify = ecm_ptr->ecm_ipa_rx_dp_notify; + chan_params.skip_ep_cfg = ecm_ptr->skip_ep_cfg; + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + chan_params.priv = + ipa3_usb_ctx->teth_bridge_params.private_data; + chan_params.notify = + ipa3_usb_ctx->teth_bridge_params.usb_notify_cb; + chan_params.skip_ep_cfg = + ipa3_usb_ctx->teth_bridge_params.skip_ep_cfg; + break; + case IPA_USB_DIAG: + chan_params.priv = NULL; + chan_params.notify = NULL; + chan_params.skip_ep_cfg = true; + break; + default: + break; + } + + result = ipa3_usb_smmu_map_xdci_channel(params, true); + if (result) { + IPA_USB_ERR("failed to smmu map %d\n", result); + return result; + } + + /* store channel params for SMMU unmap */ + if (dir == IPA_USB_DIR_UL) + xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].ul_ch_params; + else + xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].dl_ch_params; + + *xdci_ch_params = *params; + result = ipa_smmu_store_sgt( + &xdci_ch_params->sgt_xfer_rings, + params->sgt_xfer_rings); + if (result) + return result; + + result = ipa_smmu_store_sgt( + &xdci_ch_params->sgt_data_buff, + params->sgt_data_buff); + if (result) { + ipa_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings); + return result; + } + chan_params.keep_ipa_awake = params->keep_ipa_awake; + chan_params.evt_ring_params.intf = GSI_EVT_CHTYPE_XDCI_EV; + chan_params.evt_ring_params.intr = GSI_INTR_IRQ; + chan_params.evt_ring_params.re_size = GSI_EVT_RING_RE_SIZE_16B; + chan_params.evt_ring_params.ring_len = params->xfer_ring_len - + chan_params.evt_ring_params.re_size; + chan_params.evt_ring_params.ring_base_addr = + params->xfer_ring_base_addr_iova; + chan_params.evt_ring_params.ring_base_vaddr = NULL; + chan_params.evt_ring_params.int_modt = 0; + chan_params.evt_ring_params.int_modt = 0; + chan_params.evt_ring_params.intvec = 0; + chan_params.evt_ring_params.msi_addr = 0; + chan_params.evt_ring_params.rp_update_addr = 0; + chan_params.evt_ring_params.exclusive = true; + chan_params.evt_ring_params.err_cb = ipa3_usb_gsi_evt_err_cb; + chan_params.evt_ring_params.user_data = NULL; + chan_params.evt_scratch.xdci.gevntcount_low_addr = + params->gevntcount_low_addr; + chan_params.evt_scratch.xdci.gevntcount_hi_addr = + params->gevntcount_hi_addr; + chan_params.chan_params.prot = GSI_CHAN_PROT_XDCI; + chan_params.chan_params.dir = params->dir; + /* chan_id is set in ipa3_request_gsi_channel() */ + chan_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B; + chan_params.chan_params.ring_len = params->xfer_ring_len; + 
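+ /*
+ * The GSI channel ring is the xDCI transfer ring supplied by the USB
+ * driver: same IOVA base and length, with 16B ring elements.
+ */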
chan_params.chan_params.ring_base_addr = + params->xfer_ring_base_addr_iova; + chan_params.chan_params.ring_base_vaddr = NULL; + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + chan_params.chan_params.use_db_eng = GSI_CHAN_DIRECT_MODE; + else + chan_params.chan_params.use_db_eng = GSI_CHAN_DB_MODE; + chan_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG; + if (params->dir == GSI_CHAN_DIR_FROM_GSI) + chan_params.chan_params.low_weight = + IPA_USB_DL_CHAN_LOW_WEIGHT; + else + chan_params.chan_params.low_weight = + IPA_USB_UL_CHAN_LOW_WEIGHT; + chan_params.chan_params.xfer_cb = NULL; + chan_params.chan_params.err_cb = ipa3_usb_gsi_chan_err_cb; + chan_params.chan_params.chan_user_data = NULL; + chan_params.chan_scratch.xdci.last_trb_addr = + params->xfer_scratch.last_trb_addr_iova; + /* xferrscidx will be updated later */ + chan_params.chan_scratch.xdci.xferrscidx = 0; + chan_params.chan_scratch.xdci.const_buffer_size = + params->xfer_scratch.const_buffer_size; + chan_params.chan_scratch.xdci.depcmd_low_addr = + params->xfer_scratch.depcmd_low_addr; + chan_params.chan_scratch.xdci.depcmd_hi_addr = + params->xfer_scratch.depcmd_hi_addr; + + /* + * Update scratch for MCS smart prefetch: + * Starting IPA4.5, smart prefetch implemented by H/W. + * At IPA 4.0/4.1/4.2, we do not use MCS smart prefetch + * so keep the fields zero. + */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + chan_params.chan_scratch.xdci.outstanding_threshold = + ((params->teth_prot == IPA_USB_MBIM) ? 1 : 2) * + chan_params.chan_params.re_size; + } + /* max_outstanding_tre is set in ipa3_request_gsi_channel() */ + + result = ipa3_request_gsi_channel(&chan_params, out_params); + if (result) { + IPA_USB_ERR("failed to allocate GSI channel\n"); + ipa3_usb_smmu_map_xdci_channel(params, false); + return result; + } + + IPA_USB_DBG_LOW("exit\n"); + return 0; +} + +static int ipa3_usb_release_xdci_channel(u32 clnt_hdl, + enum ipa_usb_direction dir, + enum ipa3_usb_transport_type ttype) +{ + int result = 0; + struct ipa_usb_xdci_chan_params *xdci_ch_params; + + IPA_USB_DBG_LOW("entry\n"); + if (ttype < 0 || ttype >= IPA_USB_TRANSPORT_MAX) { + IPA_USB_ERR("bad parameter\n"); + return -EINVAL; + } + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_RELEASE_CHANNEL, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + return -EPERM; + } + + /* Release channel */ + result = ipa3_release_gsi_channel(clnt_hdl); + if (result) { + IPA_USB_ERR("failed to deallocate channel\n"); + return result; + } + + if (dir == IPA_USB_DIR_UL) + xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].ul_ch_params; + else + xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].dl_ch_params; + + result = ipa3_usb_smmu_map_xdci_channel(xdci_ch_params, false); + + if (xdci_ch_params->sgt_xfer_rings != NULL) + ipa_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings); + if (xdci_ch_params->sgt_data_buff != NULL) + ipa_smmu_free_sgt(&xdci_ch_params->sgt_data_buff); + + /* Change ipa_usb state to INITIALIZED */ + if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype)) + IPA_USB_ERR("failed to change state to initialized\n"); + + IPA_USB_DBG_LOW("exit\n"); + return 0; +} + +static int ipa3_usb_request_prod(enum ipa3_usb_transport_type ttype) +{ + int result; + struct ipa3_usb_rm_context *rm_ctx; + const char *rsrc_str; + + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name); + + IPA_USB_DBG_LOW("requesting %s\n", rsrc_str); + init_completion(&rm_ctx->prod_comp); + result = ipa_rm_request_resource(rm_ctx->prod_params.name); + if 
(result) { + if (result != -EINPROGRESS) { + IPA_USB_ERR("failed to request %s: %d\n", + rsrc_str, result); + return result; + } + result = wait_for_completion_timeout(&rm_ctx->prod_comp, + msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC)); + if (result == 0) { + IPA_USB_ERR("timeout request %s\n", rsrc_str); + return -ETIME; + } + } + + IPA_USB_DBG_LOW("%s granted\n", rsrc_str); + return 0; +} + +static int ipa3_usb_release_prod(enum ipa3_usb_transport_type ttype) +{ + int result; + struct ipa3_usb_rm_context *rm_ctx; + const char *rsrc_str; + + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name); + + IPA_USB_DBG_LOW("releasing %s\n", rsrc_str); + + init_completion(&rm_ctx->prod_comp); + result = ipa_rm_release_resource(rm_ctx->prod_params.name); + if (result) { + if (result != -EINPROGRESS) { + IPA_USB_ERR("failed to release %s: %d\n", + rsrc_str, result); + return result; + } + result = wait_for_completion_timeout(&rm_ctx->prod_comp, + msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC)); + if (result == 0) { + IPA_USB_ERR("timeout release %s\n", rsrc_str); + return -ETIME; + } + } + + IPA_USB_DBG_LOW("%s released\n", rsrc_str); + return 0; +} + +static bool ipa3_usb_check_connect_params( + struct ipa_usb_xdci_connect_params_internal *params) +{ + IPA_USB_DBG_LOW("ul xferrscidx = %d\n", params->usb_to_ipa_xferrscidx); + IPA_USB_DBG_LOW("dl xferrscidx = %d\n", params->ipa_to_usb_xferrscidx); + IPA_USB_DBG_LOW("max_supported_bandwidth_mbps = %d\n", + params->max_supported_bandwidth_mbps); + + if (params->max_pkt_size < IPA_USB_FULL_SPEED_64B || + params->max_pkt_size > IPA_USB_SUPER_SPEED_1024B || + params->ipa_to_usb_xferrscidx > 127 || + (params->teth_prot != IPA_USB_DIAG && + (params->usb_to_ipa_xferrscidx > 127)) || + params->teth_prot < 0 || + params->teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) { + IPA_USB_ERR("Invalid params\n"); + return false; + } + + if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state == + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_ERR("%s is not initialized\n", + ipa3_usb_teth_prot_to_string( + params->teth_prot)); + return false; + } + + return true; +} + +static int ipa3_usb_connect_teth_bridge( + struct teth_bridge_connect_params *params) +{ + int result; + + result = teth_bridge_connect(params); + if (result) { + IPA_USB_ERR("failed to connect teth_bridge (%s)\n", + params->tethering_mode == TETH_TETHERING_MODE_RMNET ? + "rmnet" : "mbim"); + return result; + } + + return 0; +} + +static int ipa3_usb_connect_dpl(void) +{ + int res = 0; + + if (ipa_pm_is_used()) + return 0; + + /* + * Add DPL dependency to RM dependency graph, first add_dependency call + * is sync in order to make sure the IPA clocks are up before we + * continue and notify the USB driver it may continue. + */ + res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res < 0) { + IPA_USB_ERR("ipa_rm_add_dependency_sync() failed\n"); + return res; + } + + /* + * this add_dependency call can't be sync since it will block until DPL + * status is connected (which can happen only later in the flow), + * the clocks are already up so the call doesn't need to block. 
+ */ + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_DPL_CONS); + if (res < 0 && res != -EINPROGRESS) { + IPA_USB_ERR("ipa_rm_add_dependency() failed\n"); + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, + IPA_RM_RESOURCE_Q6_CONS); + return res; + } + + return 0; +} + +static int ipa3_usb_connect_teth_prot(enum ipa_usb_teth_prot teth_prot) +{ + int result; + struct teth_bridge_connect_params teth_bridge_params; + struct ipa3_usb_teth_prot_conn_params *teth_conn_params; + enum ipa3_usb_transport_type ttype; + struct ipa3_usb_teth_prot_context *teth_prot_ptr = + &ipa3_usb_ctx->teth_prot_ctx[teth_prot]; + + IPA_USB_DBG("connecting protocol = %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + teth_conn_params = &(ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params); + + switch (teth_prot) { + case IPA_USB_RNDIS: + if (teth_prot_ptr->state == + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is already connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + } + ipa3_usb_ctx->ttype_ctx[ttype].user_data = + teth_prot_ptr->user_data; + result = rndis_ipa_pipe_connect_notify( + teth_conn_params->usb_to_ipa_clnt_hdl, + teth_conn_params->ipa_to_usb_clnt_hdl, + teth_conn_params->params.max_xfer_size_bytes_to_dev, + teth_conn_params->params.max_packet_number_to_dev, + teth_conn_params->params.max_xfer_size_bytes_to_host, + teth_prot_ptr->teth_prot_params.rndis.private); + if (result) { + IPA_USB_ERR("failed to connect %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; + return result; + } + teth_prot_ptr->state = + IPA_USB_TETH_PROT_CONNECTED; + IPA_USB_DBG("%s is connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + case IPA_USB_ECM: + if (teth_prot_ptr->state == + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is already connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + } + ipa3_usb_ctx->ttype_ctx[ttype].user_data = + teth_prot_ptr->user_data; + result = ecm_ipa_connect(teth_conn_params->usb_to_ipa_clnt_hdl, + teth_conn_params->ipa_to_usb_clnt_hdl, + teth_prot_ptr->teth_prot_params.ecm.private); + if (result) { + IPA_USB_ERR("failed to connect %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; + return result; + } + teth_prot_ptr->state = + IPA_USB_TETH_PROT_CONNECTED; + IPA_USB_DBG("%s is connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + if (teth_prot_ptr->state == + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is already connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + } + result = ipa3_usb_init_teth_bridge(); + if (result) + return result; + + ipa3_usb_ctx->ttype_ctx[ttype].user_data = + teth_prot_ptr->user_data; + teth_bridge_params.ipa_usb_pipe_hdl = + teth_conn_params->ipa_to_usb_clnt_hdl; + teth_bridge_params.usb_ipa_pipe_hdl = + teth_conn_params->usb_to_ipa_clnt_hdl; + teth_bridge_params.tethering_mode = + (teth_prot == IPA_USB_RMNET) ? 
+ (TETH_TETHERING_MODE_RMNET):(TETH_TETHERING_MODE_MBIM); + teth_bridge_params.client_type = IPA_CLIENT_USB_PROD; + result = ipa3_usb_connect_teth_bridge(&teth_bridge_params); + if (result) { + ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; + return result; + } + ipa3_usb_ctx->teth_prot_ctx[teth_prot].state = + IPA_USB_TETH_PROT_CONNECTED; + ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY); + IPA_USB_DBG("%s (%s) is connected\n", + ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + break; + case IPA_USB_DIAG: + if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state == + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is already connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + } + + ipa3_usb_ctx->ttype_ctx[ttype].user_data = + ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data; + result = ipa3_usb_connect_dpl(); + if (result) { + IPA_USB_ERR("Failed connecting DPL result=%d\n", + result); + ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; + return result; + } + ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state = + IPA_USB_TETH_PROT_CONNECTED; + ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY); + IPA_USB_DBG("%s is connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + default: + IPA_USB_ERR("Invalid tethering protocol\n"); + return -EFAULT; + } + + return 0; +} + +static int ipa3_usb_disconnect_teth_bridge(void) +{ + int result; + + result = teth_bridge_disconnect(IPA_CLIENT_USB_PROD); + if (result) { + IPA_USB_ERR("failed to disconnect teth_bridge\n"); + return result; + } + + return 0; +} + +static int ipa3_usb_disconnect_dpl(void) +{ + int res; + + if (ipa_pm_is_used()) + return 0; + + /* Remove DPL RM dependency */ + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res) + IPA_USB_ERR("deleting DPL_DUMMY_PROD rsrc dependency fail\n"); + + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_DPL_CONS); + if (res) + IPA_USB_ERR("deleting DPL_CONS rsrc dependencty fail\n"); + + return 0; +} + +static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot) +{ + int result = 0; + enum ipa3_usb_transport_type ttype; + struct ipa3_usb_teth_prot_context *teth_prot_ptr = + &ipa3_usb_ctx->teth_prot_ctx[teth_prot]; + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + switch (teth_prot) { + case IPA_USB_RNDIS: + case IPA_USB_ECM: + if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state != + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is not connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + return -EPERM; + } + if (teth_prot == IPA_USB_RNDIS) { + result = rndis_ipa_pipe_disconnect_notify( + teth_prot_ptr->teth_prot_params.rndis.private); + } else { + result = ecm_ipa_disconnect( + teth_prot_ptr->teth_prot_params.ecm.private); + } + if (result) { + IPA_USB_ERR("failed to disconnect %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + } + teth_prot_ptr->state = IPA_USB_TETH_PROT_INITIALIZED; + IPA_USB_DBG("disconnected %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + if (teth_prot_ptr->state != IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s (%s) is not connected\n", + ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + return -EPERM; + } + result = ipa3_usb_disconnect_teth_bridge(); + if (result) + break; + + teth_prot_ptr->state = IPA_USB_TETH_PROT_INITIALIZED; + IPA_USB_DBG("disconnected %s (%s)\n", + 
ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + break; + case IPA_USB_DIAG: + if (teth_prot_ptr->state != IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is not connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + return -EPERM; + } + result = ipa3_usb_disconnect_dpl(); + if (result) + break; + teth_prot_ptr->state = IPA_USB_TETH_PROT_INITIALIZED; + IPA_USB_DBG("disconnected %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + default: + break; + } + + ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; + return result; +} + +static int ipa3_usb_xdci_connect_internal( + struct ipa_usb_xdci_connect_params_internal *params) +{ + int result = -EFAULT; + struct ipa_rm_perf_profile profile; + enum ipa3_usb_transport_type ttype; + struct ipa3_usb_teth_prot_conn_params *teth_prot_ptr; + struct ipa3_usb_rm_context *rm_ctx_ptr; + struct ipa3_usb_transport_type_ctx *t_ctx_ptr; + + IPA_USB_DBG_LOW("entry\n"); + if (params == NULL || !ipa3_usb_check_connect_params(params)) { + IPA_USB_ERR("bad parameters\n"); + return -EINVAL; + } + + ttype = (params->teth_prot == IPA_USB_DIAG) ? IPA_USB_TRANSPORT_DPL : + IPA_USB_TRANSPORT_TETH; + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_CONNECT, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + return -EPERM; + } + + teth_prot_ptr = &ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params; + teth_prot_ptr->ipa_to_usb_clnt_hdl = params->ipa_to_usb_clnt_hdl; + rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) + teth_prot_ptr->usb_to_ipa_clnt_hdl = + params->usb_to_ipa_clnt_hdl; + teth_prot_ptr->params = params->teth_prot_params; + + /* Set EE xDCI specific scratch */ + result = ipa3_set_usb_max_packet_size(params->max_pkt_size); + if (result) { + IPA_USB_ERR("failed setting xDCI EE scratch field\n"); + return result; + } + + if (ipa_pm_is_used()) { + /* perf profile is not set on USB DPL pipe */ + if (ttype != IPA_USB_TRANSPORT_DPL) { + result = ipa_pm_set_throughput( + ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl, + params->max_supported_bandwidth_mbps); + if (result) { + IPA_USB_ERR("failed to set perf profile\n"); + return result; + } + } + + result = ipa_pm_activate_sync( + ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl); + if (result) { + IPA_USB_ERR("failed to activate pm\n"); + return result; + } + } else { + /* Set RM PROD & CONS perf profile */ + profile.max_supported_bandwidth_mbps = + params->max_supported_bandwidth_mbps; + result = ipa_rm_set_perf_profile( + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name, + &profile); + + t_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[ttype]; + + if (result) { + IPA_USB_ERR("failed to set %s perf profile\n", + ipa_rm_resource_str( + t_ctx_ptr->rm_ctx.prod_params.name)); + return result; + } + + result = ipa_rm_set_perf_profile( + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name, + &profile); + + if (result) { + IPA_USB_ERR("failed to set %s perf profile\n", + ipa_rm_resource_str( + t_ctx_ptr->rm_ctx.cons_params.name)); + return result; + } + + /* Request PROD */ + result = ipa3_usb_request_prod(ttype); + if (result) + return result; + } + + if (params->teth_prot != IPA_USB_DIAG) { + /* Start UL channel */ + result = ipa3_xdci_start(params->usb_to_ipa_clnt_hdl, + params->usb_to_ipa_xferrscidx, + params->usb_to_ipa_xferrscidx_valid); + if (result) { + IPA_USB_ERR("failed to connect UL channel\n"); + goto connect_ul_fail; + } + } + + /* Start DL/DPL channel */ + result = ipa3_xdci_start(params->ipa_to_usb_clnt_hdl, + 
params->ipa_to_usb_xferrscidx, + params->ipa_to_usb_xferrscidx_valid); + if (result) { + IPA_USB_ERR("failed to connect DL/DPL channel\n"); + goto connect_dl_fail; + } + + /* Connect tethering protocol */ + result = ipa3_usb_connect_teth_prot(params->teth_prot); + if (result) { + IPA_USB_ERR("failed to connect teth protocol\n"); + goto connect_teth_prot_fail; + } + + if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) { + IPA_USB_ERR( + "failed to change state to connected\n"); + goto state_change_connected_fail; + } + + IPA_USB_DBG_LOW("exit\n"); + return 0; + +state_change_connected_fail: + ipa3_usb_disconnect_teth_prot(params->teth_prot); +connect_teth_prot_fail: + ipa3_xdci_disconnect(params->ipa_to_usb_clnt_hdl, false, -1); + ipa3_reset_gsi_channel(params->ipa_to_usb_clnt_hdl); + ipa3_reset_gsi_event_ring(params->ipa_to_usb_clnt_hdl); +connect_dl_fail: + if (params->teth_prot != IPA_USB_DIAG) { + ipa3_xdci_disconnect(params->usb_to_ipa_clnt_hdl, false, -1); + ipa3_reset_gsi_channel(params->usb_to_ipa_clnt_hdl); + ipa3_reset_gsi_event_ring(params->usb_to_ipa_clnt_hdl); + } +connect_ul_fail: + if (ipa_pm_is_used()) + ipa_pm_deactivate_sync( + ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl); + else + ipa3_usb_release_prod(ttype); + return result; +} + +#ifdef CONFIG_DEBUG_FS +static char dbg_buff[IPA_USB_MAX_MSG_LEN]; + +static char *ipa3_usb_cons_state_to_string(enum ipa3_usb_cons_state state) +{ + switch (state) { + case IPA_USB_CONS_GRANTED: + return "CONS_GRANTED"; + case IPA_USB_CONS_RELEASED: + return "CONS_RELEASED"; + } + + return "UNSUPPORTED"; +} + +static int ipa3_usb_get_status_dbg_info(struct ipa3_usb_status_dbg_info *status) +{ + int res; + int i; + unsigned long flags; + struct ipa3_usb_rm_context *rm_ctx_ptr; + + IPA_USB_DBG_LOW("entry\n"); + + if (ipa3_usb_ctx == NULL) { + IPA_USB_ERR("IPA USB was not inited yet\n"); + return -EFAULT; + } + + mutex_lock(&ipa3_usb_ctx->general_mutex); + + if (!status) { + IPA_USB_ERR("Invalid input\n"); + res = -EINVAL; + goto bail; + } + + memset(status, 0, sizeof(struct ipa3_usb_status_dbg_info)); + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].rm_ctx; + status->teth_state = ipa3_usb_state_to_string( + ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].state); + status->dpl_state = ipa3_usb_state_to_string( + ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].state); + if (rm_ctx_ptr->cons_valid) + status->teth_cons_state = ipa3_usb_cons_state_to_string( + rm_ctx_ptr->cons_state); + rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].rm_ctx; + if (rm_ctx_ptr->cons_valid) + status->dpl_cons_state = ipa3_usb_cons_state_to_string( + rm_ctx_ptr->cons_state); + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + + for (i = 0 ; i < IPA_USB_MAX_TETH_PROT_SIZE ; i++) { + if (ipa3_usb_ctx->teth_prot_ctx[i].state == + IPA_USB_TETH_PROT_INITIALIZED) { + if ((i == IPA_USB_RMNET) || (i == IPA_USB_MBIM)) + status->inited_prots[status->num_init_prot++] = + ipa3_usb_teth_bridge_prot_to_string(i); + else + status->inited_prots[status->num_init_prot++] = + ipa3_usb_teth_prot_to_string(i); + } else if (ipa3_usb_ctx->teth_prot_ctx[i].state == + IPA_USB_TETH_PROT_CONNECTED) { + switch (i) { + case IPA_USB_RMNET: + case IPA_USB_MBIM: + status->teth_connected_prot = + ipa3_usb_teth_bridge_prot_to_string(i); + break; + case IPA_USB_DIAG: + status->dpl_connected_prot = + ipa3_usb_teth_prot_to_string(i); + break; + default: + status->teth_connected_prot = + 
ipa3_usb_teth_prot_to_string(i); + } + } + } + + res = 0; + IPA_USB_DBG_LOW("exit\n"); +bail: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return res; +} + +static ssize_t ipa3_read_usb_state_info(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ipa3_usb_status_dbg_info status; + int result; + int nbytes; + int cnt = 0; + int i; + + result = ipa3_usb_get_status_dbg_info(&status); + if (result) { + nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN, + "Fail to read IPA USB status\n"); + cnt += nbytes; + } else { + nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN, + "Tethering Data State: %s\n" + "DPL State: %s\n" + "Protocols in Initialized State: ", + status.teth_state, + status.dpl_state); + cnt += nbytes; + + for (i = 0 ; i < status.num_init_prot ; i++) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_USB_MAX_MSG_LEN - cnt, + "%s ", status.inited_prots[i]); + cnt += nbytes; + } + nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt, + status.num_init_prot ? "\n" : "None\n"); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt, + "Protocols in Connected State: "); + cnt += nbytes; + if (status.teth_connected_prot) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_USB_MAX_MSG_LEN - cnt, + "%s ", status.teth_connected_prot); + cnt += nbytes; + } + if (status.dpl_connected_prot) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_USB_MAX_MSG_LEN - cnt, + "%s ", status.dpl_connected_prot); + cnt += nbytes; + } + nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt, + (status.teth_connected_prot || + status.dpl_connected_prot) ? "\n" : "None\n"); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt, + "USB Tethering Consumer State: %s\n", + status.teth_cons_state ? + status.teth_cons_state : "Invalid"); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt, + "DPL Consumer State: %s\n", + status.dpl_cons_state ? 
status.dpl_cons_state : + "Invalid"); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +const struct file_operations ipa3_ipa_usb_ops = { + .read = ipa3_read_usb_state_info, +}; + +static void ipa_usb_debugfs_init(void) +{ + const mode_t read_only_mode = 0444; + + ipa3_usb_ctx->dent = debugfs_create_dir("ipa_usb", 0); + if (IS_ERR(ipa3_usb_ctx->dent)) { + pr_err("fail to create folder in debug_fs\n"); + return; + } + + ipa3_usb_ctx->dfile_state_info = debugfs_create_file("state_info", + read_only_mode, ipa3_usb_ctx->dent, 0, + &ipa3_ipa_usb_ops); + if (!ipa3_usb_ctx->dfile_state_info || + IS_ERR(ipa3_usb_ctx->dfile_state_info)) { + pr_err("failed to create file for state_info\n"); + goto fail; + } + + return; + +fail: + debugfs_remove_recursive(ipa3_usb_ctx->dent); + ipa3_usb_ctx->dent = NULL; +} + +static void ipa_usb_debugfs_remove(void) +{ + if (IS_ERR(ipa3_usb_ctx->dent)) { + IPA_USB_ERR("ipa_usb debugfs folder was not created\n"); + return; + } + + debugfs_remove_recursive(ipa3_usb_ctx->dent); +} +#else /* CONFIG_DEBUG_FS */ +static void ipa_usb_debugfs_init(void){} +static void ipa_usb_debugfs_remove(void){} +#endif /* CONFIG_DEBUG_FS */ + +static int ipa_usb_set_lock_unlock(bool is_lock) +{ + IPA_USB_DBG("entry\n"); + if (is_lock) + mutex_lock(&ipa3_usb_ctx->general_mutex); + else + mutex_unlock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG("exit\n"); + + return 0; +} + + + +int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params, + struct ipa_usb_xdci_chan_params *dl_chan_params, + struct ipa_req_chan_out_params *ul_out_params, + struct ipa_req_chan_out_params *dl_out_params, + struct ipa_usb_xdci_connect_params *connect_params) +{ + int result = -EFAULT; + struct ipa_usb_xdci_connect_params_internal conn_params; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + if (connect_params == NULL || dl_chan_params == NULL || + dl_out_params == NULL || + (connect_params->teth_prot != IPA_USB_DIAG && + (ul_chan_params == NULL || ul_out_params == NULL))) { + IPA_USB_ERR("bad parameters\n"); + result = -EINVAL; + goto bad_params; + } + + if (connect_params->teth_prot != IPA_USB_DIAG) { + result = ipa3_usb_request_xdci_channel(ul_chan_params, + IPA_USB_DIR_UL, ul_out_params); + if (result) { + IPA_USB_ERR("failed to allocate UL channel\n"); + goto bad_params; + } + } + + result = ipa3_usb_request_xdci_channel(dl_chan_params, IPA_USB_DIR_DL, + dl_out_params); + if (result) { + IPA_USB_ERR("failed to allocate DL/DPL channel\n"); + goto alloc_dl_chan_fail; + } + + memset(&conn_params, 0, + sizeof(struct ipa_usb_xdci_connect_params_internal)); + conn_params.max_pkt_size = connect_params->max_pkt_size; + conn_params.ipa_to_usb_clnt_hdl = dl_out_params->clnt_hdl; + conn_params.ipa_to_usb_xferrscidx = + connect_params->ipa_to_usb_xferrscidx; + conn_params.ipa_to_usb_xferrscidx_valid = + connect_params->ipa_to_usb_xferrscidx_valid; + if (connect_params->teth_prot != IPA_USB_DIAG) { + conn_params.usb_to_ipa_clnt_hdl = ul_out_params->clnt_hdl; + conn_params.usb_to_ipa_xferrscidx = + connect_params->usb_to_ipa_xferrscidx; + conn_params.usb_to_ipa_xferrscidx_valid = + connect_params->usb_to_ipa_xferrscidx_valid; + } + conn_params.teth_prot = connect_params->teth_prot; + conn_params.teth_prot_params = connect_params->teth_prot_params; + conn_params.max_supported_bandwidth_mbps = + connect_params->max_supported_bandwidth_mbps; + result = ipa3_usb_xdci_connect_internal(&conn_params); + if (result) { + 
IPA_USB_ERR("failed to connect\n"); + goto connect_fail; + } + + /* + * Register for xdci lock/unlock callback with ipa core driver. + * As per use case, only register for IPA_CONS end point for now. + * If needed we can include the same for IPA_PROD ep. + * For IPA_USB_DIAG/DPL config there will not be any UL ep. + */ + if (connect_params->teth_prot != IPA_USB_DIAG) + ipa3_register_lock_unlock_callback(&ipa_usb_set_lock_unlock, + ul_out_params->clnt_hdl); + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +connect_fail: + ipa3_usb_release_xdci_channel(dl_out_params->clnt_hdl, IPA_USB_DIR_DL, + IPA3_USB_GET_TTYPE(dl_chan_params->teth_prot)); +alloc_dl_chan_fail: + if (connect_params->teth_prot != IPA_USB_DIAG) + ipa3_usb_release_xdci_channel(ul_out_params->clnt_hdl, + IPA_USB_DIR_UL, + IPA3_USB_GET_TTYPE(ul_chan_params->teth_prot)); +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; +} +EXPORT_SYMBOL(ipa_usb_xdci_connect); + +static int ipa3_usb_check_disconnect_prot(enum ipa_usb_teth_prot teth_prot) +{ + if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) { + IPA_USB_ERR("bad parameter\n"); + return -EFAULT; + } + + if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state != + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_ERR("%s is not connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + return -EFAULT; + } + + return 0; +} + +/* Assumes lock already acquired */ +static int ipa_usb_xdci_dismiss_channels(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = 0; + enum ipa3_usb_transport_type ttype; + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + IPA_USB_DBG_LOW("entry\n"); + + /* Reset DL channel */ + result = ipa3_reset_gsi_channel(dl_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset DL channel\n"); + return result; + } + + /* Reset DL event ring */ + result = ipa3_reset_gsi_event_ring(dl_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset DL event ring\n"); + return result; + } + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + ipa3_xdci_ep_delay_rm(ul_clnt_hdl); /* Remove ep_delay if set */ + /* Reset UL channel */ + result = ipa3_reset_gsi_channel(ul_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset UL channel\n"); + return result; + } + + /* Reset UL event ring */ + result = ipa3_reset_gsi_event_ring(ul_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset UL event ring\n"); + return result; + } + } + + /* + * Deregister for xdci lock/unlock callback from ipa core driver. + * As per use case, only deregister for IPA_CONS end point for now. + * If needed we can include the same for IPA_PROD ep. + * For IPA_USB_DIAG/DPL config there will not be any UL config. 
+ */ + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) + ipa3_deregister_lock_unlock_callback(ul_clnt_hdl); + + /* Change state to STOPPED */ + if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype)) + IPA_USB_ERR("failed to change state to stopped\n"); + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, + IPA_USB_DIR_UL, ttype); + if (result) { + IPA_USB_ERR("failed to release UL channel\n"); + return result; + } + } + + result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, + IPA_USB_DIR_DL, ttype); + if (result) { + IPA_USB_ERR("failed to release DL channel\n"); + return result; + } + + IPA_USB_DBG_LOW("exit\n"); + + return 0; +} + +int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = 0; + struct ipa_ep_cfg_holb holb_cfg; + unsigned long flags; + enum ipa3_usb_state orig_state; + enum ipa3_usb_transport_type ttype; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_DISCONNECT, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + result = -EPERM; + goto bad_params; + } + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + if (ipa3_usb_ctx->ttype_ctx[ttype].state == + IPA_USB_SUSPENDED_NO_RWAKEUP) { + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl, + teth_prot); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; + } + + if (ipa3_usb_check_disconnect_prot(teth_prot)) { + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + result = -EINVAL; + goto bad_params; + } + + if (ipa3_usb_ctx->ttype_ctx[ttype].state != IPA_USB_SUSPENDED) { + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + /* Stop DL/DPL channel */ + result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1); + if (result) { + IPA_USB_ERR("failed to disconnect DL/DPL channel\n"); + goto bad_params; + } + } else { + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_EN; + holb_cfg.tmr_val = 0; + ipa3_cfg_ep_holb(dl_clnt_hdl, &holb_cfg); + } + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + orig_state = ipa3_usb_ctx->ttype_ctx[ttype].state; + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + if (orig_state != IPA_USB_SUSPENDED) { + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, + flags); + /* Stop UL channel */ + result = ipa3_xdci_disconnect(ul_clnt_hdl, + true, + ipa3_usb_ctx->qmi_req_id); + if (result) { + IPA_USB_ERR("failed disconnect UL channel\n"); + goto bad_params; + } + ipa3_usb_ctx->qmi_req_id++; + } else + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, + flags); + } else + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + + result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl, + teth_prot); + if (result) + goto bad_params; + + /* Disconnect tethering protocol */ + result = ipa3_usb_disconnect_teth_prot(teth_prot); + if (result) + goto bad_params; + + if (orig_state != IPA_USB_SUSPENDED) { + if (ipa_pm_is_used()) + result = ipa_pm_deactivate_sync( + ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl); + else + result = ipa3_usb_release_prod(ttype); + if (result) { + IPA_USB_ERR("failed to release PROD\n"); + goto bad_params; + } + } + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; + +} 
+EXPORT_SYMBOL(ipa_usb_xdci_disconnect); + +int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot) +{ + int result = -EFAULT; + enum ipa3_usb_transport_type ttype; + struct ipa3_usb_teth_prot_context *teth_prot_ptr; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) { + IPA_USB_ERR("bad parameters\n"); + result = -EINVAL; + goto bad_params; + } + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_DEINIT_TETH_PROT, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + result = -EPERM; + goto bad_params; + } + + /* Clean-up tethering protocol */ + teth_prot_ptr = &ipa3_usb_ctx->teth_prot_ctx[teth_prot]; + + switch (teth_prot) { + case IPA_USB_RNDIS: + case IPA_USB_ECM: + if (teth_prot_ptr->state != + IPA_USB_TETH_PROT_INITIALIZED) { + IPA_USB_ERR("%s is not initialized\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + result = -EINVAL; + goto bad_params; + } + if (teth_prot == IPA_USB_RNDIS) + rndis_ipa_cleanup( + teth_prot_ptr->teth_prot_params.rndis.private); + else + ecm_ipa_cleanup( + teth_prot_ptr->teth_prot_params.ecm.private); + teth_prot_ptr->user_data = NULL; + teth_prot_ptr->state = IPA_USB_TETH_PROT_INVALID; + ipa3_usb_ctx->num_init_prot--; + IPA_USB_DBG("deinitialized %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + if (teth_prot_ptr->state != + IPA_USB_TETH_PROT_INITIALIZED) { + IPA_USB_ERR("%s (%s) is not initialized\n", + ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + result = -EINVAL; + goto bad_params; + } + + teth_prot_ptr->user_data = NULL; + teth_prot_ptr->state = IPA_USB_TETH_PROT_INVALID; + ipa3_usb_ctx->num_init_prot--; + IPA_USB_DBG("deinitialized %s (%s)\n", + ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + break; + case IPA_USB_DIAG: + if (teth_prot_ptr->state != + IPA_USB_TETH_PROT_INITIALIZED) { + IPA_USB_ERR("%s is not initialized\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + result = -EINVAL; + goto bad_params; + } + teth_prot_ptr->user_data = NULL; + teth_prot_ptr->state = IPA_USB_TETH_PROT_INVALID; + IPA_USB_DBG("deinitialized %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + default: + IPA_USB_ERR("unexpected tethering protocol\n"); + result = -EINVAL; + goto bad_params; + } + + if (IPA3_USB_IS_TTYPE_DPL(ttype) || + (ipa3_usb_ctx->num_init_prot == 0)) { + if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype)) + IPA_USB_ERR( + "failed to change state to invalid\n"); + if (ipa_pm_is_used()) { + ipa3_usb_deregister_pm(ttype); + ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL; + } else { + ipa_rm_delete_resource( + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name); + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = + false; + ipa_rm_delete_resource( + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name); + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = + false; + ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL; + } + } + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; +} +EXPORT_SYMBOL(ipa_usb_deinit_teth_prot); + +/* Assumes lock already acquired */ +static int ipa3_usb_suspend_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = 0; + enum ipa3_usb_transport_type ttype; + + 
ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_SUSPEND_NO_RWAKEUP, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + result = -EPERM; + goto fail_exit; + } + + IPA_USB_DBG("Start suspend with no remote wakeup sequence: %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? + "DPL channel":"Data Tethering channels"); + + if (ipa3_usb_check_disconnect_prot(teth_prot)) { + result = -EINVAL; + goto fail_exit; + } + + /* Stop DL/DPL channel */ + result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1); + if (result) { + IPA_USB_ERR("failed to disconnect DL/DPL channel\n"); + goto fail_exit; + } + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + /* Stop UL channel */ + result = ipa3_xdci_disconnect(ul_clnt_hdl, true, + ipa3_usb_ctx->qmi_req_id); + if (result) { + IPA_USB_ERR("failed disconnect UL channel\n"); + goto start_dl; + } + ipa3_usb_ctx->qmi_req_id++; + } + + /* Disconnect tethering protocol */ + result = ipa3_usb_disconnect_teth_prot(teth_prot); + if (result) + goto start_ul; + + if (ipa_pm_is_used()) + result = ipa_pm_deactivate_sync( + ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl); + else + result = ipa3_usb_release_prod(ttype); + if (result) { + IPA_USB_ERR("failed to release PROD\n"); + goto connect_teth; + } + + /* Change ipa_usb state to SUSPENDED_NO_RWAKEUP */ + if (!ipa3_usb_set_state(IPA_USB_SUSPENDED_NO_RWAKEUP, false, ttype)) + IPA_USB_ERR("failed to change state to suspend no rwakeup\n"); + + IPA_USB_DBG_LOW("exit\n"); + return 0; + +connect_teth: + (void)ipa3_usb_connect_teth_prot(teth_prot); +start_ul: + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) + (void)ipa3_xdci_connect(ul_clnt_hdl); +start_dl: + (void)ipa3_xdci_connect(dl_clnt_hdl); +fail_exit: + return result; +} + +int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot, bool with_remote_wakeup) +{ + int result = 0; + unsigned long flags; + enum ipa3_usb_transport_type ttype; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + + if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) { + IPA_USB_ERR("bad parameters\n"); + result = -EINVAL; + goto bad_params; + } + + if (!with_remote_wakeup) { + result = ipa3_usb_suspend_no_remote_wakeup(ul_clnt_hdl, + dl_clnt_hdl, teth_prot); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; + } + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_SUSPEND, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + result = -EPERM; + goto bad_params; + } + + IPA_USB_DBG("Start suspend sequence: %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
+ "DPL channel":"Data Tethering channels"); + + /* Change state to SUSPEND_REQUESTED */ + if (!ipa3_usb_set_state(IPA_USB_SUSPEND_REQUESTED, false, ttype)) { + IPA_USB_ERR( + "fail changing state to suspend_req\n"); + result = -EFAULT; + goto bad_params; + } + + /* Stop UL channel & suspend DL/DPL EP */ + result = ipa3_xdci_suspend(ul_clnt_hdl, dl_clnt_hdl, + true, + ipa3_usb_ctx->qmi_req_id, IPA3_USB_IS_TTYPE_DPL(ttype)); + if (result) { + IPA_USB_ERR("failed to suspend\n"); + goto suspend_fail; + } + ipa3_usb_ctx->qmi_req_id++; + + if (ipa_pm_is_used()) + result = ipa_pm_deactivate_sync( + ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl); + else + result = ipa3_usb_release_prod(ttype); + if (result) { + IPA_USB_ERR("failed to release PROD\n"); + goto release_prod_fail; + } + + /* Check if DL/DPL data pending */ + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state == + IPA_USB_CONS_GRANTED && + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) { + + IPA_USB_DBG("DL/DPL data pending, invoke remote wakeup\n"); + queue_work(ipa3_usb_ctx->wq, + IPA3_USB_IS_TTYPE_DPL(ttype) ? + &ipa3_usb_dpl_notify_remote_wakeup_work : + &ipa3_usb_notify_remote_wakeup_work); + } + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + + /* Change state to SUSPENDED */ + if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype)) + IPA_USB_ERR("failed to change state to suspended\n"); + + /* Check if DL/DPL data pending */ + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) { + IPA_USB_DBG_LOW( + "DL/DPL data is pending, invoking remote wakeup\n"); + queue_work(ipa3_usb_ctx->wq, IPA3_USB_IS_TTYPE_DPL(ttype) ? + &ipa3_usb_dpl_notify_remote_wakeup_work : + &ipa3_usb_notify_remote_wakeup_work); + } + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +release_prod_fail: + ipa3_xdci_resume(ul_clnt_hdl, dl_clnt_hdl, + IPA3_USB_IS_TTYPE_DPL(ttype)); +suspend_fail: + /* Change state back to CONNECTED */ + if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true, ttype)) + IPA_USB_ERR("failed to change state back to connected\n"); +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; +} +EXPORT_SYMBOL(ipa_usb_xdci_suspend); + +/* Assumes lock already acquired */ +static int ipa3_usb_resume_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = -EFAULT; + enum ipa3_usb_transport_type ttype; + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + IPA_USB_DBG("Start resume with no remote wakeup sequence: %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
+ "DPL channel":"Data Tethering channels"); + + /* Request USB_PROD */ + if (ipa_pm_is_used()) + result = ipa_pm_activate_sync( + ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl); + else + result = ipa3_usb_request_prod(ttype); + if (result) + goto fail_exit; + + /* Connect tethering protocol */ + result = ipa3_usb_connect_teth_prot(teth_prot); + if (result) { + IPA_USB_ERR("failed to connect teth protocol\n"); + goto release_prod; + } + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + /* Start UL channel */ + result = ipa3_xdci_connect(ul_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to start UL channel\n"); + goto disconn_teth; + } + } + + /* Start DL/DPL channel */ + result = ipa3_xdci_connect(dl_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to start DL/DPL channel\n"); + goto stop_ul; + } + + /* Change state to CONNECTED */ + if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) { + IPA_USB_ERR("failed to change state to connected\n"); + result = -EFAULT; + goto stop_dl; + } + + return 0; + +stop_dl: + (void)ipa3_xdci_disconnect(dl_clnt_hdl, false, -1); +stop_ul: + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + (void)ipa3_xdci_disconnect(ul_clnt_hdl, true, + ipa3_usb_ctx->qmi_req_id); + ipa3_usb_ctx->qmi_req_id++; + } +disconn_teth: + (void)ipa3_usb_disconnect_teth_prot(teth_prot); +release_prod: + if (ipa_pm_is_used()) + (void)ipa_pm_deactivate_sync( + ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl); + else + (void)ipa3_usb_release_prod(ttype); +fail_exit: + return result; +} + +int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = -EFAULT; + enum ipa3_usb_state prev_state; + unsigned long flags; + enum ipa3_usb_transport_type ttype; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + + if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) { + IPA_USB_ERR("bad parameters\n"); + result = -EINVAL; + goto bad_params; + } + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_RESUME, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + result = -EPERM; + goto bad_params; + } + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + prev_state = ipa3_usb_ctx->ttype_ctx[ttype].state; + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + if (prev_state == IPA_USB_SUSPENDED_NO_RWAKEUP) { + result = ipa3_usb_resume_no_remote_wakeup(ul_clnt_hdl, + dl_clnt_hdl, teth_prot); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; + } + + IPA_USB_DBG("Start resume sequence: %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
+ "DPL channel" : "Data Tethering channels"); + + /* Change state to RESUME_IN_PROGRESS */ + if (!ipa3_usb_set_state(IPA_USB_RESUME_IN_PROGRESS, false, ttype)) { + IPA_USB_ERR("failed to change state to resume_in_progress\n"); + result = -EFAULT; + goto bad_params; + } + + /* Request USB_PROD */ + if (ipa_pm_is_used()) + result = ipa_pm_activate_sync( + ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl); + else + result = ipa3_usb_request_prod(ttype); + if (result) + goto prod_req_fail; + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + /* Start UL channel */ + result = ipa3_start_gsi_channel(ul_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to start UL channel\n"); + goto start_ul_fail; + } + } + + /* Start DL/DPL channel */ + result = ipa3_start_gsi_channel(dl_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to start DL/DPL channel\n"); + goto start_dl_fail; + } + + /* Change state to CONNECTED */ + if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) { + IPA_USB_ERR("failed to change state to connected\n"); + result = -EFAULT; + goto state_change_connected_fail; + } + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +state_change_connected_fail: + result = ipa3_stop_gsi_channel(dl_clnt_hdl); + if (result) + IPA_USB_ERR("Error stopping DL/DPL channel: %d\n", + result); +start_dl_fail: + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + result = ipa3_stop_gsi_channel(ul_clnt_hdl); + if (result) + IPA_USB_ERR("Error stopping UL channel: %d\n", result); + } +start_ul_fail: + if (ipa_pm_is_used()) + ipa_pm_deactivate_sync( + ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl); + else + ipa3_usb_release_prod(ttype); +prod_req_fail: + /* Change state back to prev_state */ + if (!ipa3_usb_set_state(prev_state, true, ttype)) + IPA_USB_ERR("failed to change state back to %s\n", + ipa3_usb_state_to_string(prev_state)); +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; +} +EXPORT_SYMBOL(ipa_usb_xdci_resume); + +static int __init ipa3_usb_init(void) +{ + int i; + unsigned long flags; + int res; + struct ipa3_usb_pm_context *pm_ctx; + + pr_debug("entry\n"); + ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL); + if (ipa3_usb_ctx == NULL) { + pr_err(":ipa_usb init failed\n"); + return -ENOMEM; + } + memset(ipa3_usb_ctx, 0, sizeof(struct ipa3_usb_context)); + + for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++) + ipa3_usb_ctx->teth_prot_ctx[i].state = + IPA_USB_TETH_PROT_INVALID; + ipa3_usb_ctx->num_init_prot = 0; + init_completion(&ipa3_usb_ctx->dev_ready_comp); + ipa3_usb_ctx->qmi_req_id = 0; + spin_lock_init(&ipa3_usb_ctx->state_lock); + ipa3_usb_ctx->dl_data_pending = false; + mutex_init(&ipa3_usb_ctx->general_mutex); + + /* init PM related members */ + pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].pm_ctx; + pm_ctx->hdl = ~0; + pm_ctx->remote_wakeup_work = &ipa3_usb_notify_remote_wakeup_work; + pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].pm_ctx; + pm_ctx->hdl = ~0; + pm_ctx->remote_wakeup_work = &ipa3_usb_dpl_notify_remote_wakeup_work; + + for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) { + ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_valid = false; + ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_valid = false; + init_completion(&ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_comp); + ipa3_usb_ctx->ttype_ctx[i].user_data = NULL; + } + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) { + ipa3_usb_ctx->ttype_ctx[i].state = IPA_USB_INVALID; + ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_state = + 
IPA_USB_CONS_RELEASED; + } + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + + ipa3_usb_ctx->wq = create_singlethread_workqueue("ipa_usb_wq"); + if (!ipa3_usb_ctx->wq) { + pr_err("failed to create workqueue\n"); + res = -EFAULT; + goto ipa_usb_workqueue_fail; + } + + ipa_usb_debugfs_init(); + + pr_info("exit: IPA_USB init success!\n"); + + return 0; + +ipa_usb_workqueue_fail: + pr_err("init failed (%d)\n", -res); + kfree(ipa3_usb_ctx); + return res; +} + +static void ipa3_usb_exit(void) +{ + IPA_USB_DBG_LOW("IPA_USB exit\n"); + ipa_usb_debugfs_remove(); + kfree(ipa3_usb_ctx); +} + +arch_initcall(ipa3_usb_init); +module_exit(ipa3_usb_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA USB client driver"); diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c new file mode 100644 index 000000000000..69c7f605ae60 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c @@ -0,0 +1,871 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include "../ipa_common_i.h" +#include "../ipa_v3/ipa_pm.h" + +#define OFFLOAD_DRV_NAME "ipa_wdi" +#define IPA_WDI_DBG(fmt, args...) \ + do { \ + pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_WDI_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_WDI_ERR(fmt, args...) 
\ + do { \ + pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +struct ipa_wdi_intf_info { + char netdev_name[IPA_RESOURCE_NAME_MAX]; + u8 hdr_len; + u32 partial_hdr_hdl[IPA_IP_MAX]; + struct list_head link; +}; + +struct ipa_wdi_context { + struct list_head head_intf_list; + struct completion wdi_completion; + struct mutex lock; + enum ipa_wdi_version wdi_version; + u8 is_smmu_enabled; + u32 tx_pipe_hdl; + u32 rx_pipe_hdl; + u8 num_sys_pipe_needed; + u32 sys_pipe_hdl[IPA_WDI_MAX_SUPPORTED_SYS_PIPE]; + u32 ipa_pm_hdl; +#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN + ipa_wdi_meter_notifier_cb wdi_notify; +#endif +}; + +static struct ipa_wdi_context *ipa_wdi_ctx; + +int ipa_wdi_init(struct ipa_wdi_init_in_params *in, + struct ipa_wdi_init_out_params *out) +{ + struct ipa_wdi_uc_ready_params uc_ready_params; + struct ipa_smmu_in_params smmu_in; + struct ipa_smmu_out_params smmu_out; + + if (ipa_wdi_ctx) { + IPA_WDI_ERR("ipa_wdi_ctx was initialized before\n"); + return -EFAULT; + } + + if (in->wdi_version > IPA_WDI_3 || in->wdi_version < IPA_WDI_1) { + IPA_WDI_ERR("wrong wdi version: %d\n", in->wdi_version); + return -EFAULT; + } + + ipa_wdi_ctx = kzalloc(sizeof(*ipa_wdi_ctx), GFP_KERNEL); + if (ipa_wdi_ctx == NULL) { + IPA_WDI_ERR("fail to alloc wdi ctx\n"); + return -ENOMEM; + } + mutex_init(&ipa_wdi_ctx->lock); + init_completion(&ipa_wdi_ctx->wdi_completion); + INIT_LIST_HEAD(&ipa_wdi_ctx->head_intf_list); + + ipa_wdi_ctx->wdi_version = in->wdi_version; + uc_ready_params.notify = in->notify; + uc_ready_params.priv = in->priv; +#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN + ipa_wdi_ctx->wdi_notify = in->wdi_notify; +#endif + + if (ipa_uc_reg_rdyCB(&uc_ready_params) != 0) { + mutex_destroy(&ipa_wdi_ctx->lock); + kfree(ipa_wdi_ctx); + ipa_wdi_ctx = NULL; + return -EFAULT; + } + + out->is_uC_ready = uc_ready_params.is_uC_ready; + + smmu_in.smmu_client = IPA_SMMU_WLAN_CLIENT; + if (ipa_get_smmu_params(&smmu_in, &smmu_out)) + out->is_smmu_enabled = false; + else + out->is_smmu_enabled = smmu_out.smmu_enable; + + ipa_wdi_ctx->is_smmu_enabled = out->is_smmu_enabled; + + return 0; +} +EXPORT_SYMBOL(ipa_wdi_init); + +int ipa_wdi_cleanup(void) +{ + struct ipa_wdi_intf_info *entry; + struct ipa_wdi_intf_info *next; + + /* clear interface list */ + list_for_each_entry_safe(entry, next, + &ipa_wdi_ctx->head_intf_list, link) { + list_del(&entry->link); + kfree(entry); + } + mutex_destroy(&ipa_wdi_ctx->lock); + kfree(ipa_wdi_ctx); + ipa_wdi_ctx = NULL; + return 0; +} +EXPORT_SYMBOL(ipa_wdi_cleanup); + +static int ipa_wdi_commit_partial_hdr( + struct ipa_ioc_add_hdr *hdr, + const char *netdev_name, + struct ipa_wdi_hdr_info *hdr_info) +{ + int i; + + if (!hdr || !hdr_info || !netdev_name) { + IPA_WDI_ERR("Invalid input\n"); + return -EINVAL; + } + + hdr->commit = 1; + hdr->num_hdrs = 2; + + snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name), + "%s_ipv4", netdev_name); + snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name), + "%s_ipv6", netdev_name); + for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) { + hdr->hdr[i].hdr_len = hdr_info[i].hdr_len; + memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len); + hdr->hdr[i].type = hdr_info[i].hdr_type; + hdr->hdr[i].is_partial = 1; + hdr->hdr[i].is_eth2_ofst_valid = 1; + hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset; + } + + if (ipa_add_hdr(hdr)) { + 
IPA_WDI_ERR("fail to add partial headers\n"); + return -EFAULT; + } + + return 0; +} + +int ipa_wdi_reg_intf(struct ipa_wdi_reg_intf_in_params *in) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_wdi_intf_info *new_intf; + struct ipa_wdi_intf_info *entry; + struct ipa_tx_intf tx; + struct ipa_rx_intf rx; + struct ipa_ioc_tx_intf_prop tx_prop[2]; + struct ipa_ioc_rx_intf_prop rx_prop[2]; + u32 len; + int ret = 0; + + if (in == NULL) { + IPA_WDI_ERR("invalid params in=%pK\n", in); + return -EINVAL; + } + + if (!ipa_wdi_ctx) { + IPA_WDI_ERR("wdi ctx is not initialized\n"); + return -EPERM; + } + + IPA_WDI_DBG("register interface for netdev %s\n", + in->netdev_name); + + mutex_lock(&ipa_wdi_ctx->lock); + list_for_each_entry(entry, &ipa_wdi_ctx->head_intf_list, link) + if (strcmp(entry->netdev_name, in->netdev_name) == 0) { + IPA_WDI_DBG("intf was added before.\n"); + mutex_unlock(&ipa_wdi_ctx->lock); + return 0; + } + + IPA_WDI_DBG("intf was not added before, proceed.\n"); + new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL); + if (new_intf == NULL) { + IPA_WDI_ERR("fail to alloc new intf\n"); + mutex_unlock(&ipa_wdi_ctx->lock); + return -ENOMEM; + } + + INIT_LIST_HEAD(&new_intf->link); + strlcpy(new_intf->netdev_name, in->netdev_name, + sizeof(new_intf->netdev_name)); + new_intf->hdr_len = in->hdr_info[0].hdr_len; + + /* add partial header */ + len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add); + hdr = kzalloc(len, GFP_KERNEL); + if (hdr == NULL) { + IPA_WDI_ERR("fail to alloc %d bytes\n", len); + ret = -EFAULT; + goto fail_alloc_hdr; + } + + if (ipa_wdi_commit_partial_hdr(hdr, in->netdev_name, in->hdr_info)) { + IPA_WDI_ERR("fail to commit partial headers\n"); + ret = -EFAULT; + goto fail_commit_hdr; + } + + new_intf->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl; + new_intf->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl; + IPA_WDI_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n", + hdr->hdr[IPA_IP_v4].hdr_hdl, hdr->hdr[IPA_IP_v6].hdr_hdl); + + /* populate tx prop */ + tx.num_props = 2; + tx.prop = tx_prop; + + memset(tx_prop, 0, sizeof(tx_prop)); + tx_prop[0].ip = IPA_IP_v4; + tx_prop[0].dst_pipe = IPA_CLIENT_WLAN1_CONS; + tx_prop[0].alt_dst_pipe = in->alt_dst_pipe; + tx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type; + strlcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name, + sizeof(tx_prop[0].hdr_name)); + + tx_prop[1].ip = IPA_IP_v6; + tx_prop[1].dst_pipe = IPA_CLIENT_WLAN1_CONS; + tx_prop[1].alt_dst_pipe = in->alt_dst_pipe; + tx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type; + strlcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name, + sizeof(tx_prop[1].hdr_name)); + + /* populate rx prop */ + rx.num_props = 2; + rx.prop = rx_prop; + + memset(rx_prop, 0, sizeof(rx_prop)); + rx_prop[0].ip = IPA_IP_v4; + rx_prop[0].src_pipe = IPA_CLIENT_WLAN1_PROD; + rx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type; + if (in->is_meta_data_valid) { + rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_prop[0].attrib.meta_data = in->meta_data; + rx_prop[0].attrib.meta_data_mask = in->meta_data_mask; + } + + rx_prop[1].ip = IPA_IP_v6; + rx_prop[1].src_pipe = IPA_CLIENT_WLAN1_PROD; + rx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type; + if (in->is_meta_data_valid) { + rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_prop[1].attrib.meta_data = in->meta_data; + rx_prop[1].attrib.meta_data_mask = in->meta_data_mask; + } + + if (ipa_register_intf(in->netdev_name, &tx, &rx)) { + IPA_WDI_ERR("fail to add interface prop\n"); + ret = -EFAULT; + goto fail_commit_hdr; + } + + 
list_add(&new_intf->link, &ipa_wdi_ctx->head_intf_list); + init_completion(&ipa_wdi_ctx->wdi_completion); + + kfree(hdr); + mutex_unlock(&ipa_wdi_ctx->lock); + return 0; + +fail_commit_hdr: + kfree(hdr); +fail_alloc_hdr: + kfree(new_intf); + mutex_unlock(&ipa_wdi_ctx->lock); + return ret; +} +EXPORT_SYMBOL(ipa_wdi_reg_intf); + +int ipa_wdi_dereg_intf(const char *netdev_name) +{ + int len, ret = 0; + struct ipa_ioc_del_hdr *hdr = NULL; + struct ipa_wdi_intf_info *entry; + struct ipa_wdi_intf_info *next; + + if (!netdev_name) { + IPA_WDI_ERR("no netdev name.\n"); + return -EINVAL; + } + + if (!ipa_wdi_ctx) { + IPA_WDI_ERR("wdi ctx is not initialized.\n"); + return -EPERM; + } + + mutex_lock(&ipa_wdi_ctx->lock); + list_for_each_entry_safe(entry, next, &ipa_wdi_ctx->head_intf_list, + link) + if (strcmp(entry->netdev_name, netdev_name) == 0) { + len = sizeof(struct ipa_ioc_del_hdr) + + 2 * sizeof(struct ipa_hdr_del); + hdr = kzalloc(len, GFP_KERNEL); + if (hdr == NULL) { + IPA_WDI_ERR("fail to alloc %d bytes\n", len); + mutex_unlock(&ipa_wdi_ctx->lock); + return -ENOMEM; + } + + hdr->commit = 1; + hdr->num_hdls = 2; + hdr->hdl[0].hdl = entry->partial_hdr_hdl[0]; + hdr->hdl[1].hdl = entry->partial_hdr_hdl[1]; + IPA_WDI_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n", + hdr->hdl[0].hdl, hdr->hdl[1].hdl); + + if (ipa_del_hdr(hdr)) { + IPA_WDI_ERR("fail to delete partial header\n"); + ret = -EFAULT; + goto fail; + } + + if (ipa_deregister_intf(entry->netdev_name)) { + IPA_WDI_ERR("fail to del interface props\n"); + ret = -EFAULT; + goto fail; + } + + list_del(&entry->link); + kfree(entry); + + break; + } + +fail: + kfree(hdr); + mutex_unlock(&ipa_wdi_ctx->lock); + return ret; +} +EXPORT_SYMBOL(ipa_wdi_dereg_intf); + +static void ipa_wdi_rm_notify(void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + if (!ipa_wdi_ctx) { + IPA_WDI_ERR("Invalid context\n"); + return; + } + + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + complete_all(&ipa_wdi_ctx->wdi_completion); + break; + + case IPA_RM_RESOURCE_RELEASED: + break; + + default: + IPA_WDI_ERR("Invalid RM Evt: %d", event); + break; + } +} + +static int ipa_wdi_cons_release(void) +{ + return 0; +} + +static int ipa_wdi_cons_request(void) +{ + int ret = 0; + + if (!ipa_wdi_ctx) { + IPA_WDI_ERR("wdi ctx is not initialized\n"); + ret = -EFAULT; + } + + return ret; +} + +static void ipa_wdi_pm_cb(void *p, enum ipa_pm_cb_event event) +{ + IPA_WDI_DBG("received pm event %d\n", event); +} + +int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out) +{ + int i, j, ret = 0; + struct ipa_rm_create_params param; + struct ipa_pm_register_params pm_params; + struct ipa_wdi_in_params in_tx; + struct ipa_wdi_in_params in_rx; + struct ipa_wdi_out_params out_tx; + struct ipa_wdi_out_params out_rx; + + if (!(in && out)) { + IPA_WDI_ERR("empty parameters. 
in=%pK out=%pK\n", in, out); + return -EINVAL; + } + + if (!ipa_wdi_ctx) { + IPA_WDI_ERR("wdi ctx is not initialized\n"); + return -EPERM; + } + + if (in->num_sys_pipe_needed > IPA_WDI_MAX_SUPPORTED_SYS_PIPE) { + IPA_WDI_ERR("ipa can only support up to %d sys pipe\n", + IPA_WDI_MAX_SUPPORTED_SYS_PIPE); + return -EINVAL; + } + ipa_wdi_ctx->num_sys_pipe_needed = in->num_sys_pipe_needed; + IPA_WDI_DBG("number of sys pipe %d\n", in->num_sys_pipe_needed); + + /* setup sys pipe when needed */ + for (i = 0; i < ipa_wdi_ctx->num_sys_pipe_needed; i++) { + ret = ipa_setup_sys_pipe(&in->sys_in[i], + &ipa_wdi_ctx->sys_pipe_hdl[i]); + if (ret) { + IPA_WDI_ERR("fail to setup sys pipe %d\n", i); + ret = -EFAULT; + goto fail_setup_sys_pipe; + } + } + + if (!ipa_pm_is_used()) { + memset(&param, 0, sizeof(param)); + param.name = IPA_RM_RESOURCE_WLAN_PROD; + param.reg_params.user_data = ipa_wdi_ctx; + param.reg_params.notify_cb = ipa_wdi_rm_notify; + param.floor_voltage = IPA_VOLTAGE_SVS; + ret = ipa_rm_create_resource(&param); + if (ret) { + IPA_WDI_ERR("fail to create WLAN_PROD resource\n"); + ret = -EFAULT; + goto fail_setup_sys_pipe; + } + + memset(&param, 0, sizeof(param)); + param.name = IPA_RM_RESOURCE_WLAN_CONS; + param.request_resource = ipa_wdi_cons_request; + param.release_resource = ipa_wdi_cons_release; + ret = ipa_rm_create_resource(&param); + if (ret) { + IPA_WDI_ERR("fail to create WLAN_CONS resource\n"); + goto fail_create_rm_cons; + } + + if (ipa_rm_add_dependency(IPA_RM_RESOURCE_WLAN_PROD, + IPA_RM_RESOURCE_APPS_CONS)) { + IPA_WDI_ERR("fail to add rm dependency\n"); + ret = -EFAULT; + goto fail_add_dependency; + } + } else { + pm_params.name = "wdi"; + pm_params.callback = ipa_wdi_pm_cb; + pm_params.user_data = NULL; + pm_params.group = IPA_PM_GROUP_DEFAULT; + if (ipa_pm_register(&pm_params, &ipa_wdi_ctx->ipa_pm_hdl)) { + IPA_WDI_ERR("fail to register ipa pm\n"); + ret = -EFAULT; + goto fail_setup_sys_pipe; + } + } + + if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) { + if (ipa_conn_wdi_pipes(in, out, ipa_wdi_ctx->wdi_notify)) { + IPA_WDI_ERR("fail to setup wdi pipes\n"); + ret = -EFAULT; + goto fail_connect_pipe; + } + } else { + memset(&in_tx, 0, sizeof(in_tx)); + memset(&in_rx, 0, sizeof(in_rx)); + memset(&out_tx, 0, sizeof(out_tx)); + memset(&out_rx, 0, sizeof(out_rx)); +#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN + in_rx.wdi_notify = ipa_wdi_ctx->wdi_notify; +#endif + if (in->is_smmu_enabled == false) { + /* firsr setup rx pipe */ + in_rx.sys.ipa_ep_cfg = in->u_rx.rx.ipa_ep_cfg; + in_rx.sys.client = in->u_rx.rx.client; + in_rx.sys.notify = in->notify; + in_rx.sys.priv = in->priv; + in_rx.smmu_enabled = in->is_smmu_enabled; + in_rx.u.ul.rdy_ring_base_pa = + in->u_rx.rx.transfer_ring_base_pa; + in_rx.u.ul.rdy_ring_size = + in->u_rx.rx.transfer_ring_size; + in_rx.u.ul.rdy_ring_rp_pa = + in->u_rx.rx.transfer_ring_doorbell_pa; + in_rx.u.ul.rdy_comp_ring_base_pa = + in->u_rx.rx.event_ring_base_pa; + in_rx.u.ul.rdy_comp_ring_wp_pa = + in->u_rx.rx.event_ring_doorbell_pa; + in_rx.u.ul.rdy_comp_ring_size = + in->u_rx.rx.event_ring_size; + if (ipa_connect_wdi_pipe(&in_rx, &out_rx)) { + IPA_WDI_ERR("fail to setup rx pipe\n"); + ret = -EFAULT; + goto fail_connect_pipe; + } + ipa_wdi_ctx->rx_pipe_hdl = out_rx.clnt_hdl; + out->rx_uc_db_pa = out_rx.uc_door_bell_pa; + IPA_WDI_DBG("rx uc db pa: 0x%pad\n", &out->rx_uc_db_pa); + + /* then setup tx pipe */ + in_tx.sys.ipa_ep_cfg = in->u_tx.tx.ipa_ep_cfg; + in_tx.sys.client = in->u_tx.tx.client; + in_tx.smmu_enabled = in->is_smmu_enabled; + in_tx.u.dl.comp_ring_base_pa = + 
in->u_tx.tx.transfer_ring_base_pa; + in_tx.u.dl.comp_ring_size = + in->u_tx.tx.transfer_ring_size; + in_tx.u.dl.ce_ring_base_pa = + in->u_tx.tx.event_ring_base_pa; + in_tx.u.dl.ce_door_bell_pa = + in->u_tx.tx.event_ring_doorbell_pa; + in_tx.u.dl.ce_ring_size = + in->u_tx.tx.event_ring_size; + in_tx.u.dl.num_tx_buffers = + in->u_tx.tx.num_pkt_buffers; + if (ipa_connect_wdi_pipe(&in_tx, &out_tx)) { + IPA_WDI_ERR("fail to setup tx pipe\n"); + ret = -EFAULT; + goto fail; + } + ipa_wdi_ctx->tx_pipe_hdl = out_tx.clnt_hdl; + out->tx_uc_db_pa = out_tx.uc_door_bell_pa; + IPA_WDI_DBG("tx uc db pa: 0x%pad\n", &out->tx_uc_db_pa); + } else { /* smmu is enabled */ + /* firsr setup rx pipe */ + in_rx.sys.ipa_ep_cfg = in->u_rx.rx_smmu.ipa_ep_cfg; + in_rx.sys.client = in->u_rx.rx_smmu.client; + in_rx.sys.notify = in->notify; + in_rx.sys.priv = in->priv; + in_rx.smmu_enabled = in->is_smmu_enabled; + in_rx.u.ul_smmu.rdy_ring = + in->u_rx.rx_smmu.transfer_ring_base; + in_rx.u.ul_smmu.rdy_ring_size = + in->u_rx.rx_smmu.transfer_ring_size; + in_rx.u.ul_smmu.rdy_ring_rp_pa = + in->u_rx.rx_smmu.transfer_ring_doorbell_pa; + in_rx.u.ul_smmu.rdy_comp_ring = + in->u_rx.rx_smmu.event_ring_base; + in_rx.u.ul_smmu.rdy_comp_ring_wp_pa = + in->u_rx.rx_smmu.event_ring_doorbell_pa; + in_rx.u.ul_smmu.rdy_comp_ring_size = + in->u_rx.rx_smmu.event_ring_size; + if (ipa_connect_wdi_pipe(&in_rx, &out_rx)) { + IPA_WDI_ERR("fail to setup rx pipe\n"); + ret = -EFAULT; + goto fail_connect_pipe; + } + ipa_wdi_ctx->rx_pipe_hdl = out_rx.clnt_hdl; + out->rx_uc_db_pa = out_rx.uc_door_bell_pa; + IPA_WDI_DBG("rx uc db pa: 0x%pad\n", &out->rx_uc_db_pa); + + /* then setup tx pipe */ + in_tx.sys.ipa_ep_cfg = in->u_tx.tx_smmu.ipa_ep_cfg; + in_tx.sys.client = in->u_tx.tx_smmu.client; + in_tx.smmu_enabled = in->is_smmu_enabled; + in_tx.u.dl_smmu.comp_ring = + in->u_tx.tx_smmu.transfer_ring_base; + in_tx.u.dl_smmu.comp_ring_size = + in->u_tx.tx_smmu.transfer_ring_size; + in_tx.u.dl_smmu.ce_ring = + in->u_tx.tx_smmu.event_ring_base; + in_tx.u.dl_smmu.ce_door_bell_pa = + in->u_tx.tx_smmu.event_ring_doorbell_pa; + in_tx.u.dl_smmu.ce_ring_size = + in->u_tx.tx_smmu.event_ring_size; + in_tx.u.dl_smmu.num_tx_buffers = + in->u_tx.tx_smmu.num_pkt_buffers; + if (ipa_connect_wdi_pipe(&in_tx, &out_tx)) { + IPA_WDI_ERR("fail to setup tx pipe\n"); + ret = -EFAULT; + goto fail; + } + ipa_wdi_ctx->tx_pipe_hdl = out_tx.clnt_hdl; + out->tx_uc_db_pa = out_tx.uc_door_bell_pa; + IPA_WDI_DBG("tx uc db pa: 0x%pad\n", &out->tx_uc_db_pa); + } + } + + return 0; + +fail: + ipa_disconnect_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl); +fail_connect_pipe: + if (!ipa_pm_is_used()) + ipa_rm_delete_dependency(IPA_RM_RESOURCE_WLAN_PROD, + IPA_RM_RESOURCE_APPS_CONS); + else + ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl); +fail_add_dependency: + if (!ipa_pm_is_used()) + ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS); +fail_create_rm_cons: + if (!ipa_pm_is_used()) + ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD); +fail_setup_sys_pipe: + for (j = 0; j < i; j++) + ipa_teardown_sys_pipe(ipa_wdi_ctx->sys_pipe_hdl[j]); + return ret; +} +EXPORT_SYMBOL(ipa_wdi_conn_pipes); + +int ipa_wdi_disconn_pipes(void) +{ + int i, ipa_ep_idx_rx, ipa_ep_idx_tx; + + if (!ipa_wdi_ctx) { + IPA_WDI_ERR("wdi ctx is not initialized\n"); + return -EPERM; + } + + /* tear down sys pipe if needed */ + for (i = 0; i < ipa_wdi_ctx->num_sys_pipe_needed; i++) { + if (ipa_teardown_sys_pipe(ipa_wdi_ctx->sys_pipe_hdl[i])) { + IPA_WDI_ERR("fail to tear down sys pipe %d\n", i); + return -EFAULT; + } + } + + ipa_ep_idx_rx = 
ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD); + ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS); + + if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) { + if (ipa_disconn_wdi_pipes(ipa_ep_idx_rx, ipa_ep_idx_tx)) { + IPA_WDI_ERR("fail to tear down wdi pipes\n"); + return -EFAULT; + } + } else { + if (ipa_disconnect_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) { + IPA_WDI_ERR("fail to tear down wdi tx pipes\n"); + return -EFAULT; + } + if (ipa_disconnect_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) { + IPA_WDI_ERR("fail to tear down wdi rx pipes\n"); + return -EFAULT; + } + } + + if (!ipa_pm_is_used()) { + if (ipa_rm_delete_dependency(IPA_RM_RESOURCE_WLAN_PROD, + IPA_RM_RESOURCE_APPS_CONS)) { + IPA_WDI_ERR("fail to delete rm dependency\n"); + return -EFAULT; + } + + if (ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD)) { + IPA_WDI_ERR("fail to delete WLAN_PROD resource\n"); + return -EFAULT; + } + + if (ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS)) { + IPA_WDI_ERR("fail to delete WLAN_CONS resource\n"); + return -EFAULT; + } + } else { + if (ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl)) { + IPA_WDI_ERR("fail to deregister ipa pm\n"); + return -EFAULT; + } + } + + return 0; +} +EXPORT_SYMBOL(ipa_wdi_disconn_pipes); + +int ipa_wdi_enable_pipes(void) +{ + int ret; + int ipa_ep_idx_tx, ipa_ep_idx_rx; + + if (!ipa_wdi_ctx) { + IPA_WDI_ERR("wdi ctx is not initialized.\n"); + return -EPERM; + } + + ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD); + ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS); + + if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) { + if (ipa_enable_wdi_pipes(ipa_ep_idx_tx, ipa_ep_idx_rx)) { + IPA_WDI_ERR("fail to enable wdi pipes\n"); + return -EFAULT; + } + } else { + if (ipa_enable_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) { + IPA_WDI_ERR("fail to enable wdi tx pipe\n"); + return -EFAULT; + } + if (ipa_resume_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) { + IPA_WDI_ERR("fail to resume wdi tx pipe\n"); + return -EFAULT; + } + if (ipa_enable_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) { + IPA_WDI_ERR("fail to enable wdi rx pipe\n"); + return -EFAULT; + } + if (ipa_resume_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) { + IPA_WDI_ERR("fail to resume wdi rx pipe\n"); + return -EFAULT; + } + } + + if (!ipa_pm_is_used()) { + ret = ipa_rm_request_resource(IPA_RM_RESOURCE_WLAN_PROD); + if (ret == -EINPROGRESS) { + if (wait_for_completion_timeout( + &ipa_wdi_ctx->wdi_completion, 10*HZ) == 0) { + IPA_WDI_ERR("WLAN_PROD res req time out\n"); + return -EFAULT; + } + } else if (ret != 0) { + IPA_WDI_ERR("fail to request resource\n"); + return -EFAULT; + } + } else { + ret = ipa_pm_activate_sync(ipa_wdi_ctx->ipa_pm_hdl); + if (ret) { + IPA_WDI_ERR("fail to activate ipa pm\n"); + return -EFAULT; + } + } + + return 0; +} +EXPORT_SYMBOL(ipa_wdi_enable_pipes); + +int ipa_wdi_disable_pipes(void) +{ + int ret; + int ipa_ep_idx_tx, ipa_ep_idx_rx; + + if (!ipa_wdi_ctx) { + IPA_WDI_ERR("wdi ctx is not initialized.\n"); + return -EPERM; + } + + ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD); + ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS); + + if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) { + if (ipa_disable_wdi_pipes(ipa_ep_idx_tx, ipa_ep_idx_rx)) { + IPA_WDI_ERR("fail to disable wdi pipes\n"); + return -EFAULT; + } + } else { + if (ipa_suspend_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) { + IPA_WDI_ERR("fail to suspend wdi tx pipe\n"); + return -EFAULT; + } + if (ipa_disable_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) { + IPA_WDI_ERR("fail to disable wdi tx pipe\n"); + return -EFAULT; + } + if 
(ipa_suspend_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) { + IPA_WDI_ERR("fail to suspend wdi rx pipe\n"); + return -EFAULT; + } + if (ipa_disable_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) { + IPA_WDI_ERR("fail to disable wdi rx pipe\n"); + return -EFAULT; + } + } + + if (!ipa_pm_is_used()) { + ret = ipa_rm_release_resource(IPA_RM_RESOURCE_WLAN_PROD); + if (ret != 0) { + IPA_WDI_ERR("fail to release resource\n"); + return -EFAULT; + } + } else { + ret = ipa_pm_deactivate_sync(ipa_wdi_ctx->ipa_pm_hdl); + if (ret) { + IPA_WDI_ERR("fail to deactivate ipa pm\n"); + return -EFAULT; + } + } + + return 0; +} +EXPORT_SYMBOL(ipa_wdi_disable_pipes); + +int ipa_wdi_set_perf_profile(struct ipa_wdi_perf_profile *profile) +{ + struct ipa_rm_perf_profile rm_profile; + enum ipa_rm_resource_name resource_name; + + if (profile == NULL) { + IPA_WDI_ERR("Invalid input\n"); + return -EINVAL; + } + + if (!ipa_pm_is_used()) { + rm_profile.max_supported_bandwidth_mbps = + profile->max_supported_bw_mbps; + + if (profile->client == IPA_CLIENT_WLAN1_PROD) { + resource_name = IPA_RM_RESOURCE_WLAN_PROD; + } else if (profile->client == IPA_CLIENT_WLAN1_CONS) { + resource_name = IPA_RM_RESOURCE_WLAN_CONS; + } else { + IPA_WDI_ERR("not supported\n"); + return -EINVAL; + } + + if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) { + IPA_WDI_ERR("fail to setup rm perf profile\n"); + return -EFAULT; + } + } else { + if (ipa_pm_set_throughput(ipa_wdi_ctx->ipa_pm_hdl, + profile->max_supported_bw_mbps)) { + IPA_WDI_ERR("fail to setup pm perf profile\n"); + return -EFAULT; + } + } + + return 0; +} +EXPORT_SYMBOL(ipa_wdi_set_perf_profile); + +int ipa_wdi_create_smmu_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return ipa_create_wdi_mapping(num_buffers, info); +} +EXPORT_SYMBOL(ipa_wdi_create_smmu_mapping); + +int ipa_wdi_release_smmu_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return ipa_release_wdi_mapping(num_buffers, info); +} +EXPORT_SYMBOL(ipa_wdi_release_smmu_mapping); + +int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ + return ipa_get_wdi_stats(stats); +} +EXPORT_SYMBOL(ipa_wdi_get_stats); diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c new file mode 100644 index 000000000000..75877bb916cb --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c @@ -0,0 +1,1256 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_common_i.h" +#include "../ipa_v3/ipa_pm.h" + +#define ODU_BRIDGE_DRV_NAME "odu_ipa_bridge" + +#define ODU_BRIDGE_DBG(fmt, args...) \ + do { \ + pr_debug(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) +#define ODU_BRIDGE_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) +#define ODU_BRIDGE_ERR(fmt, args...) 
\ + do { \ + pr_err(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define ODU_BRIDGE_FUNC_ENTRY() \ + ODU_BRIDGE_DBG_LOW("ENTRY\n") +#define ODU_BRIDGE_FUNC_EXIT() \ + ODU_BRIDGE_DBG_LOW("EXIT\n") + + +#define ODU_BRIDGE_IS_QMI_ADDR(daddr) \ + (memcmp(&(daddr), &odu_bridge_ctx->llv6_addr, sizeof((daddr))) \ + == 0) + +#define ODU_BRIDGE_IPV4_HDR_NAME "odu_br_ipv4" +#define ODU_BRIDGE_IPV6_HDR_NAME "odu_br_ipv6" + +#define IPA_ODU_SYS_DESC_FIFO_SZ 0x800 + +#ifdef CONFIG_COMPAT +#define ODU_BRIDGE_IOC_SET_LLV6_ADDR32 _IOW(ODU_BRIDGE_IOC_MAGIC, \ + ODU_BRIDGE_IOCTL_SET_LLV6_ADDR, \ + compat_uptr_t) +#endif + +#define IPA_ODU_VER_CHECK() \ + do { \ + ret = 0;\ + if (ipa_get_hw_type() == IPA_HW_None) { \ + pr_err("IPA HW is unknown\n"); \ + ret = -EFAULT; \ + } \ + else if (ipa_get_hw_type() < IPA_HW_v3_0) \ + ret = 1; \ + } while (0) + +/** + * struct stats - driver statistics, viewable using debugfs + * @num_ul_packets: number of packets bridged in uplink direction + * @num_dl_packets: number of packets bridged in downink direction + * bridge + * @num_lan_packets: number of packets bridged to APPS on bridge mode + */ +struct stats { + u64 num_ul_packets; + u64 num_dl_packets; + u64 num_lan_packets; +}; + +/** + * struct odu_bridge_ctx - ODU bridge driver context information + * @class: kernel class pointer + * @dev_num: kernel device number + * @dev: kernel device struct pointer + * @cdev: kernel character device struct + * @netdev_name: network interface name + * @device_ethaddr: network interface ethernet address + * @priv: client's private data. to be used in client's callbacks + * @tx_dp_notify: client callback for handling IPA ODU_PROD callback + * @send_dl_skb: client callback for sending skb in downlink direction + * @stats: statistics, how many packets were transmitted using the SW bridge + * @is_conencted: is bridge connected ? + * @is_suspended: is bridge suspended ? 
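+ * @ipa_sys_desc_size: descriptor FIFO size used for the system pipes set up
+ * in router mode (taken from the init parameters)
+ * @logbuf: IPC logging context handle
+ * @logbuf_low: IPC logging context handle for low-priority messages
+ * @pm_hdl: handle for the IPA PM client (used when IPA PM replaces IPA RM)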
+ * @mode: ODU mode (router/bridge) + * @lock: for the initialization, connect and disconnect synchronization + * @llv6_addr: link local IPv6 address of ODU network interface + * @odu_br_ipv4_hdr_hdl: handle for partial ipv4 ethernet header + * @odu_br_ipv6_hdr_hdl: handle for partial ipv6 ethernet header + * @odu_prod_hdl: handle for IPA_CLIENT_ODU_PROD pipe + * @odu_emb_cons_hdl: handle for IPA_CLIENT_ODU_EMB_CONS pipe + * @odu_teth_cons_hdl: handle for IPA_CLIENT_ODU_TETH_CONS pipe + * @rm_comp: completion object for IP RM + * @wakeup_request: client callback to wakeup + */ +struct odu_bridge_ctx { + struct class *class; + dev_t dev_num; + struct device *dev; + struct cdev cdev; + char netdev_name[IPA_RESOURCE_NAME_MAX]; + u8 device_ethaddr[ETH_ALEN]; + void *priv; + ipa_notify_cb tx_dp_notify; + int (*send_dl_skb)(void *priv, struct sk_buff *skb); + struct stats stats; + bool is_connected; + bool is_suspended; + enum odu_bridge_mode mode; + struct mutex lock; + struct in6_addr llv6_addr; + uint32_t odu_br_ipv4_hdr_hdl; + uint32_t odu_br_ipv6_hdr_hdl; + u32 odu_prod_hdl; + u32 odu_emb_cons_hdl; + u32 odu_teth_cons_hdl; + u32 ipa_sys_desc_size; + void *logbuf; + void *logbuf_low; + struct completion rm_comp; + void (*wakeup_request)(void *cl_priv); + u32 pm_hdl; +}; +static struct odu_bridge_ctx *odu_bridge_ctx; + +#ifdef CONFIG_DEBUG_FS +#define ODU_MAX_MSG_LEN 512 +static char dbg_buff[ODU_MAX_MSG_LEN]; +#endif + +static void odu_bridge_emb_cons_cb(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + ODU_BRIDGE_FUNC_ENTRY(); + if (evt != IPA_RECEIVE) { + ODU_BRIDGE_ERR("unexpected event\n"); + WARN_ON(1); + return; + } + odu_bridge_ctx->send_dl_skb(priv, (struct sk_buff *)data); + odu_bridge_ctx->stats.num_dl_packets++; + ODU_BRIDGE_FUNC_EXIT(); +} + +static void odu_bridge_teth_cons_cb(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct ipv6hdr *ipv6hdr; + struct sk_buff *skb = (struct sk_buff *)data; + struct sk_buff *skb_copied; + + ODU_BRIDGE_FUNC_ENTRY(); + if (evt != IPA_RECEIVE) { + ODU_BRIDGE_ERR("unexpected event\n"); + WARN_ON(1); + return; + } + + ipv6hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN); + if (ipv6hdr->version == 6 && + ipv6_addr_is_multicast(&ipv6hdr->daddr)) { + ODU_BRIDGE_DBG_LOW("Multicast pkt, send to APPS and adapter\n"); + skb_copied = skb_clone(skb, GFP_KERNEL); + if (skb_copied) { + odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv, + IPA_RECEIVE, + (unsigned long) skb_copied); + odu_bridge_ctx->stats.num_lan_packets++; + } else { + ODU_BRIDGE_ERR("No memory\n"); + } + } + + odu_bridge_ctx->send_dl_skb(priv, skb); + odu_bridge_ctx->stats.num_dl_packets++; + ODU_BRIDGE_FUNC_EXIT(); +} + +static int odu_bridge_connect_router(void) +{ + struct ipa_sys_connect_params odu_prod_params; + struct ipa_sys_connect_params odu_emb_cons_params; + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + memset(&odu_prod_params, 0, sizeof(odu_prod_params)); + memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params)); + + /* configure RX (ODU->IPA) EP */ + odu_prod_params.client = IPA_CLIENT_ODU_PROD; + odu_prod_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN; + odu_prod_params.ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT; + odu_prod_params.desc_fifo_sz = odu_bridge_ctx->ipa_sys_desc_size; + odu_prod_params.priv = odu_bridge_ctx->priv; + odu_prod_params.notify = odu_bridge_ctx->tx_dp_notify; + res = ipa_setup_sys_pipe(&odu_prod_params, + &odu_bridge_ctx->odu_prod_hdl); + if (res) { + ODU_BRIDGE_ERR("fail to setup sys pipe ODU_PROD %d\n", res); + goto fail_odu_prod; + 
} + + /* configure TX (IPA->ODU) EP */ + odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS; + odu_emb_cons_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN; + odu_emb_cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT; + odu_emb_cons_params.desc_fifo_sz = odu_bridge_ctx->ipa_sys_desc_size; + odu_emb_cons_params.priv = odu_bridge_ctx->priv; + odu_emb_cons_params.notify = odu_bridge_emb_cons_cb; + res = ipa_setup_sys_pipe(&odu_emb_cons_params, + &odu_bridge_ctx->odu_emb_cons_hdl); + if (res) { + ODU_BRIDGE_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res); + goto fail_odu_emb_cons; + } + + ODU_BRIDGE_DBG("odu_prod_hdl = %d, odu_emb_cons_hdl = %d\n", + odu_bridge_ctx->odu_prod_hdl, odu_bridge_ctx->odu_emb_cons_hdl); + + ODU_BRIDGE_FUNC_EXIT(); + + return 0; + +fail_odu_emb_cons: + ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl); + odu_bridge_ctx->odu_prod_hdl = 0; +fail_odu_prod: + return res; +} + +static int odu_bridge_connect_bridge(void) +{ + struct ipa_sys_connect_params odu_prod_params; + struct ipa_sys_connect_params odu_emb_cons_params; + struct ipa_sys_connect_params odu_teth_cons_params; + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + memset(&odu_prod_params, 0, sizeof(odu_prod_params)); + memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params)); + + if (!ipa_pm_is_used()) { + /* Build IPA Resource manager dependency graph */ + ODU_BRIDGE_DBG_LOW("build dependency graph\n"); + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res && res != -EINPROGRESS) { + ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n"); + goto fail_add_dependency_1; + } + + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_ODU_ADAPT_CONS); + if (res && res != -EINPROGRESS) { + ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n"); + goto fail_add_dependency_2; + } + } + + /* configure RX (ODU->IPA) EP */ + odu_prod_params.client = IPA_CLIENT_ODU_PROD; + odu_prod_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ; + odu_prod_params.priv = odu_bridge_ctx->priv; + odu_prod_params.notify = odu_bridge_ctx->tx_dp_notify; + odu_prod_params.skip_ep_cfg = true; + res = ipa_setup_sys_pipe(&odu_prod_params, + &odu_bridge_ctx->odu_prod_hdl); + if (res) { + ODU_BRIDGE_ERR("fail to setup sys pipe ODU_PROD %d\n", res); + goto fail_odu_prod; + } + + /* configure TX tethered (IPA->ODU) EP */ + odu_teth_cons_params.client = IPA_CLIENT_ODU_TETH_CONS; + odu_teth_cons_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ; + odu_teth_cons_params.priv = odu_bridge_ctx->priv; + odu_teth_cons_params.notify = odu_bridge_teth_cons_cb; + odu_teth_cons_params.skip_ep_cfg = true; + res = ipa_setup_sys_pipe(&odu_teth_cons_params, + &odu_bridge_ctx->odu_teth_cons_hdl); + if (res) { + ODU_BRIDGE_ERR("fail to setup sys pipe ODU_TETH_CONS %d\n", + res); + goto fail_odu_teth_cons; + } + + /* configure TX embedded(IPA->ODU) EP */ + odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS; + odu_emb_cons_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN; + odu_emb_cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT; + odu_emb_cons_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ; + odu_emb_cons_params.priv = odu_bridge_ctx->priv; + odu_emb_cons_params.notify = odu_bridge_emb_cons_cb; + res = ipa_setup_sys_pipe(&odu_emb_cons_params, + &odu_bridge_ctx->odu_emb_cons_hdl); + if (res) { + ODU_BRIDGE_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res); + goto fail_odu_emb_cons; + } + + ODU_BRIDGE_DBG_LOW("odu_prod_hdl = %d, odu_emb_cons_hdl = %d\n", + odu_bridge_ctx->odu_prod_hdl, odu_bridge_ctx->odu_emb_cons_hdl); + 
ODU_BRIDGE_DBG_LOW("odu_teth_cons_hdl = %d\n", + odu_bridge_ctx->odu_teth_cons_hdl); + + ODU_BRIDGE_FUNC_EXIT(); + + return 0; + +fail_odu_emb_cons: + ipa_teardown_sys_pipe(odu_bridge_ctx->odu_teth_cons_hdl); + odu_bridge_ctx->odu_teth_cons_hdl = 0; +fail_odu_teth_cons: + ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl); + odu_bridge_ctx->odu_prod_hdl = 0; +fail_odu_prod: + if (!ipa_pm_is_used()) + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_ODU_ADAPT_CONS); +fail_add_dependency_2: + if (!ipa_pm_is_used()) + ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD, + IPA_RM_RESOURCE_Q6_CONS); +fail_add_dependency_1: + return res; +} + +static int odu_bridge_disconnect_router(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl); + if (res) + ODU_BRIDGE_ERR("teardown ODU PROD failed\n"); + odu_bridge_ctx->odu_prod_hdl = 0; + + res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_emb_cons_hdl); + if (res) + ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n"); + odu_bridge_ctx->odu_emb_cons_hdl = 0; + + ODU_BRIDGE_FUNC_EXIT(); + + return 0; +} + +static int odu_bridge_disconnect_bridge(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl); + if (res) + ODU_BRIDGE_ERR("teardown ODU PROD failed\n"); + odu_bridge_ctx->odu_prod_hdl = 0; + + res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_teth_cons_hdl); + if (res) + ODU_BRIDGE_ERR("teardown ODU TETH CONS failed\n"); + odu_bridge_ctx->odu_teth_cons_hdl = 0; + + res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_emb_cons_hdl); + if (res) + ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n"); + odu_bridge_ctx->odu_emb_cons_hdl = 0; + + if (!ipa_pm_is_used()) { + /* Delete IPA Resource manager dependency graph */ + ODU_BRIDGE_DBG("deleting dependency graph\n"); + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res && res != -EINPROGRESS) + ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n"); + + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_ODU_ADAPT_CONS); + if (res && res != -EINPROGRESS) + ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n"); + } + + return 0; +} + +/** + * odu_bridge_disconnect() - Disconnect odu bridge + * + * Disconnect all pipes and deletes IPA RM dependencies on bridge mode + * + * Return codes: 0- success, error otherwise + */ +int odu_bridge_disconnect(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + if (!odu_bridge_ctx) { + ODU_BRIDGE_ERR("Not initialized\n"); + return -EFAULT; + } + + if (!odu_bridge_ctx->is_connected) { + ODU_BRIDGE_ERR("Not connected\n"); + return -EFAULT; + } + + mutex_lock(&odu_bridge_ctx->lock); + if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) { + res = odu_bridge_disconnect_router(); + if (res) { + ODU_BRIDGE_ERR("disconnect_router failed %d\n", res); + goto out; + } + } else { + res = odu_bridge_disconnect_bridge(); + if (res) { + ODU_BRIDGE_ERR("disconnect_bridge failed %d\n", res); + goto out; + } + } + + odu_bridge_ctx->is_connected = false; + res = 0; +out: + mutex_unlock(&odu_bridge_ctx->lock); + ODU_BRIDGE_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(odu_bridge_disconnect); + +/** + * odu_bridge_connect() - Connect odu bridge. 
+ * + * Call to the mode-specific connect function for connection IPA pipes + * and adding IPA RM dependencies + + * Return codes: 0: success + * -EINVAL: invalid parameters + * -EPERM: Operation not permitted as the bridge is already + * connected + */ +int odu_bridge_connect(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + if (!odu_bridge_ctx) { + ODU_BRIDGE_ERR("Not initialized\n"); + return -EFAULT; + } + + if (odu_bridge_ctx->is_connected) { + ODU_BRIDGE_ERR("already connected\n"); + return -EFAULT; + } + + mutex_lock(&odu_bridge_ctx->lock); + if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) { + res = odu_bridge_connect_router(); + if (res) { + ODU_BRIDGE_ERR("connect_router failed\n"); + goto bail; + } + } else { + res = odu_bridge_connect_bridge(); + if (res) { + ODU_BRIDGE_ERR("connect_bridge failed\n"); + goto bail; + } + } + + odu_bridge_ctx->is_connected = true; + res = 0; +bail: + mutex_unlock(&odu_bridge_ctx->lock); + ODU_BRIDGE_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(odu_bridge_connect); + +/** + * odu_bridge_set_mode() - Set bridge mode to Router/Bridge + * @mode: mode to be set + */ +static int odu_bridge_set_mode(enum odu_bridge_mode mode) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + if (mode < 0 || mode >= ODU_BRIDGE_MODE_MAX) { + ODU_BRIDGE_ERR("Unsupported mode: %d\n", mode); + return -EFAULT; + } + + ODU_BRIDGE_DBG_LOW("setting mode: %d\n", mode); + mutex_lock(&odu_bridge_ctx->lock); + + if (odu_bridge_ctx->mode == mode) { + ODU_BRIDGE_DBG_LOW("same mode\n"); + res = 0; + goto bail; + } + + if (odu_bridge_ctx->is_connected) { + /* first disconnect the old configuration */ + if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) { + res = odu_bridge_disconnect_router(); + if (res) { + ODU_BRIDGE_ERR("disconnect_router failed\n"); + goto bail; + } + } else { + res = odu_bridge_disconnect_bridge(); + if (res) { + ODU_BRIDGE_ERR("disconnect_bridge failed\n"); + goto bail; + } + } + + /* connect the new configuration */ + if (mode == ODU_BRIDGE_MODE_ROUTER) { + res = odu_bridge_connect_router(); + if (res) { + ODU_BRIDGE_ERR("connect_router failed\n"); + goto bail; + } + } else { + res = odu_bridge_connect_bridge(); + if (res) { + ODU_BRIDGE_ERR("connect_bridge failed\n"); + goto bail; + } + } + } + odu_bridge_ctx->mode = mode; + res = 0; +bail: + mutex_unlock(&odu_bridge_ctx->lock); + ODU_BRIDGE_FUNC_EXIT(); + return res; +}; + +/** + * odu_bridge_set_llv6_addr() - Set link local ipv6 address + * @llv6_addr: odu network interface link local address + * + * This function sets the link local ipv6 address provided by IOCTL + */ +static int odu_bridge_set_llv6_addr(struct in6_addr *llv6_addr) +{ + struct in6_addr llv6_addr_host; + + ODU_BRIDGE_FUNC_ENTRY(); + + llv6_addr_host.s6_addr32[0] = ntohl(llv6_addr->s6_addr32[0]); + llv6_addr_host.s6_addr32[1] = ntohl(llv6_addr->s6_addr32[1]); + llv6_addr_host.s6_addr32[2] = ntohl(llv6_addr->s6_addr32[2]); + llv6_addr_host.s6_addr32[3] = ntohl(llv6_addr->s6_addr32[3]); + + memcpy(&odu_bridge_ctx->llv6_addr, &llv6_addr_host, + sizeof(odu_bridge_ctx->llv6_addr)); + ODU_BRIDGE_DBG_LOW("LLV6 addr: %pI6c\n", &odu_bridge_ctx->llv6_addr); + + ODU_BRIDGE_FUNC_EXIT(); + + return 0; +}; + +static long odu_bridge_ioctl(struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + int res = 0; + struct in6_addr llv6_addr; + + ODU_BRIDGE_DBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd)); + + if ((_IOC_TYPE(cmd) != ODU_BRIDGE_IOC_MAGIC) || + (_IOC_NR(cmd) >= ODU_BRIDGE_IOCTL_MAX)) { + ODU_BRIDGE_ERR("Invalid ioctl\n"); + return -ENOIOCTLCMD; + 
} + + switch (cmd) { + case ODU_BRIDGE_IOC_SET_MODE: + ODU_BRIDGE_DBG("ODU_BRIDGE_IOC_SET_MODE ioctl called\n"); + res = odu_bridge_set_mode(arg); + if (res) { + ODU_BRIDGE_ERR("Error, res = %d\n", res); + break; + } + break; + + case ODU_BRIDGE_IOC_SET_LLV6_ADDR: + ODU_BRIDGE_DBG("ODU_BRIDGE_IOC_SET_LLV6_ADDR ioctl called\n"); + res = copy_from_user(&llv6_addr, + (struct in6_addr *)arg, + sizeof(llv6_addr)); + if (res) { + ODU_BRIDGE_ERR("Error, res = %d\n", res); + res = -EFAULT; + break; + } + + res = odu_bridge_set_llv6_addr(&llv6_addr); + if (res) { + ODU_BRIDGE_ERR("Error, res = %d\n", res); + break; + } + break; + + default: + ODU_BRIDGE_ERR("Unknown ioctl: %d\n", cmd); + WARN_ON(1); + } + + return res; +} + +#ifdef CONFIG_COMPAT +static long compat_odu_bridge_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case ODU_BRIDGE_IOC_SET_LLV6_ADDR32: + cmd = ODU_BRIDGE_IOC_SET_LLV6_ADDR; + break; + case ODU_BRIDGE_IOC_SET_MODE: + break; + default: + return -ENOIOCTLCMD; + } + return odu_bridge_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +#ifdef CONFIG_DEBUG_FS +static struct dentry *dent; +static struct dentry *dfile_stats; +static struct dentry *dfile_mode; + +static ssize_t odu_debugfs_stats(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int nbytes = 0; + + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "UL packets: %lld\n", + odu_bridge_ctx->stats.num_ul_packets); + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "DL packets: %lld\n", + odu_bridge_ctx->stats.num_dl_packets); + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "LAN packets: %lld\n", + odu_bridge_ctx->stats.num_lan_packets); + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t odu_debugfs_hw_bridge_mode_write(struct file *file, + const char __user *ubuf, + size_t count, + loff_t *ppos) +{ + unsigned long missing; + enum odu_bridge_mode mode; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + if (count > 0) + dbg_buff[count-1] = '\0'; + + if (strcmp(dbg_buff, "router") == 0) { + mode = ODU_BRIDGE_MODE_ROUTER; + } else if (strcmp(dbg_buff, "bridge") == 0) { + mode = ODU_BRIDGE_MODE_BRIDGE; + } else { + ODU_BRIDGE_ERR("Bad mode, got %s,\n" + "Use or .\n", dbg_buff); + return count; + } + + odu_bridge_set_mode(mode); + return count; +} + +static ssize_t odu_debugfs_hw_bridge_mode_read(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int nbytes = 0; + + switch (odu_bridge_ctx->mode) { + case ODU_BRIDGE_MODE_ROUTER: + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "router\n"); + break; + case ODU_BRIDGE_MODE_BRIDGE: + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "bridge\n"); + break; + default: + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "mode error\n"); + break; + + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +const struct file_operations odu_stats_ops = { + .read = odu_debugfs_stats, +}; + +const struct file_operations odu_hw_bridge_mode_ops = { + .read = odu_debugfs_hw_bridge_mode_read, + .write = odu_debugfs_hw_bridge_mode_write, +}; + +static void odu_debugfs_init(void) +{ + const mode_t read_only_mode = 0444; + const mode_t read_write_mode = 0666; + + dent = debugfs_create_dir("odu_ipa_bridge", 0); + if 
(IS_ERR(dent)) { + ODU_BRIDGE_ERR("fail to create folder odu_ipa_bridge\n"); + return; + } + + dfile_stats = + debugfs_create_file("stats", read_only_mode, dent, + 0, &odu_stats_ops); + if (!dfile_stats || IS_ERR(dfile_stats)) { + ODU_BRIDGE_ERR("fail to create file stats\n"); + goto fail; + } + + dfile_mode = + debugfs_create_file("mode", read_write_mode, + dent, 0, &odu_hw_bridge_mode_ops); + if (!dfile_mode || + IS_ERR(dfile_mode)) { + ODU_BRIDGE_ERR("fail to create file dfile_mode\n"); + goto fail; + } + + return; +fail: + debugfs_remove_recursive(dent); +} + +static void odu_debugfs_destroy(void) +{ + debugfs_remove_recursive(dent); +} + +#else +static void odu_debugfs_init(void) {} +static void odu_debugfs_destroy(void) {} +#endif /* CONFIG_DEBUG_FS */ + + +static const struct file_operations odu_bridge_drv_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = odu_bridge_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_odu_bridge_ioctl, +#endif +}; + +/** + * odu_bridge_tx_dp() - Send skb to ODU bridge + * @skb: skb to send + * @metadata: metadata on packet + * + * This function handles uplink packet. + * In Router Mode: + * packet is sent directly to IPA. + * In Router Mode: + * packet is classified if it should arrive to network stack. + * QMI IP packet should arrive to APPS network stack + * IPv6 Multicast packet should arrive to APPS network stack and Q6 + * + * Return codes: 0- success, error otherwise + */ +int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata) +{ + struct sk_buff *skb_copied = NULL; + struct ipv6hdr *ipv6hdr; + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + switch (odu_bridge_ctx->mode) { + case ODU_BRIDGE_MODE_ROUTER: + /* Router mode - pass skb to IPA */ + res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata); + if (res) { + ODU_BRIDGE_DBG("tx dp failed %d\n", res); + goto out; + } + odu_bridge_ctx->stats.num_ul_packets++; + goto out; + + case ODU_BRIDGE_MODE_BRIDGE: + ipv6hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN); + if (ipv6hdr->version == 6 && + ODU_BRIDGE_IS_QMI_ADDR(ipv6hdr->daddr)) { + ODU_BRIDGE_DBG_LOW("QMI packet\n"); + skb_copied = skb_clone(skb, GFP_KERNEL); + if (!skb_copied) { + ODU_BRIDGE_ERR("No memory\n"); + return -ENOMEM; + } + odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv, + IPA_RECEIVE, + (unsigned long)skb_copied); + odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv, + IPA_WRITE_DONE, + (unsigned long)skb); + odu_bridge_ctx->stats.num_ul_packets++; + odu_bridge_ctx->stats.num_lan_packets++; + res = 0; + goto out; + } + + if (ipv6hdr->version == 6 && + ipv6_addr_is_multicast(&ipv6hdr->daddr)) { + ODU_BRIDGE_DBG_LOW( + "Multicast pkt, send to APPS and IPA\n"); + skb_copied = skb_clone(skb, GFP_KERNEL); + if (!skb_copied) { + ODU_BRIDGE_ERR("No memory\n"); + return -ENOMEM; + } + + res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata); + if (res) { + ODU_BRIDGE_DBG("tx dp failed %d\n", res); + dev_kfree_skb(skb_copied); + goto out; + } + + odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv, + IPA_RECEIVE, + (unsigned long)skb_copied); + odu_bridge_ctx->stats.num_ul_packets++; + odu_bridge_ctx->stats.num_lan_packets++; + goto out; + } + + res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata); + if (res) { + ODU_BRIDGE_DBG("tx dp failed %d\n", res); + goto out; + } + odu_bridge_ctx->stats.num_ul_packets++; + goto out; + + default: + ODU_BRIDGE_ERR("Unsupported mode: %d\n", odu_bridge_ctx->mode); + WARN_ON(1); + res = -EFAULT; + + } +out: + ODU_BRIDGE_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(odu_bridge_tx_dp); + +static 
int odu_bridge_add_hdrs(void) +{ + struct ipa_ioc_add_hdr *hdrs; + struct ipa_hdr_add *ipv4_hdr; + struct ipa_hdr_add *ipv6_hdr; + struct ethhdr *eth_ipv4; + struct ethhdr *eth_ipv6; + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + hdrs = kzalloc(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr), + GFP_KERNEL); + if (!hdrs) { + ODU_BRIDGE_ERR("no mem\n"); + res = -ENOMEM; + goto out; + } + ipv4_hdr = &hdrs->hdr[0]; + eth_ipv4 = (struct ethhdr *)(ipv4_hdr->hdr); + ipv6_hdr = &hdrs->hdr[1]; + eth_ipv6 = (struct ethhdr *)(ipv6_hdr->hdr); + strlcpy(ipv4_hdr->name, ODU_BRIDGE_IPV4_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + memcpy(eth_ipv4->h_source, odu_bridge_ctx->device_ethaddr, ETH_ALEN); + eth_ipv4->h_proto = htons(ETH_P_IP); + ipv4_hdr->hdr_len = ETH_HLEN; + ipv4_hdr->is_partial = 1; + ipv4_hdr->is_eth2_ofst_valid = 1; + ipv4_hdr->eth2_ofst = 0; + strlcpy(ipv6_hdr->name, ODU_BRIDGE_IPV6_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + memcpy(eth_ipv6->h_source, odu_bridge_ctx->device_ethaddr, ETH_ALEN); + eth_ipv6->h_proto = htons(ETH_P_IPV6); + ipv6_hdr->hdr_len = ETH_HLEN; + ipv6_hdr->is_partial = 1; + ipv6_hdr->is_eth2_ofst_valid = 1; + ipv6_hdr->eth2_ofst = 0; + hdrs->commit = 1; + hdrs->num_hdrs = 2; + res = ipa_add_hdr(hdrs); + if (res) { + ODU_BRIDGE_ERR("Fail on Header-Insertion(%d)\n", res); + goto out_free_mem; + } + if (ipv4_hdr->status) { + ODU_BRIDGE_ERR("Fail on Header-Insertion ipv4(%d)\n", + ipv4_hdr->status); + res = ipv4_hdr->status; + goto out_free_mem; + } + if (ipv6_hdr->status) { + ODU_BRIDGE_ERR("Fail on Header-Insertion ipv6(%d)\n", + ipv6_hdr->status); + res = ipv6_hdr->status; + goto out_free_mem; + } + odu_bridge_ctx->odu_br_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl; + odu_bridge_ctx->odu_br_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl; + + res = 0; +out_free_mem: + kfree(hdrs); +out: + ODU_BRIDGE_FUNC_EXIT(); + return res; +} + +static void odu_bridge_del_hdrs(void) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *ipv4; + struct ipa_hdr_del *ipv6; + int result; + + del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) + + sizeof(*ipv6), GFP_KERNEL); + if (!del_hdr) + return; + del_hdr->commit = 1; + del_hdr->num_hdls = 2; + ipv4 = &del_hdr->hdl[0]; + ipv4->hdl = odu_bridge_ctx->odu_br_ipv4_hdr_hdl; + ipv6 = &del_hdr->hdl[1]; + ipv6->hdl = odu_bridge_ctx->odu_br_ipv6_hdr_hdl; + result = ipa_del_hdr(del_hdr); + if (result || ipv4->status || ipv6->status) + ODU_BRIDGE_ERR("ipa_del_hdr failed"); + kfree(del_hdr); +} + +/** + * odu_bridge_register_properties() - set Tx/Rx properties for ipacm + * + * Register the network interface interface with Tx and Rx properties + * Tx properties are for data flowing from IPA to adapter, they + * have Header-Insertion properties both for Ipv4 and Ipv6 Ethernet framing. + * Rx properties are for data flowing from adapter to IPA, they have + * simple rule which always "hit". 
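+ * Both Tx properties reference the partial Ethernet headers installed by
+ * odu_bridge_add_hdrs() (ODU_BRIDGE_IPV4_HDR_NAME and
+ * ODU_BRIDGE_IPV6_HDR_NAME).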
+ * + */ +static int odu_bridge_register_properties(void) +{ + struct ipa_tx_intf tx_properties = {0}; + struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} }; + struct ipa_ioc_tx_intf_prop *ipv4_property; + struct ipa_ioc_tx_intf_prop *ipv6_property; + struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} }; + struct ipa_rx_intf rx_properties = {0}; + struct ipa_ioc_rx_intf_prop *rx_ipv4_property; + struct ipa_ioc_rx_intf_prop *rx_ipv6_property; + int res = 0; + + ODU_BRIDGE_FUNC_ENTRY(); + + tx_properties.prop = properties; + ipv4_property = &tx_properties.prop[0]; + ipv4_property->ip = IPA_IP_v4; + ipv4_property->dst_pipe = IPA_CLIENT_ODU_EMB_CONS; + ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + strlcpy(ipv4_property->hdr_name, ODU_BRIDGE_IPV4_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + ipv6_property = &tx_properties.prop[1]; + ipv6_property->ip = IPA_IP_v6; + ipv6_property->dst_pipe = IPA_CLIENT_ODU_EMB_CONS; + ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + strlcpy(ipv6_property->hdr_name, ODU_BRIDGE_IPV6_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + tx_properties.num_props = 2; + + rx_properties.prop = rx_ioc_properties; + rx_ipv4_property = &rx_properties.prop[0]; + rx_ipv4_property->ip = IPA_IP_v4; + rx_ipv4_property->attrib.attrib_mask = 0; + rx_ipv4_property->src_pipe = IPA_CLIENT_ODU_PROD; + rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_ipv6_property = &rx_properties.prop[1]; + rx_ipv6_property->ip = IPA_IP_v6; + rx_ipv6_property->attrib.attrib_mask = 0; + rx_ipv6_property->src_pipe = IPA_CLIENT_ODU_PROD; + rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_properties.num_props = 2; + + res = ipa_register_intf(odu_bridge_ctx->netdev_name, &tx_properties, + &rx_properties); + if (res) { + ODU_BRIDGE_ERR("fail on Tx/Rx properties registration %d\n", + res); + } + + ODU_BRIDGE_FUNC_EXIT(); + + return res; +} + +static void odu_bridge_deregister_properties(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + res = ipa_deregister_intf(odu_bridge_ctx->netdev_name); + if (res) + ODU_BRIDGE_ERR("Fail on Tx prop deregister %d\n", res); + ODU_BRIDGE_FUNC_EXIT(); +} + +/** + * odu_bridge_init() - Initialize the ODU bridge driver + * @params: initialization parameters + * + * This function initialize all bridge internal data and register odu bridge to + * kernel for IOCTL and debugfs. + * Header addition and properties are registered to IPA driver. 
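+ * Note that the IPA driver must already be ready (ipa_is_ready()) when this
+ * function is called; otherwise it fails with -EFAULT.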
+ * + * Return codes: 0: success, + * -EINVAL - Bad parameter + * Other negative value - Failure + */ +int odu_bridge_init(struct odu_bridge_params *params) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + if (!params) { + ODU_BRIDGE_ERR("null pointer params\n"); + return -EINVAL; + } + if (!params->netdev_name) { + ODU_BRIDGE_ERR("null pointer params->netdev_name\n"); + return -EINVAL; + } + if (!params->tx_dp_notify) { + ODU_BRIDGE_ERR("null pointer params->tx_dp_notify\n"); + return -EINVAL; + } + if (!params->send_dl_skb) { + ODU_BRIDGE_ERR("null pointer params->send_dl_skb\n"); + return -EINVAL; + } + if (odu_bridge_ctx) { + ODU_BRIDGE_ERR("Already initialized\n"); + return -EFAULT; + } + if (!ipa_is_ready()) { + ODU_BRIDGE_ERR("IPA is not ready\n"); + return -EFAULT; + } + + ODU_BRIDGE_DBG("device_ethaddr=%pM\n", params->device_ethaddr); + + odu_bridge_ctx = kzalloc(sizeof(*odu_bridge_ctx), GFP_KERNEL); + if (!odu_bridge_ctx) { + ODU_BRIDGE_ERR("kzalloc err.\n"); + return -ENOMEM; + } + + odu_bridge_ctx->class = class_create(THIS_MODULE, ODU_BRIDGE_DRV_NAME); + if (!odu_bridge_ctx->class) { + ODU_BRIDGE_ERR("Class_create err.\n"); + res = -ENODEV; + goto fail_class_create; + } + + res = alloc_chrdev_region(&odu_bridge_ctx->dev_num, 0, 1, + ODU_BRIDGE_DRV_NAME); + if (res) { + ODU_BRIDGE_ERR("alloc_chrdev_region err.\n"); + res = -ENODEV; + goto fail_alloc_chrdev_region; + } + + odu_bridge_ctx->dev = device_create(odu_bridge_ctx->class, NULL, + odu_bridge_ctx->dev_num, odu_bridge_ctx, ODU_BRIDGE_DRV_NAME); + if (IS_ERR(odu_bridge_ctx->dev)) { + ODU_BRIDGE_ERR(":device_create err.\n"); + res = -ENODEV; + goto fail_device_create; + } + + cdev_init(&odu_bridge_ctx->cdev, &odu_bridge_drv_fops); + odu_bridge_ctx->cdev.owner = THIS_MODULE; + odu_bridge_ctx->cdev.ops = &odu_bridge_drv_fops; + + res = cdev_add(&odu_bridge_ctx->cdev, odu_bridge_ctx->dev_num, 1); + if (res) { + ODU_BRIDGE_ERR(":cdev_add err=%d\n", -res); + res = -ENODEV; + goto fail_cdev_add; + } + + odu_debugfs_init(); + + strlcpy(odu_bridge_ctx->netdev_name, params->netdev_name, + IPA_RESOURCE_NAME_MAX); + odu_bridge_ctx->priv = params->priv; + odu_bridge_ctx->tx_dp_notify = params->tx_dp_notify; + odu_bridge_ctx->send_dl_skb = params->send_dl_skb; + memcpy(odu_bridge_ctx->device_ethaddr, params->device_ethaddr, + ETH_ALEN); + odu_bridge_ctx->ipa_sys_desc_size = params->ipa_desc_size; + odu_bridge_ctx->mode = ODU_BRIDGE_MODE_ROUTER; + + mutex_init(&odu_bridge_ctx->lock); + + res = odu_bridge_add_hdrs(); + if (res) { + ODU_BRIDGE_ERR("fail on odu_bridge_add_hdr %d\n", res); + goto fail_add_hdrs; + } + + res = odu_bridge_register_properties(); + if (res) { + ODU_BRIDGE_ERR("fail on register properties %d\n", res); + goto fail_register_properties; + } + + ODU_BRIDGE_FUNC_EXIT(); + return 0; + +fail_register_properties: + odu_bridge_del_hdrs(); +fail_add_hdrs: + odu_debugfs_destroy(); +fail_cdev_add: + device_destroy(odu_bridge_ctx->class, odu_bridge_ctx->dev_num); +fail_device_create: + unregister_chrdev_region(odu_bridge_ctx->dev_num, 1); +fail_alloc_chrdev_region: + class_destroy(odu_bridge_ctx->class); +fail_class_create: + kfree(odu_bridge_ctx); + odu_bridge_ctx = NULL; + return res; +} +EXPORT_SYMBOL(odu_bridge_init); + +/** + * odu_bridge_cleanup() - De-Initialize the ODU bridge driver + * + * Return codes: 0: success, + * -EINVAL - Bad parameter + * Other negative value - Failure + */ +int odu_bridge_cleanup(void) +{ + ODU_BRIDGE_FUNC_ENTRY(); + + if (!odu_bridge_ctx) { + ODU_BRIDGE_ERR("Not initialized\n"); + return 
-EFAULT; + } + + if (odu_bridge_ctx->is_connected) { + ODU_BRIDGE_ERR("cannot deinit while bridge is conncetd\n"); + return -EFAULT; + } + + odu_bridge_deregister_properties(); + odu_bridge_del_hdrs(); + odu_debugfs_destroy(); + cdev_del(&odu_bridge_ctx->cdev); + device_destroy(odu_bridge_ctx->class, odu_bridge_ctx->dev_num); + unregister_chrdev_region(odu_bridge_ctx->dev_num, 1); + class_destroy(odu_bridge_ctx->class); + ipc_log_context_destroy(odu_bridge_ctx->logbuf); + ipc_log_context_destroy(odu_bridge_ctx->logbuf_low); + kfree(odu_bridge_ctx); + odu_bridge_ctx = NULL; + + ODU_BRIDGE_FUNC_EXIT(); + return 0; +} +EXPORT_SYMBOL(odu_bridge_cleanup); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ODU bridge driver"); diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c new file mode 100644 index 000000000000..3a407fcede82 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c @@ -0,0 +1,2704 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_common_i.h" +#include "../ipa_v3/ipa_pm.h" + +#define CREATE_TRACE_POINTS +#include "rndis_ipa_trace.h" + +#define DRV_NAME "RNDIS_IPA" +#define DEBUGFS_DIR_NAME "rndis_ipa" +#define DEBUGFS_AGGR_DIR_NAME "rndis_ipa_aggregation" +#define NETDEV_NAME "rndis" +#define DRV_RESOURCE_ID IPA_RM_RESOURCE_RNDIS_PROD +#define IPV4_HDR_NAME "rndis_eth_ipv4" +#define IPV6_HDR_NAME "rndis_eth_ipv6" +#define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS +#define INACTIVITY_MSEC_DELAY 100 +#define DEFAULT_OUTSTANDING_HIGH 64 +#define DEFAULT_OUTSTANDING_LOW 32 +#define DEBUGFS_TEMP_BUF_SIZE 4 +#define RNDIS_IPA_PKT_TYPE 0x00000001 +#define RNDIS_IPA_DFLT_RT_HDL 0 +#define FROM_IPA_TO_USB_BAMDMA 4 +#define FROM_USB_TO_IPA_BAMDMA 5 +#define BAM_DMA_MAX_PKT_NUMBER 10 +#define BAM_DMA_DATA_FIFO_SIZE \ + (BAM_DMA_MAX_PKT_NUMBER * \ + (ETH_FRAME_LEN + sizeof(struct rndis_pkt_hdr))) +#define BAM_DMA_DESC_FIFO_SIZE \ + (BAM_DMA_MAX_PKT_NUMBER * (sizeof(struct sps_iovec))) +#define TX_TIMEOUT (5 * HZ) +#define MIN_TX_ERROR_SLEEP_PERIOD 500 +#define DEFAULT_AGGR_TIME_LIMIT 1000 /* 1ms */ +#define DEFAULT_AGGR_PKT_LIMIT 0 + +#define IPA_RNDIS_IPC_LOG_PAGES 50 + +#define IPA_RNDIS_IPC_LOGGING(buf, fmt, args...) \ + do { \ + if (buf) \ + ipc_log_string((buf), fmt, __func__, __LINE__, \ + ## args); \ + } while (0) + +static void *ipa_rndis_logbuf; + +#define RNDIS_IPA_DEBUG(fmt, args...) \ + do { \ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa_rndis_logbuf) { \ + IPA_RNDIS_IPC_LOGGING(ipa_rndis_logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define RNDIS_IPA_DEBUG_XMIT(fmt, args...) \ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) + +#define RNDIS_IPA_ERROR(fmt, args...) 
\ + do { \ + pr_err(DRV_NAME "@%s@%d@ctx:%s: "\ + fmt, __func__, __LINE__, current->comm, ## args);\ + if (ipa_rndis_logbuf) { \ + IPA_RNDIS_IPC_LOGGING(ipa_rndis_logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define NULL_CHECK_RETVAL(ptr) \ + do { \ + if (!(ptr)) { \ + RNDIS_IPA_ERROR("null pointer #ptr\n"); \ + ret = -EINVAL; \ + } \ + } \ + while (0) + +#define RNDIS_HDR_OFST(field) offsetof(struct rndis_pkt_hdr, field) +#define RNDIS_IPA_LOG_ENTRY() RNDIS_IPA_DEBUG("begin\n") +#define RNDIS_IPA_LOG_EXIT() RNDIS_IPA_DEBUG("end\n") + +/** + * enum rndis_ipa_state - specify the current driver internal state + * which is guarded by a state machine. + * + * The driver internal state changes due to its external API usage. + * The driver saves its internal state to guard from caller illegal + * call sequence. + * states: + * UNLOADED is the first state which is the default one and is also the state + * after the driver gets unloaded(cleanup). + * INITIALIZED is the driver state once it finished registering + * the network device and all internal data struct were initialized + * CONNECTED is the driver state once the USB pipes were connected to IPA + * UP is the driver state after the interface mode was set to UP but the + * pipes are not connected yet - this state is meta-stable state. + * CONNECTED_AND_UP is the driver state when the pipe were connected and + * the interface got UP request from the network stack. this is the driver + * idle operation state which allows it to transmit/receive data. + * INVALID is a state which is not allowed. + */ +enum rndis_ipa_state { + RNDIS_IPA_UNLOADED = 0, + RNDIS_IPA_INITIALIZED = 1, + RNDIS_IPA_CONNECTED = 2, + RNDIS_IPA_UP = 3, + RNDIS_IPA_CONNECTED_AND_UP = 4, + RNDIS_IPA_INVALID = 5, +}; + +/** + * enum rndis_ipa_operation - enumerations used to describe the API operation + * + * Those enums are used as input for the driver state machine. 
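+ * A typical sequence is INITIALIZE, CONNECT, OPEN, STOP, DISCONNECT and
+ * finally CLEANUP, although OPEN may also precede CONNECT (the meta-stable
+ * UP state described for enum rndis_ipa_state above).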
+ */ +enum rndis_ipa_operation { + RNDIS_IPA_INITIALIZE, + RNDIS_IPA_CONNECT, + RNDIS_IPA_OPEN, + RNDIS_IPA_STOP, + RNDIS_IPA_DISCONNECT, + RNDIS_IPA_CLEANUP, +}; + +#define RNDIS_IPA_STATE_DEBUG(ctx) \ + RNDIS_IPA_DEBUG("Driver state: %s\n",\ + rndis_ipa_state_string((ctx)->state)) + + +/** + * struct rndis_ipa_dev - main driver context parameters + * + * @net: network interface struct implemented by this driver + * @directory: debugfs directory for various debugging switches + * @tx_filter: flag that enable/disable Tx path to continue to IPA + * @tx_dropped: number of filtered out Tx packets + * @tx_dump_enable: dump all Tx packets + * @rx_filter: flag that enable/disable Rx path to continue to IPA + * @rx_dropped: number of filtered out Rx packets + * @rx_dump_enable: dump all Rx packets + * @icmp_filter: allow all ICMP packet to pass through the filters + * @rm_enable: flag that enable/disable Resource manager request prior to Tx + * @deaggregation_enable: enable/disable IPA HW deaggregation logic + * @during_xmit_error: flags that indicate that the driver is in a middle + * of error handling in Tx path + * @directory: holds all debug flags used by the driver to allow cleanup + * for driver unload + * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table + * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table + * @usb_to_ipa_hdl: save handle for IPA pipe operations + * @ipa_to_usb_hdl: save handle for IPA pipe operations + * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed + * @outstanding_high: number of outstanding packets allowed + * @outstanding_low: number of outstanding packets which shall cause + * to netdev queue start (after stopped due to outstanding_high reached) + * @error_msec_sleep_time: number of msec for sleeping in case of Tx error + * @state: current state of the driver + * @host_ethaddr: holds the tethered PC ethernet address + * @device_ethaddr: holds the device ethernet address + * @device_ready_notify: callback supplied by USB core driver + * This callback shall be called by the Netdev once the Netdev internal + * state is changed to RNDIS_IPA_CONNECTED_AND_UP + * @xmit_error_delayed_work: work item for cases where IPA driver Tx fails + * @state_lock: used to protect the state variable. + * @pm_hdl: handle for IPA PM framework + * @is_vlan_mode: should driver work in vlan mode? + */ +struct rndis_ipa_dev { + struct net_device *net; + bool tx_filter; + u32 tx_dropped; + bool tx_dump_enable; + bool rx_filter; + u32 rx_dropped; + bool rx_dump_enable; + bool icmp_filter; + bool rm_enable; + bool deaggregation_enable; + bool during_xmit_error; + struct dentry *directory; + u32 eth_ipv4_hdr_hdl; + u32 eth_ipv6_hdr_hdl; + u32 usb_to_ipa_hdl; + u32 ipa_to_usb_hdl; + atomic_t outstanding_pkts; + u32 outstanding_high; + u32 outstanding_low; + u32 error_msec_sleep_time; + enum rndis_ipa_state state; + u8 host_ethaddr[ETH_ALEN]; + u8 device_ethaddr[ETH_ALEN]; + void (*device_ready_notify)(void); + struct delayed_work xmit_error_delayed_work; + spinlock_t state_lock; /* Spinlock for the state variable.*/ + u32 pm_hdl; + bool is_vlan_mode; +}; + +/** + * rndis_pkt_hdr - RNDIS_IPA representation of REMOTE_NDIS_PACKET_MSG + * @msg_type: for REMOTE_NDIS_PACKET_MSG this value should be 1 + * @msg_len: total message length in bytes, including RNDIS header an payload + * @data_ofst: offset in bytes from start of the data_ofst to payload + * @data_len: payload size in bytes + * @zeroes: OOB place holder - not used for RNDIS_IPA. 
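+ * The full header occupies 44 bytes (eleven 32-bit little-endian words),
+ * matching the size of a REMOTE_NDIS_PACKET_MSG header.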
+ */ +struct rndis_pkt_hdr { + __le32 msg_type; + __le32 msg_len; + __le32 data_ofst; + __le32 data_len; + __le32 zeroes[7]; +} __packed__; + +static int rndis_ipa_open(struct net_device *net); +static void rndis_ipa_packet_receive_notify + (void *private, enum ipa_dp_evt_type evt, unsigned long data); +static void rndis_ipa_tx_complete_notify + (void *private, enum ipa_dp_evt_type evt, unsigned long data); +static void rndis_ipa_tx_timeout(struct net_device *net); +static int rndis_ipa_stop(struct net_device *net); +static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx); +static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb, + struct rndis_ipa_dev *rndis_ipa_ctx); +static void rndis_ipa_xmit_error(struct sk_buff *skb); +static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work); +static void rndis_ipa_prepare_header_insertion + (int eth_type, + const char *hdr_name, struct ipa_hdr_add *add_hdr, + const void *dst_mac, const void *src_mac, bool is_vlan_mode); +static int rndis_ipa_hdrs_cfg + (struct rndis_ipa_dev *rndis_ipa_ctx, + const void *dst_mac, const void *src_mac); +static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx); +static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net); +static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode); +static int rndis_ipa_deregister_properties(char *netdev_name); +static void rndis_ipa_rm_notify + (void *user_data, enum ipa_rm_event event, + unsigned long data); +static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx); +static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx); +static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx); +static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx); +static bool rx_filter(struct sk_buff *skb); +static bool tx_filter(struct sk_buff *skb); +static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx); +static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx); +static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx); +static netdev_tx_t rndis_ipa_start_xmit + (struct sk_buff *skb, struct net_device *net); +static int rndis_ipa_debugfs_atomic_open + (struct inode *inode, struct file *file); +static int rndis_ipa_debugfs_aggr_open + (struct inode *inode, struct file *file); +static ssize_t rndis_ipa_debugfs_aggr_write + (struct file *file, + const char __user *buf, size_t count, loff_t *ppos); +static ssize_t rndis_ipa_debugfs_atomic_read + (struct file *file, + char __user *ubuf, size_t count, loff_t *ppos); +static void rndis_ipa_dump_skb(struct sk_buff *skb); +static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx); +static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx); +static int rndis_ipa_ep_registers_cfg + (u32 usb_to_ipa_hdl, + u32 ipa_to_usb_hdl, u32 max_xfer_size_bytes_to_dev, + u32 max_xfer_size_bytes_to_host, u32 mtu, + bool deaggr_enable, + bool is_vlan_mode); +static int rndis_ipa_set_device_ethernet_addr + (u8 *dev_ethaddr, + u8 device_ethaddr[]); +static enum rndis_ipa_state rndis_ipa_next_state + (enum rndis_ipa_state current_state, + enum rndis_ipa_operation operation); +static const char *rndis_ipa_state_string(enum rndis_ipa_state state); +static int rndis_ipa_init_module(void); +static void rndis_ipa_cleanup_module(void); + +struct rndis_ipa_dev *rndis_ipa; + +static const struct net_device_ops rndis_ipa_netdev_ops = { + .ndo_open = rndis_ipa_open, + 
.ndo_stop = rndis_ipa_stop, + .ndo_start_xmit = rndis_ipa_start_xmit, + .ndo_tx_timeout = rndis_ipa_tx_timeout, + .ndo_get_stats = rndis_ipa_get_stats, + .ndo_set_mac_address = eth_mac_addr, +}; + +const struct file_operations rndis_ipa_debugfs_atomic_ops = { + .open = rndis_ipa_debugfs_atomic_open, + .read = rndis_ipa_debugfs_atomic_read, +}; + +const struct file_operations rndis_ipa_aggr_ops = { + .open = rndis_ipa_debugfs_aggr_open, + .write = rndis_ipa_debugfs_aggr_write, +}; + +static struct ipa_ep_cfg ipa_to_usb_ep_cfg = { + .mode = { + .mode = IPA_BASIC, + .dst = IPA_CLIENT_APPS_LAN_CONS, + }, + .hdr = { + .hdr_len = ETH_HLEN + sizeof(struct rndis_pkt_hdr), + .hdr_ofst_metadata_valid = false, + .hdr_ofst_metadata = 0, + .hdr_additional_const_len = ETH_HLEN, + .hdr_ofst_pkt_size_valid = true, + .hdr_ofst_pkt_size = 3 * sizeof(u32), + .hdr_a5_mux = false, + .hdr_remove_additional = false, + .hdr_metadata_reg_valid = false, + }, + .hdr_ext = { + .hdr_pad_to_alignment = 0, + .hdr_total_len_or_pad_offset = 1 * sizeof(u32), + .hdr_payload_len_inc_padding = false, + .hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN, + .hdr_total_len_or_pad_valid = true, + .hdr_little_endian = true, + }, + .aggr = { + .aggr_en = IPA_ENABLE_AGGR, + .aggr = IPA_GENERIC, + .aggr_byte_limit = 4, + .aggr_time_limit = DEFAULT_AGGR_TIME_LIMIT, + .aggr_pkt_limit = DEFAULT_AGGR_PKT_LIMIT, + }, + .deaggr = { + .deaggr_hdr_len = 0, + .packet_offset_valid = 0, + .packet_offset_location = 0, + .max_packet_len = 0, + }, + .route = { + .rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL, + }, + .nat = { + .nat_en = IPA_SRC_NAT, + }, +}; + +static struct ipa_ep_cfg usb_to_ipa_ep_cfg_deaggr_dis = { + .mode = { + .mode = IPA_BASIC, + .dst = IPA_CLIENT_APPS_LAN_CONS, + }, + .hdr = { + .hdr_len = ETH_HLEN + sizeof(struct rndis_pkt_hdr), + .hdr_ofst_metadata_valid = false, + .hdr_ofst_metadata = 0, + .hdr_additional_const_len = 0, + .hdr_ofst_pkt_size_valid = true, + .hdr_ofst_pkt_size = 3 * sizeof(u32) + + sizeof(struct rndis_pkt_hdr), + .hdr_a5_mux = false, + .hdr_remove_additional = false, + .hdr_metadata_reg_valid = true, + }, + .hdr_ext = { + .hdr_pad_to_alignment = 0, + .hdr_total_len_or_pad_offset = 1 * sizeof(u32), + .hdr_payload_len_inc_padding = false, + .hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN, + .hdr_total_len_or_pad_valid = true, + .hdr_little_endian = true, + }, + + .aggr = { + .aggr_en = IPA_BYPASS_AGGR, + .aggr = 0, + .aggr_byte_limit = 0, + .aggr_time_limit = 0, + .aggr_pkt_limit = 0, + }, + .deaggr = { + .deaggr_hdr_len = 0, + .packet_offset_valid = false, + .packet_offset_location = 0, + .max_packet_len = 0, + }, + + .route = { + .rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL, + }, + .nat = { + .nat_en = IPA_BYPASS_NAT, + }, +}; + +static struct ipa_ep_cfg usb_to_ipa_ep_cfg_deaggr_en = { + .mode = { + .mode = IPA_BASIC, + .dst = IPA_CLIENT_APPS_LAN_CONS, + }, + .hdr = { + .hdr_len = ETH_HLEN, + .hdr_ofst_metadata_valid = false, + .hdr_ofst_metadata = 0, + .hdr_additional_const_len = 0, + .hdr_ofst_pkt_size_valid = true, + .hdr_ofst_pkt_size = 3 * sizeof(u32), + .hdr_a5_mux = false, + .hdr_remove_additional = false, + .hdr_metadata_reg_valid = true, + }, + .hdr_ext = { + .hdr_pad_to_alignment = 0, + .hdr_total_len_or_pad_offset = 1 * sizeof(u32), + .hdr_payload_len_inc_padding = false, + .hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN, + .hdr_total_len_or_pad_valid = true, + .hdr_little_endian = true, + }, + .aggr = { + .aggr_en = IPA_ENABLE_DEAGGR, + .aggr = IPA_GENERIC, + .aggr_byte_limit = 0, + .aggr_time_limit = 0, + .aggr_pkt_limit = 0, + }, + 
.deaggr = { + .deaggr_hdr_len = sizeof(struct rndis_pkt_hdr), + .packet_offset_valid = true, + .packet_offset_location = 8, + .max_packet_len = 8192, /* Will be overridden*/ + }, + .route = { + .rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL, + }, + .nat = { + .nat_en = IPA_BYPASS_NAT, + }, +}; + +/** + * rndis_template_hdr - RNDIS template structure for RNDIS_IPA SW insertion + * @msg_type: set for REMOTE_NDIS_PACKET_MSG (0x00000001) + * this value will be used for all data packets + * @msg_len: will add the skb length to get final size + * @data_ofst: this field value will not be changed + * @data_len: set as skb length to get final size + * @zeroes: make sure all OOB data is not used + */ +struct rndis_pkt_hdr rndis_template_hdr = { + .msg_type = RNDIS_IPA_PKT_TYPE, + .msg_len = sizeof(struct rndis_pkt_hdr), + .data_ofst = sizeof(struct rndis_pkt_hdr) - RNDIS_HDR_OFST(data_ofst), + .data_len = 0, + .zeroes = {0}, +}; + +static void rndis_ipa_msg_free_cb(void *buff, u32 len, u32 type) +{ + kfree(buff); +} + +/** + * rndis_ipa_init() - create network device and initialize internal + * data structures + * @params: in/out parameters required for initialization, + * see "struct ipa_usb_init_params" for more details + * + * Shall be called prior to pipe connection. + * Detailed description: + * - allocate the network device + * - set default values for driver internal switches and stash them inside + * the netdev private field + * - set needed headroom for RNDIS header + * - create debugfs folder and files + * - create IPA resource manager client + * - set the ethernet address for the netdev to be added on SW Tx path + * - add header insertion rules for IPA driver (based on host/device Ethernet + * addresses given in input params and on RNDIS data template struct) + * - register tx/rx properties to IPA driver (will be later used + * by IPA configuration manager to configure rest of the IPA rules) + * - set the carrier state to "off" (until connect is called) + * - register the network device + * - set the out parameters + * - change driver internal state to INITIALIZED + * + * Returns negative errno, or zero on success + */ +int rndis_ipa_init(struct ipa_usb_init_params *params) +{ + int result = 0; + struct net_device *net; + struct rndis_ipa_dev *rndis_ipa_ctx; + int ret; + + RNDIS_IPA_LOG_ENTRY(); + RNDIS_IPA_DEBUG("%s initializing\n", DRV_NAME); + ret = 0; + NULL_CHECK_RETVAL(params); + if (ret) + return ret; + + RNDIS_IPA_DEBUG + ("host_ethaddr=%pM, device_ethaddr=%pM\n", + params->host_ethaddr, + params->device_ethaddr); + + net = alloc_etherdev(sizeof(struct rndis_ipa_dev)); + if (!net) { + result = -ENOMEM; + RNDIS_IPA_ERROR("fail to allocate Ethernet device\n"); + goto fail_alloc_etherdev; + } + RNDIS_IPA_DEBUG("network device was successfully allocated\n"); + + rndis_ipa_ctx = netdev_priv(net); + if (!rndis_ipa_ctx) { + result = -ENOMEM; + RNDIS_IPA_ERROR("fail to extract netdev priv\n"); + goto fail_netdev_priv; + } + memset(rndis_ipa_ctx, 0, sizeof(*rndis_ipa_ctx)); + RNDIS_IPA_DEBUG("rndis_ipa_ctx (private)=%pK\n", rndis_ipa_ctx); + + spin_lock_init(&rndis_ipa_ctx->state_lock); + + rndis_ipa_ctx->net = net; + rndis_ipa_ctx->tx_filter = false; + rndis_ipa_ctx->rx_filter = false; + rndis_ipa_ctx->icmp_filter = true; + rndis_ipa_ctx->rm_enable = true; + rndis_ipa_ctx->tx_dropped = 0; + rndis_ipa_ctx->rx_dropped = 0; + rndis_ipa_ctx->tx_dump_enable = false; + rndis_ipa_ctx->rx_dump_enable = false; + rndis_ipa_ctx->deaggregation_enable = false; + rndis_ipa_ctx->outstanding_high = 
DEFAULT_OUTSTANDING_HIGH; + rndis_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW; + atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0); + memcpy + (rndis_ipa_ctx->device_ethaddr, params->device_ethaddr, + sizeof(rndis_ipa_ctx->device_ethaddr)); + memcpy + (rndis_ipa_ctx->host_ethaddr, params->host_ethaddr, + sizeof(rndis_ipa_ctx->host_ethaddr)); + INIT_DELAYED_WORK + (&rndis_ipa_ctx->xmit_error_delayed_work, + rndis_ipa_xmit_error_aftercare_wq); + rndis_ipa_ctx->error_msec_sleep_time = + MIN_TX_ERROR_SLEEP_PERIOD; + RNDIS_IPA_DEBUG("internal data structures were set\n"); + + if (!params->device_ready_notify) + RNDIS_IPA_DEBUG("device_ready_notify() was not supplied\n"); + rndis_ipa_ctx->device_ready_notify = params->device_ready_notify; + + snprintf(net->name, sizeof(net->name), "%s%%d", NETDEV_NAME); + RNDIS_IPA_DEBUG + ("Setting network interface driver name to: %s\n", + net->name); + + net->netdev_ops = &rndis_ipa_netdev_ops; + net->watchdog_timeo = TX_TIMEOUT; + + net->needed_headroom = sizeof(rndis_template_hdr); + RNDIS_IPA_DEBUG + ("Needed headroom for RNDIS header set to %d\n", + net->needed_headroom); + + rndis_ipa_debugfs_init(rndis_ipa_ctx); + + result = rndis_ipa_set_device_ethernet_addr + (net->dev_addr, rndis_ipa_ctx->device_ethaddr); + if (result) { + RNDIS_IPA_ERROR("set device MAC failed\n"); + goto fail_set_device_ethernet; + } + RNDIS_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr); + + if (ipa_is_vlan_mode(IPA_VLAN_IF_RNDIS, + &rndis_ipa_ctx->is_vlan_mode)) { + RNDIS_IPA_ERROR("couldn't acquire vlan mode, is ipa ready?\n"); + goto fail_get_vlan_mode; + } + + RNDIS_IPA_DEBUG("is_vlan_mode %d\n", rndis_ipa_ctx->is_vlan_mode); + + result = rndis_ipa_hdrs_cfg + (rndis_ipa_ctx, + params->host_ethaddr, + params->device_ethaddr); + if (result) { + RNDIS_IPA_ERROR("fail on ipa hdrs set\n"); + goto fail_hdrs_cfg; + } + RNDIS_IPA_DEBUG("IPA header-insertion configed for Ethernet+RNDIS\n"); + + result = rndis_ipa_register_properties(net->name, + rndis_ipa_ctx->is_vlan_mode); + if (result) { + RNDIS_IPA_ERROR("fail on properties set\n"); + goto fail_register_tx; + } + RNDIS_IPA_DEBUG("2 TX and 2 RX properties were registered\n"); + + netif_carrier_off(net); + RNDIS_IPA_DEBUG("set carrier off until pipes are connected\n"); + + result = register_netdev(net); + if (result) { + RNDIS_IPA_ERROR("register_netdev failed: %d\n", result); + goto fail_register_netdev; + } + RNDIS_IPA_DEBUG + ("netdev:%s registration succeeded, index=%d\n", + net->name, net->ifindex); + + rndis_ipa = rndis_ipa_ctx; + params->ipa_rx_notify = rndis_ipa_packet_receive_notify; + params->ipa_tx_notify = rndis_ipa_tx_complete_notify; + params->private = rndis_ipa_ctx; + params->skip_ep_cfg = false; + rndis_ipa_ctx->state = RNDIS_IPA_INITIALIZED; + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + pr_info("RNDIS_IPA NetDev was initialized\n"); + + RNDIS_IPA_LOG_EXIT(); + + return 0; + +fail_register_netdev: + rndis_ipa_deregister_properties(net->name); +fail_register_tx: + rndis_ipa_hdrs_destroy(rndis_ipa_ctx); +fail_hdrs_cfg: +fail_get_vlan_mode: +fail_set_device_ethernet: + rndis_ipa_debugfs_destroy(rndis_ipa_ctx); +fail_netdev_priv: + free_netdev(net); +fail_alloc_etherdev: + return result; +} +EXPORT_SYMBOL(rndis_ipa_init); + +/** + * rndis_ipa_pipe_connect_notify() - notify rndis_ipa Netdev that the USB pipes + * were connected + * @usb_to_ipa_hdl: handle from IPA driver client for USB->IPA + * @ipa_to_usb_hdl: handle from IPA driver client for IPA->USB + * @private: same value that was set by init(), this 
parameter holds the + * network device pointer. + * @max_transfer_byte_size: RNDIS protocol specific, the maximum size that + * the host expect + * @max_packet_number: RNDIS protocol specific, the maximum packet number + * that the host expects + * + * Once USB driver finishes the pipe connection between IPA core + * and USB core this method shall be called in order to + * allow the driver to complete the data path configurations. + * Detailed description: + * - configure the IPA end-points register + * - notify the Linux kernel for "carrier_on" + * - change the driver internal state + * + * After this function is done the driver state changes to "Connected" or + * Connected and Up. + * This API is expected to be called after initialization() or + * after a call to disconnect(). + * + * Returns negative errno, or zero on success + */ +int rndis_ipa_pipe_connect_notify( + u32 usb_to_ipa_hdl, + u32 ipa_to_usb_hdl, + u32 max_xfer_size_bytes_to_dev, + u32 max_packet_number_to_dev, + u32 max_xfer_size_bytes_to_host, + void *private) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = private; + int next_state; + int result; + int ret; + unsigned long flags; + struct ipa_ecm_msg *rndis_msg; + struct ipa_msg_meta msg_meta; + + RNDIS_IPA_LOG_ENTRY(); + + ret = 0; + NULL_CHECK_RETVAL(private); + + if (ret) + return ret; + + RNDIS_IPA_DEBUG + ("usb_to_ipa_hdl=%d, ipa_to_usb_hdl=%d, private=0x%pK\n", + usb_to_ipa_hdl, ipa_to_usb_hdl, private); + RNDIS_IPA_DEBUG + ("max_xfer_sz_to_dev=%d, max_pkt_num_to_dev=%d\n", + max_xfer_size_bytes_to_dev, + max_packet_number_to_dev); + RNDIS_IPA_DEBUG + ("max_xfer_sz_to_host=%d\n", + max_xfer_size_bytes_to_host); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + next_state = rndis_ipa_next_state + (rndis_ipa_ctx->state, + RNDIS_IPA_CONNECT); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("use init()/disconnect() before connect()\n"); + return -EPERM; + } + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + if (usb_to_ipa_hdl >= IPA_CLIENT_MAX) { + RNDIS_IPA_ERROR + ("usb_to_ipa_hdl(%d) - not valid ipa handle\n", + usb_to_ipa_hdl); + return -EINVAL; + } + if (ipa_to_usb_hdl >= IPA_CLIENT_MAX) { + RNDIS_IPA_ERROR + ("ipa_to_usb_hdl(%d) - not valid ipa handle\n", + ipa_to_usb_hdl); + return -EINVAL; + } + + if (ipa_pm_is_used()) + result = rndis_ipa_register_pm_client(rndis_ipa_ctx); + else + result = rndis_ipa_create_rm_resource(rndis_ipa_ctx); + if (result) { + RNDIS_IPA_ERROR("fail on RM create\n"); + goto fail_create_rm; + } + RNDIS_IPA_DEBUG("RM resource was created\n"); + + rndis_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl; + rndis_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl; + if (max_packet_number_to_dev > 1) + rndis_ipa_ctx->deaggregation_enable = true; + else + rndis_ipa_ctx->deaggregation_enable = false; + result = rndis_ipa_ep_registers_cfg + (usb_to_ipa_hdl, + ipa_to_usb_hdl, + max_xfer_size_bytes_to_dev, + max_xfer_size_bytes_to_host, + rndis_ipa_ctx->net->mtu, + rndis_ipa_ctx->deaggregation_enable, + rndis_ipa_ctx->is_vlan_mode); + if (result) { + RNDIS_IPA_ERROR("fail on ep cfg\n"); + goto fail; + } + RNDIS_IPA_DEBUG("end-points configured\n"); + + netif_stop_queue(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("netif_stop_queue() was called\n"); + + netif_carrier_on(rndis_ipa_ctx->net); + if (!netif_carrier_ok(rndis_ipa_ctx->net)) { + RNDIS_IPA_ERROR("netif_carrier_ok error\n"); + result = -EBUSY; + goto fail; + } + RNDIS_IPA_DEBUG("netif_carrier_on() was called\n"); + + 
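For orientation only, here is a condensed, hypothetical sketch of how the USB-side glue is expected to drive the two exported entry points (rndis_ipa_init() first, then rndis_ipa_pipe_connect_notify() once both pipes are up), matching the call-order rules stated in the kernel-doc above. The glue function name, the pipe-handle variables and the numeric RNDIS negotiation values are assumptions for illustration; the parameter struct and callbacks are the ones this file fills in. Assumes a kernel-module context with the rndis_ipa/ipa_usb headers included.

/* Hypothetical caller-side sketch (editor's illustration, not part of this driver) */
static int example_bring_up_rndis_tethering(u8 *host_mac, u8 *dev_mac,
					    u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl)
{
	struct ipa_usb_init_params params;
	int ret;

	memset(&params, 0, sizeof(params));
	memcpy(params.host_ethaddr, host_mac, ETH_ALEN);
	memcpy(params.device_ethaddr, dev_mac, ETH_ALEN);
	params.device_ready_notify = NULL;	/* optional, see rndis_ipa_init() */

	ret = rndis_ipa_init(&params);		/* state: UNLOADED -> INITIALIZED */
	if (ret)
		return ret;

	/* ...USB core connects the two pipes and produces the IPA handles... */

	/* assumed RNDIS negotiation results; real values come from the host */
	return rndis_ipa_pipe_connect_notify(usb_to_ipa_hdl, ipa_to_usb_hdl,
					     8192 /* max xfer to dev */,
					     1 /* max pkts per xfer to dev */,
					     16384 /* max xfer to host */,
					     params.private);
}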
rndis_msg = kzalloc(sizeof(*rndis_msg), GFP_KERNEL); + if (!rndis_msg) { + result = -ENOMEM; + goto fail; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = ECM_CONNECT; + msg_meta.msg_len = sizeof(struct ipa_ecm_msg); + strlcpy(rndis_msg->name, rndis_ipa_ctx->net->name, + IPA_RESOURCE_NAME_MAX); + rndis_msg->ifindex = rndis_ipa_ctx->net->ifindex; + + result = ipa_send_msg(&msg_meta, rndis_msg, rndis_ipa_msg_free_cb); + if (result) { + RNDIS_IPA_ERROR("fail to send ECM_CONNECT for rndis\n"); + kfree(rndis_msg); + goto fail; + } + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, + RNDIS_IPA_CONNECT); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("use init()/disconnect() before connect()\n"); + return -EPERM; + } + rndis_ipa_ctx->state = next_state; + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + + if (next_state == RNDIS_IPA_CONNECTED_AND_UP) + rndis_ipa_enable_data_path(rndis_ipa_ctx); + else + RNDIS_IPA_DEBUG("queue shall be started after open()\n"); + + pr_info("RNDIS_IPA NetDev pipes were connected\n"); + + RNDIS_IPA_LOG_EXIT(); + + return 0; + +fail: + if (ipa_pm_is_used()) + rndis_ipa_deregister_pm_client(rndis_ipa_ctx); + else + rndis_ipa_destroy_rm_resource(rndis_ipa_ctx); +fail_create_rm: + return result; +} +EXPORT_SYMBOL(rndis_ipa_pipe_connect_notify); + +/** + * rndis_ipa_open() - notify Linux network stack to start sending packets + * @net: the network interface supplied by the network stack + * + * Linux uses this API to notify the driver that the network interface + * transitions to the up state. + * The driver will instruct the Linux network stack to start + * delivering data packets. + * The driver internal state shall be changed to Up or Connected and Up + * + * Returns negative errno, or zero on success + */ +static int rndis_ipa_open(struct net_device *net) +{ + struct rndis_ipa_dev *rndis_ipa_ctx; + int next_state; + unsigned long flags; + + RNDIS_IPA_LOG_ENTRY(); + + rndis_ipa_ctx = netdev_priv(net); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + + next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, RNDIS_IPA_OPEN); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("can't bring driver up before initialize\n"); + return -EPERM; + } + + rndis_ipa_ctx->state = next_state; + + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + + if (next_state == RNDIS_IPA_CONNECTED_AND_UP) + rndis_ipa_enable_data_path(rndis_ipa_ctx); + else + RNDIS_IPA_DEBUG("queue shall be started after connect()\n"); + + pr_info("RNDIS_IPA NetDev was opened\n"); + + RNDIS_IPA_LOG_EXIT(); + + return 0; +} + +/** + * rndis_ipa_start_xmit() - send data from APPs to USB core via IPA core + * using SW path (Tx data path) + * Tx path for this Netdev is Apps-processor->IPA->USB + * @skb: packet received from Linux network stack destined for tethered PC + * @net: the network device being used to send this packet (rndis0) + * + * Several conditions needed in order to send the packet to IPA: + * - Transmit queue for the network driver is currently + * in "started" state + * - The driver internal state is in Connected and Up state. 
+ * - Filters Tx switch are turned off + * - The IPA resource manager state for the driver producer client + * is "Granted" which implies that all the resources in the dependency + * graph are valid for data flow. + * - outstanding high boundary was not reached. + * + * In case the outstanding packets high boundary is reached, the driver will + * stop the send queue until enough packets are processed by + * the IPA core (based on calls to rndis_ipa_tx_complete_notify). + * + * In case all of the conditions are met, the network driver shall: + * - encapsulate the Ethernet packet with RNDIS header (REMOTE_NDIS_PACKET_MSG) + * - send the packet by using IPA Driver SW path (IP_PACKET_INIT) + * - Netdev status fields shall be updated based on the current Tx packet + * + * Returns NETDEV_TX_BUSY if retry should be made later, + * or NETDEV_TX_OK on success. + */ +static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb, + struct net_device *net) +{ + int ret; + netdev_tx_t status = NETDEV_TX_BUSY; + struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net); + + netif_trans_update(net); + + RNDIS_IPA_DEBUG_XMIT + ("Tx, len=%d, skb->protocol=%d, outstanding=%d\n", + skb->len, skb->protocol, + atomic_read(&rndis_ipa_ctx->outstanding_pkts)); + + if (unlikely(netif_queue_stopped(net))) { + RNDIS_IPA_ERROR("interface queue is stopped\n"); + goto out; + } + + if (unlikely(rndis_ipa_ctx->tx_dump_enable)) + rndis_ipa_dump_skb(skb); + + if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) { + RNDIS_IPA_ERROR("Missing pipe connected and/or iface up\n"); + return NETDEV_TX_BUSY; + } + + if (unlikely(tx_filter(skb))) { + dev_kfree_skb_any(skb); + RNDIS_IPA_DEBUG("packet got filtered out on Tx path\n"); + rndis_ipa_ctx->tx_dropped++; + status = NETDEV_TX_OK; + goto out; + } + + ret = resource_request(rndis_ipa_ctx); + if (ret) { + RNDIS_IPA_DEBUG("Waiting to resource\n"); + netif_stop_queue(net); + goto resource_busy; + } + + if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) >= + rndis_ipa_ctx->outstanding_high) { + RNDIS_IPA_DEBUG("Outstanding high boundary reached (%d)\n", + rndis_ipa_ctx->outstanding_high); + netif_stop_queue(net); + RNDIS_IPA_DEBUG("send queue was stopped\n"); + status = NETDEV_TX_BUSY; + goto out; + } + + skb = rndis_encapsulate_skb(skb, rndis_ipa_ctx); + trace_rndis_tx_dp(skb->protocol); + ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL); + if (ret) { + RNDIS_IPA_ERROR("ipa transmit failed (%d)\n", ret); + goto fail_tx_packet; + } + + atomic_inc(&rndis_ipa_ctx->outstanding_pkts); + + status = NETDEV_TX_OK; + goto out; + +fail_tx_packet: + rndis_ipa_xmit_error(skb); +out: + resource_release(rndis_ipa_ctx); +resource_busy: + RNDIS_IPA_DEBUG + ("packet Tx done - %s\n", + (status == NETDEV_TX_OK) ? "OK" : "FAIL"); + + return status; +} + +/** + * rndis_ipa_tx_complete_notify() - notification for Netdev that the + * last packet was successfully sent + * @private: driver context stashed by IPA driver upon pipe connect + * @evt: event type (expected to be write-done event) + * @data: data provided with event (this is actually the skb that + * holds the sent packet) + * + * This function will be called on interrupt bottom halve deferred context. + * outstanding packets counter shall be decremented. + * Network stack send queue will be re-started in case low outstanding + * boundary is reached and queue was stopped before. + * At the end the skb shall be freed. 
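A small stand-alone illustration of the outstanding-packets flow control described above: the Tx path stops the queue once in-flight packets reach the high watermark, and the Tx-complete path wakes it only after completions drain below the low watermark, giving the wake-up some hysteresis. The numeric watermarks below are illustrative only; the driver uses DEFAULT_OUTSTANDING_HIGH/LOW, defined elsewhere in this file, and also checks carrier state before waking the queue.

#include <stdbool.h>
#include <stdio.h>

/* illustrative watermarks, not the driver's actual defaults */
#define EX_OUTSTANDING_HIGH	64
#define EX_OUTSTANDING_LOW	32

static bool queue_stopped;
static int outstanding;

static void ex_xmit_one(void)
{
	if (outstanding >= EX_OUTSTANDING_HIGH) {	/* rndis_ipa_start_xmit() path */
		queue_stopped = true;			/* netif_stop_queue() */
		return;
	}
	outstanding++;					/* packet handed to IPA */
}

static void ex_tx_complete_one(void)
{
	if (outstanding > 0)				/* rndis_ipa_tx_complete_notify() path */
		outstanding--;
	if (queue_stopped && outstanding < EX_OUTSTANDING_LOW)
		queue_stopped = false;			/* netif_wake_queue() */
}

int main(void)
{
	int i;

	for (i = 0; i < 100; i++)
		ex_xmit_one();
	printf("after burst: outstanding=%d stopped=%d\n", outstanding, queue_stopped);
	while (queue_stopped)
		ex_tx_complete_one();
	printf("queue woken at outstanding=%d\n", outstanding);
	return 0;
}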
+ */ +static void rndis_ipa_tx_complete_notify( + void *private, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + struct rndis_ipa_dev *rndis_ipa_ctx = private; + int ret; + + ret = 0; + NULL_CHECK_RETVAL(private); + if (ret) + return; + + trace_rndis_status_rcvd(skb->protocol); + + RNDIS_IPA_DEBUG + ("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n", + skb->len, skb->protocol, + atomic_read(&rndis_ipa_ctx->outstanding_pkts)); + + if (unlikely((evt != IPA_WRITE_DONE))) { + RNDIS_IPA_ERROR("unsupported event on TX call-back\n"); + return; + } + + if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) { + RNDIS_IPA_DEBUG + ("dropping Tx-complete pkt, state=%s\n", + rndis_ipa_state_string(rndis_ipa_ctx->state)); + goto out; + } + + rndis_ipa_ctx->net->stats.tx_packets++; + rndis_ipa_ctx->net->stats.tx_bytes += skb->len; + + if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) > 0) + atomic_dec(&rndis_ipa_ctx->outstanding_pkts); + + if + (netif_queue_stopped(rndis_ipa_ctx->net) && + netif_carrier_ok(rndis_ipa_ctx->net) && + atomic_read(&rndis_ipa_ctx->outstanding_pkts) < + (rndis_ipa_ctx->outstanding_low)) { + RNDIS_IPA_DEBUG("outstanding low boundary reached (%d)\n", + rndis_ipa_ctx->outstanding_low); + netif_wake_queue(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("send queue was awakened\n"); + } + +out: + dev_kfree_skb_any(skb); +} + +static void rndis_ipa_tx_timeout(struct net_device *net) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net); + int outstanding = atomic_read(&rndis_ipa_ctx->outstanding_pkts); + + RNDIS_IPA_ERROR + ("possible IPA stall was detected, %d outstanding\n", + outstanding); + + net->stats.tx_errors++; +} + +/** + * rndis_ipa_rm_notify() - callback supplied to IPA resource manager + * for grant/release events + * @user_data: the driver context supplied to IPA resource manager during call + * to ipa_rm_create_resource(). + * @event: the event notified to us by IPA resource manager (Release/Grant) + * @data: reserved field supplied by IPA resource manager + * + * This callback shall be called based on resource request/release sent + * to the IPA resource manager. + * In case the queue was stopped during EINPROGRESS for Tx path and the + * event received is Grant then the queue shall be restarted. + * In case the event notified is a release notification the netdev discards it. + */ +static void rndis_ipa_rm_notify( + void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = user_data; + + RNDIS_IPA_LOG_ENTRY(); + + if (event == IPA_RM_RESOURCE_RELEASED) { + RNDIS_IPA_DEBUG("Resource Released\n"); + return; + } + + if (event != IPA_RM_RESOURCE_GRANTED) { + RNDIS_IPA_ERROR + ("Unexpected event received from RM (%d)\n", event); + return; + } + RNDIS_IPA_DEBUG("Resource Granted\n"); + + if (netif_queue_stopped(rndis_ipa_ctx->net)) { + RNDIS_IPA_DEBUG("starting queue\n"); + netif_start_queue(rndis_ipa_ctx->net); + } else { + RNDIS_IPA_DEBUG("queue already awake\n"); + } + + RNDIS_IPA_LOG_EXIT(); +} + +/** + * rndis_ipa_packet_receive_notify() - Rx notify for packet sent from + * tethered PC (USB->IPA). + * Rx path for this Netdev is USB->IPA->Apps-processor + * @private: driver context + * @evt: event type + * @data: data provided with event + * + * Once IPA driver receives a packet from USB client this callback will be + * called from bottom-half interrupt handling context (ipa Rx workqueue).
+ * + * Packets that shall be sent to Apps processor may be of two types: + * 1) Packets that are destined for Apps (e.g: WEBSERVER running on Apps) + * 2) Exception packets that need special handling (based on IPA core + * configuration, e.g: new TCP session or any other packets that IPA core + * can't handle) + * If the next conditions are met, the packet shall be sent up to the + * Linux network stack: + * - Driver internal state is Connected and Up + * - Notification received from IPA driver meets the expected type + * for Rx packet + * -Filters Rx switch are turned off + * + * Prior to the sending to the network stack: + * - Netdev struct shall be stashed to the skb as required by the network stack + * - Ethernet header shall be removed (skb->data shall point to the Ethernet + * payload, Ethernet still stashed under MAC header). + * - The skb->pkt_protocol shall be set based on the ethernet destination + * address, Can be Broadcast, Multicast or Other-Host, The later + * pkt-types packets shall be dropped in case the Netdev is not + * in promisc mode. + * - Set the skb protocol field based on the EtherType field + * + * Netdev status fields shall be updated based on the current Rx packet + */ +static void rndis_ipa_packet_receive_notify( + void *private, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + struct rndis_ipa_dev *rndis_ipa_ctx = private; + int result; + unsigned int packet_len = skb->len; + + RNDIS_IPA_DEBUG + ("packet Rx, len=%d\n", + skb->len); + + if (unlikely(rndis_ipa_ctx->rx_dump_enable)) + rndis_ipa_dump_skb(skb); + + if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) { + RNDIS_IPA_DEBUG("use connect()/up() before receive()\n"); + RNDIS_IPA_DEBUG("packet dropped (length=%d)\n", + skb->len); + return; + } + + if (evt != IPA_RECEIVE) { + RNDIS_IPA_ERROR("a none IPA_RECEIVE event in driver RX\n"); + return; + } + + if (!rndis_ipa_ctx->deaggregation_enable) + skb_pull(skb, sizeof(struct rndis_pkt_hdr)); + + skb->dev = rndis_ipa_ctx->net; + skb->protocol = eth_type_trans(skb, rndis_ipa_ctx->net); + + if (rx_filter(skb)) { + RNDIS_IPA_DEBUG("packet got filtered out on RX path\n"); + rndis_ipa_ctx->rx_dropped++; + dev_kfree_skb_any(skb); + return; + } + + trace_rndis_netif_ni(skb->protocol); + result = netif_rx_ni(skb); + if (result) + RNDIS_IPA_ERROR("fail on netif_rx_ni\n"); + rndis_ipa_ctx->net->stats.rx_packets++; + rndis_ipa_ctx->net->stats.rx_bytes += packet_len; +} + +/** rndis_ipa_stop() - notify the network interface to stop + * sending/receiving data + * @net: the network device being stopped. + * + * This API is used by Linux network stack to notify the network driver that + * its state was changed to "down" + * The driver will stop the "send" queue and change its internal + * state to "Connected". + * The Netdev shall be returned to be "Up" after rndis_ipa_open(). 
+ */ +static int rndis_ipa_stop(struct net_device *net) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net); + int next_state; + unsigned long flags; + + RNDIS_IPA_LOG_ENTRY(); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + + next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, RNDIS_IPA_STOP); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_DEBUG("can't do network interface down without up\n"); + return -EPERM; + } + + rndis_ipa_ctx->state = next_state; + + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + netif_stop_queue(net); + pr_info("RNDIS_IPA NetDev queue is stopped\n"); + + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + + RNDIS_IPA_LOG_EXIT(); + + return 0; +} + +/** rndis_ipa_disconnect() - notify rndis_ipa Netdev that the USB pipes + * were disconnected + * @private: same value that was set by init(), this parameter holds the + * network device pointer. + * + * USB shall notify the Netdev after disconnecting the pipe. + * - The internal driver state shall returned to its previous + * state (Up or Initialized). + * - Linux network stack shall be informed for carrier off to notify + * user space for pipe disconnect + * - send queue shall be stopped + * During the transition between the pipe disconnection to + * the Netdev notification packets + * are expected to be dropped by IPA driver or IPA core. + */ +int rndis_ipa_pipe_disconnect_notify(void *private) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = private; + int next_state; + int outstanding_dropped_pkts; + int retval; + int ret; + unsigned long flags; + struct ipa_ecm_msg *rndis_msg; + struct ipa_msg_meta msg_meta; + + RNDIS_IPA_LOG_ENTRY(); + + ret = 0; + NULL_CHECK_RETVAL(rndis_ipa_ctx); + if (ret) + return ret; + RNDIS_IPA_DEBUG("private=0x%pK\n", private); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + + next_state = rndis_ipa_next_state + (rndis_ipa_ctx->state, + RNDIS_IPA_DISCONNECT); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("can't disconnect before connect\n"); + return -EPERM; + } + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + if (rndis_ipa_ctx->during_xmit_error) { + RNDIS_IPA_DEBUG("canceling xmit-error delayed work\n"); + cancel_delayed_work_sync( + &rndis_ipa_ctx->xmit_error_delayed_work); + rndis_ipa_ctx->during_xmit_error = false; + } + + netif_carrier_off(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("carrier_off notification was sent\n"); + + rndis_msg = kzalloc(sizeof(*rndis_msg), GFP_KERNEL); + if (!rndis_msg) + return -ENOMEM; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = ECM_DISCONNECT; + msg_meta.msg_len = sizeof(struct ipa_ecm_msg); + strlcpy(rndis_msg->name, rndis_ipa_ctx->net->name, + IPA_RESOURCE_NAME_MAX); + rndis_msg->ifindex = rndis_ipa_ctx->net->ifindex; + + retval = ipa_send_msg(&msg_meta, rndis_msg, rndis_ipa_msg_free_cb); + if (retval) { + RNDIS_IPA_ERROR("fail to send ECM_DISCONNECT for rndis\n"); + kfree(rndis_msg); + return -EPERM; + } + + netif_stop_queue(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("queue stopped\n"); + + outstanding_dropped_pkts = + atomic_read(&rndis_ipa_ctx->outstanding_pkts); + + rndis_ipa_ctx->net->stats.tx_dropped += outstanding_dropped_pkts; + atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0); + + if (ipa_pm_is_used()) + retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx); + else + retval = rndis_ipa_destroy_rm_resource(rndis_ipa_ctx); + if 
(retval) { + RNDIS_IPA_ERROR("Fail to clean RM\n"); + return retval; + } + RNDIS_IPA_DEBUG("RM was successfully destroyed\n"); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, + RNDIS_IPA_DISCONNECT); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("can't disconnect before connect\n"); + return -EPERM; + } + rndis_ipa_ctx->state = next_state; + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + + pr_info("RNDIS_IPA NetDev pipes disconnected (%d outstanding clr)\n", + outstanding_dropped_pkts); + + RNDIS_IPA_LOG_EXIT(); + + return 0; +} +EXPORT_SYMBOL(rndis_ipa_pipe_disconnect_notify); + +/** + * rndis_ipa_cleanup() - unregister the network interface driver and free + * internal data structs. + * @private: same value that was set by init(), this + * parameter holds the network device pointer. + * + * This function shall be called once the network interface is not + * needed anymore, e.g: when the USB composition does not support it. + * This function shall be called after the pipes were disconnected. + * Detailed description: + * - remove header-insertion headers from IPA core + * - delete the driver dependency defined for IPA resource manager and + * destroy the producer resource. + * - remove the debugfs entries + * - deregister the network interface from Linux network stack + * - free all internal data structs + * + * It is assumed that no packets shall be sent through HW bridging + * during cleanup to avoid packets trying to add an header that is + * removed during cleanup (IPA configuration manager should have + * removed them at this point) + */ +void rndis_ipa_cleanup(void *private) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = private; + int next_state; + int ret; + unsigned long flags; + + RNDIS_IPA_LOG_ENTRY(); + + RNDIS_IPA_DEBUG("private=0x%pK\n", private); + + ret = 0; + NULL_CHECK_RETVAL(rndis_ipa_ctx); + if (ret) + return; + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + next_state = rndis_ipa_next_state + (rndis_ipa_ctx->state, + RNDIS_IPA_CLEANUP); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("use disconnect()before clean()\n"); + return; + } + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + + ret = rndis_ipa_deregister_properties(rndis_ipa_ctx->net->name); + if (ret) { + RNDIS_IPA_ERROR("Fail to deregister Tx/Rx properties\n"); + return; + } + RNDIS_IPA_DEBUG("deregister Tx/Rx properties was successful\n"); + + ret = rndis_ipa_hdrs_destroy(rndis_ipa_ctx); + if (ret) + RNDIS_IPA_ERROR( + "Failed removing RNDIS headers from IPA core. 
Continue anyway\n"); + else + RNDIS_IPA_DEBUG("RNDIS headers were removed from IPA core\n"); + + rndis_ipa_debugfs_destroy(rndis_ipa_ctx); + RNDIS_IPA_DEBUG("debugfs remove was done\n"); + + unregister_netdev(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("netdev unregistered\n"); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, + RNDIS_IPA_CLEANUP); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("use disconnect()before clean()\n"); + return; + } + rndis_ipa_ctx->state = next_state; + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + free_netdev(rndis_ipa_ctx->net); + pr_info("RNDIS_IPA NetDev was cleaned\n"); + + RNDIS_IPA_LOG_EXIT(); +} +EXPORT_SYMBOL(rndis_ipa_cleanup); + +static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + if (rndis_ipa_ctx->device_ready_notify) { + rndis_ipa_ctx->device_ready_notify(); + RNDIS_IPA_DEBUG("USB device_ready_notify() was called\n"); + } else { + RNDIS_IPA_DEBUG("device_ready_notify() not supplied\n"); + } + + netif_start_queue(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("netif_start_queue() was called\n"); +} + +static void rndis_ipa_xmit_error(struct sk_buff *skb) +{ + bool retval; + struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev); + unsigned long delay_jiffies; + u8 rand_dealy_msec; + + RNDIS_IPA_LOG_ENTRY(); + + RNDIS_IPA_DEBUG("starting Tx-queue backoff\n"); + + netif_stop_queue(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("netif_stop_queue was called\n"); + + skb_pull(skb, sizeof(rndis_template_hdr)); + rndis_ipa_ctx->net->stats.tx_errors++; + + get_random_bytes(&rand_dealy_msec, sizeof(rand_dealy_msec)); + delay_jiffies = msecs_to_jiffies( + rndis_ipa_ctx->error_msec_sleep_time + rand_dealy_msec); + + retval = schedule_delayed_work( + &rndis_ipa_ctx->xmit_error_delayed_work, delay_jiffies); + if (!retval) { + RNDIS_IPA_ERROR("fail to schedule delayed work\n"); + netif_start_queue(rndis_ipa_ctx->net); + } else { + RNDIS_IPA_DEBUG + ("work scheduled to start Tx-queue in %d msec\n", + rndis_ipa_ctx->error_msec_sleep_time + + rand_dealy_msec); + rndis_ipa_ctx->during_xmit_error = true; + } + + RNDIS_IPA_LOG_EXIT(); +} + +static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work) +{ + struct rndis_ipa_dev *rndis_ipa_ctx; + struct delayed_work *delayed_work; + + RNDIS_IPA_LOG_ENTRY(); + + RNDIS_IPA_DEBUG("Starting queue after xmit error\n"); + + delayed_work = to_delayed_work(work); + rndis_ipa_ctx = container_of + (delayed_work, struct rndis_ipa_dev, + xmit_error_delayed_work); + + if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) { + RNDIS_IPA_ERROR + ("error aftercare handling in bad state (%d)", + rndis_ipa_ctx->state); + return; + } + + rndis_ipa_ctx->during_xmit_error = false; + + netif_start_queue(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("netif_start_queue() was called\n"); + + RNDIS_IPA_LOG_EXIT(); +} + +/** + * rndis_ipa_prepare_header_insertion() - prepare the header insertion request + * for IPA driver + * eth_type: the Ethernet type for this header-insertion header + * hdr_name: string that shall represent this header in IPA data base + * add_hdr: output for caller to be used with ipa_add_hdr() to configure + * the IPA core + * dst_mac: tethered PC MAC (Ethernet) address to be added to packets + * for IPA->USB pipe + * src_mac: device MAC (Ethernet) address to be added to packets + * for IPA->USB pipe + * is_vlan_mode: should driver work in vlan 
mode? + * + * This function shall build the header-insertion block request for a + * single Ethernet+RNDIS header) + * this header shall be inserted for packets processed by IPA + * and destined for USB client. + * This header shall be used for HW bridging for packets destined for + * tethered PC. + * For SW data-path, this header won't be used. + */ +static void rndis_ipa_prepare_header_insertion( + int eth_type, + const char *hdr_name, struct ipa_hdr_add *add_hdr, + const void *dst_mac, const void *src_mac, bool is_vlan_mode) +{ + struct ethhdr *eth_hdr; + struct vlan_ethhdr *eth_vlan_hdr; + + add_hdr->hdr_len = sizeof(rndis_template_hdr); + add_hdr->is_partial = false; + strlcpy(add_hdr->name, hdr_name, IPA_RESOURCE_NAME_MAX); + + memcpy(add_hdr->hdr, &rndis_template_hdr, sizeof(rndis_template_hdr)); + add_hdr->is_eth2_ofst_valid = true; + add_hdr->eth2_ofst = sizeof(rndis_template_hdr); + + if (is_vlan_mode) { + eth_vlan_hdr = (struct vlan_ethhdr *)(add_hdr->hdr + + sizeof(rndis_template_hdr)); + memcpy(eth_vlan_hdr->h_dest, dst_mac, ETH_ALEN); + memcpy(eth_vlan_hdr->h_source, src_mac, ETH_ALEN); + eth_vlan_hdr->h_vlan_encapsulated_proto = htons(eth_type); + eth_vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); + add_hdr->hdr_len += VLAN_ETH_HLEN; + add_hdr->type = IPA_HDR_L2_802_1Q; + } else { + eth_hdr = (struct ethhdr *)(add_hdr->hdr + + sizeof(rndis_template_hdr)); + memcpy(eth_hdr->h_dest, dst_mac, ETH_ALEN); + memcpy(eth_hdr->h_source, src_mac, ETH_ALEN); + eth_hdr->h_proto = htons(eth_type); + add_hdr->hdr_len += ETH_HLEN; + add_hdr->type = IPA_HDR_L2_ETHERNET_II; + } +} + +/** + * rndis_ipa_hdrs_cfg() - configure header insertion block in IPA core + * to allow HW bridging + * @rndis_ipa_ctx: main driver context + * @dst_mac: destination MAC address (tethered PC) + * @src_mac: source MAC address (MDM device) + * + * This function shall add 2 headers. + * One header for Ipv4 and one header for Ipv6. + * Both headers shall contain Ethernet header and RNDIS header, the only + * difference shall be in the EtherTye field. 
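To make the header-insertion layout above concrete, here is a minimal stand-alone sketch of the blob rndis_ipa_prepare_header_insertion() builds for the non-VLAN case: the RNDIS packet-message template followed immediately by a 14-byte Ethernet header whose EtherType selects IPv4 or IPv6 (the VLAN case simply uses the longer 802.1Q header). The 44-byte RNDIS header size is an assumption based on the standard REMOTE_NDIS_PACKET_MSG layout; the driver itself always uses sizeof(rndis_template_hdr), and the two RNDIS length words are filled per packet by IPA according to the hdr_total_len_or_pad/hdr_ofst_pkt_size settings in ipa_to_usb_ep_cfg earlier in this file.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define EX_RNDIS_HDR_LEN	44	/* assumed REMOTE_NDIS_PACKET_MSG size */
#define EX_ETH_HLEN		14

static void ex_build_partial_hdr(uint8_t *hdr, const uint8_t *dst_mac,
				 const uint8_t *src_mac, uint16_t eth_type)
{
	/* RNDIS template first; other template words omitted for brevity */
	memset(hdr, 0, EX_RNDIS_HDR_LEN);
	hdr[0] = 0x01;				/* REMOTE_NDIS_PACKET_MSG, little endian */

	/* Ethernet header appended right after the RNDIS template */
	memcpy(hdr + EX_RNDIS_HDR_LEN, dst_mac, 6);		/* tethered PC */
	memcpy(hdr + EX_RNDIS_HDR_LEN + 6, src_mac, 6);		/* device */
	hdr[EX_RNDIS_HDR_LEN + 12] = eth_type >> 8;
	hdr[EX_RNDIS_HDR_LEN + 13] = eth_type & 0xff;
}

int main(void)
{
	uint8_t hdr[EX_RNDIS_HDR_LEN + EX_ETH_HLEN];
	uint8_t host[6] = { 0 }, dev[6] = { 0 };

	ex_build_partial_hdr(hdr, host, dev, 0x0800);	/* IPv4 variant; 0x86DD for IPv6 */
	printf("total hdr_len=%zu, eth2_ofst=%d\n", sizeof(hdr), EX_RNDIS_HDR_LEN);
	return 0;
}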
+ * Headers will be committed to HW + * + * Returns negative errno, or zero on success + */ +static int rndis_ipa_hdrs_cfg( + struct rndis_ipa_dev *rndis_ipa_ctx, + const void *dst_mac, const void *src_mac) +{ + struct ipa_ioc_add_hdr *hdrs; + struct ipa_hdr_add *ipv4_hdr; + struct ipa_hdr_add *ipv6_hdr; + int result = 0; + + RNDIS_IPA_LOG_ENTRY(); + + hdrs = kzalloc + (sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr), + GFP_KERNEL); + if (!hdrs) { + result = -ENOMEM; + goto fail_mem; + } + + ipv4_hdr = &hdrs->hdr[0]; + ipv6_hdr = &hdrs->hdr[1]; + rndis_ipa_prepare_header_insertion + (ETH_P_IP, IPV4_HDR_NAME, + ipv4_hdr, dst_mac, src_mac, rndis_ipa_ctx->is_vlan_mode); + rndis_ipa_prepare_header_insertion + (ETH_P_IPV6, IPV6_HDR_NAME, + ipv6_hdr, dst_mac, src_mac, rndis_ipa_ctx->is_vlan_mode); + + hdrs->commit = 1; + hdrs->num_hdrs = 2; + result = ipa_add_hdr(hdrs); + if (result) { + RNDIS_IPA_ERROR("Fail on Header-Insertion(%d)\n", result); + goto fail_add_hdr; + } + if (ipv4_hdr->status) { + RNDIS_IPA_ERROR("Fail on Header-Insertion ipv4(%d)\n", + ipv4_hdr->status); + result = ipv4_hdr->status; + goto fail_add_hdr; + } + if (ipv6_hdr->status) { + RNDIS_IPA_ERROR("Fail on Header-Insertion ipv6(%d)\n", + ipv6_hdr->status); + result = ipv6_hdr->status; + goto fail_add_hdr; + } + rndis_ipa_ctx->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl; + rndis_ipa_ctx->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl; + + RNDIS_IPA_LOG_EXIT(); + +fail_add_hdr: + kfree(hdrs); +fail_mem: + return result; +} + +/** + * rndis_ipa_hdrs_destroy() - remove the IPA core configuration done for + * the driver data path bridging. + * @rndis_ipa_ctx: the driver context + * + * Revert the work done on rndis_ipa_hdrs_cfg(), which is, + * remove 2 headers for Ethernet+RNDIS. + */ +static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *ipv4; + struct ipa_hdr_del *ipv6; + int result; + + del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) + + sizeof(*ipv6), GFP_KERNEL); + if (!del_hdr) + return -ENOMEM; + + del_hdr->commit = 1; + del_hdr->num_hdls = 2; + + ipv4 = &del_hdr->hdl[0]; + ipv4->hdl = rndis_ipa_ctx->eth_ipv4_hdr_hdl; + ipv6 = &del_hdr->hdl[1]; + ipv6->hdl = rndis_ipa_ctx->eth_ipv6_hdr_hdl; + + result = ipa_del_hdr(del_hdr); + if (result || ipv4->status || ipv6->status) + RNDIS_IPA_ERROR("ipa_del_hdr failed\n"); + else + RNDIS_IPA_DEBUG("hdrs deletion done\n"); + + kfree(del_hdr); + return result; +} + +static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net) +{ + return &net->stats; +} + +/** + * rndis_ipa_register_properties() - set Tx/Rx properties needed + * by IPA configuration manager + * @netdev_name: a string with the name of the network interface device + * @is_vlan_mode: should driver work in vlan mode? + * + * Register Tx/Rx properties to allow user space configuration (IPA + * Configuration Manager): + * + * - Two Tx properties (IPA->USB): specify the header names and pipe number + * that shall be used by user space for header-addition configuration + * for ipv4/ipv6 packets flowing from IPA to USB for HW bridging data. + * That header-addition header is added by the Netdev and used by user + * space to close the the HW bridge by adding filtering and routing rules + * that point to this header. 
+ * + * - Two Rx properties (USB->IPA): these properties shall be used by user space + * to configure the IPA core to identify the packets destined + * for Apps-processor by configuring the unicast rules destined for + * the Netdev IP address. + * This rules shall be added based on the attribute mask supplied at + * this function, that is, always hit rule. + */ +static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode) +{ + struct ipa_tx_intf tx_properties = {0}; + struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} }; + struct ipa_ioc_tx_intf_prop *ipv4_property; + struct ipa_ioc_tx_intf_prop *ipv6_property; + struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} }; + struct ipa_rx_intf rx_properties = {0}; + struct ipa_ioc_rx_intf_prop *rx_ipv4_property; + struct ipa_ioc_rx_intf_prop *rx_ipv6_property; + enum ipa_hdr_l2_type hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + int result = 0; + + RNDIS_IPA_LOG_ENTRY(); + + if (is_vlan_mode) + hdr_l2_type = IPA_HDR_L2_802_1Q; + + tx_properties.prop = properties; + ipv4_property = &tx_properties.prop[0]; + ipv4_property->ip = IPA_IP_v4; + ipv4_property->dst_pipe = IPA_TO_USB_CLIENT; + strlcpy + (ipv4_property->hdr_name, IPV4_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + ipv4_property->hdr_l2_type = hdr_l2_type; + ipv6_property = &tx_properties.prop[1]; + ipv6_property->ip = IPA_IP_v6; + ipv6_property->dst_pipe = IPA_TO_USB_CLIENT; + strlcpy + (ipv6_property->hdr_name, IPV6_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + ipv6_property->hdr_l2_type = hdr_l2_type; + tx_properties.num_props = 2; + + rx_properties.prop = rx_ioc_properties; + rx_ipv4_property = &rx_properties.prop[0]; + rx_ipv4_property->ip = IPA_IP_v4; + rx_ipv4_property->attrib.attrib_mask = 0; + rx_ipv4_property->src_pipe = IPA_CLIENT_USB_PROD; + rx_ipv4_property->hdr_l2_type = hdr_l2_type; + rx_ipv6_property = &rx_properties.prop[1]; + rx_ipv6_property->ip = IPA_IP_v6; + rx_ipv6_property->attrib.attrib_mask = 0; + rx_ipv6_property->src_pipe = IPA_CLIENT_USB_PROD; + rx_ipv6_property->hdr_l2_type = hdr_l2_type; + rx_properties.num_props = 2; + + result = ipa_register_intf("rndis0", &tx_properties, &rx_properties); + if (result) + RNDIS_IPA_ERROR("fail on Tx/Rx properties registration\n"); + else + RNDIS_IPA_DEBUG("Tx/Rx properties registration done\n"); + + RNDIS_IPA_LOG_EXIT(); + + return result; +} + +/** + * rndis_ipa_deregister_properties() - remove the 2 Tx and 2 Rx properties + * @netdev_name: a string with the name of the network interface device + * + * This function revert the work done on rndis_ipa_register_properties(). + */ +static int rndis_ipa_deregister_properties(char *netdev_name) +{ + int result; + + RNDIS_IPA_LOG_ENTRY(); + + result = ipa_deregister_intf(netdev_name); + if (result) { + RNDIS_IPA_DEBUG("Fail on Tx prop deregister\n"); + return result; + } + RNDIS_IPA_LOG_EXIT(); + + return 0; +} + +/** + * rndis_ipa_create_rm_resource() -creates the resource representing + * this Netdev and supply notification callback for resource event + * such as Grant/Release + * @rndis_ipa_ctx: this driver context + * + * In order make sure all needed resources are available during packet + * transmit this Netdev shall use Request/Release mechanism of + * the IPA resource manager. + * This mechanism shall iterate over a dependency graph and make sure + * all dependent entities are ready to for packet Tx + * transfer (Apps->IPA->USB). 
+ * In this function the resource representing the Netdev is created + * in addition to the basic dependency between the Netdev and the USB client. + * Hence, USB client, is a dependency for the Netdev and may be notified in + * case of packet transmit from this Netdev to tethered Host. + * As implied from the "may" in the above sentence there is a scenario where + * the USB is not notified. This is done thanks to the IPA resource manager + * inactivity timer. + * The inactivity timer allow the Release requests to be delayed in order + * prevent ping-pong with the USB and other dependencies. + */ +static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + struct ipa_rm_create_params create_params = {0}; + struct ipa_rm_perf_profile profile; + int result; + + RNDIS_IPA_LOG_ENTRY(); + + create_params.name = DRV_RESOURCE_ID; + create_params.reg_params.user_data = rndis_ipa_ctx; + create_params.reg_params.notify_cb = rndis_ipa_rm_notify; + result = ipa_rm_create_resource(&create_params); + if (result) { + RNDIS_IPA_ERROR("Fail on ipa_rm_create_resource\n"); + goto fail_rm_create; + } + RNDIS_IPA_DEBUG("RM client was created\n"); + + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ipa_rm_set_perf_profile(DRV_RESOURCE_ID, &profile); + + result = ipa_rm_inactivity_timer_init + (DRV_RESOURCE_ID, + INACTIVITY_MSEC_DELAY); + if (result) { + RNDIS_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n"); + goto fail_inactivity_timer; + } + + RNDIS_IPA_DEBUG("rm_it client was created\n"); + + result = ipa_rm_add_dependency_sync + (DRV_RESOURCE_ID, + IPA_RM_RESOURCE_USB_CONS); + + if (result && result != -EINPROGRESS) + RNDIS_IPA_ERROR("unable to add RNDIS/USB dependency (%d)\n", + result); + else + RNDIS_IPA_DEBUG("RNDIS/USB dependency was set\n"); + + result = ipa_rm_add_dependency_sync + (IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result && result != -EINPROGRESS) + RNDIS_IPA_ERROR("unable to add USB/APPS dependency (%d)\n", + result); + else + RNDIS_IPA_DEBUG("USB/APPS dependency was set\n"); + + RNDIS_IPA_LOG_EXIT(); + + return 0; + +fail_inactivity_timer: +fail_rm_create: + return result; +} + +static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = p; + + RNDIS_IPA_LOG_ENTRY(); + + if (event != IPA_PM_CLIENT_ACTIVATED) { + RNDIS_IPA_ERROR("unexpected event %d\n", event); + WARN_ON(1); + return; + } + RNDIS_IPA_DEBUG("Resource Granted\n"); + + if (netif_queue_stopped(rndis_ipa_ctx->net)) { + RNDIS_IPA_DEBUG("starting queue\n"); + netif_start_queue(rndis_ipa_ctx->net); + } else { + RNDIS_IPA_DEBUG("queue already awake\n"); + } + + RNDIS_IPA_LOG_EXIT(); +} + +/** + * rndis_ipa_destroy_rm_resource() - delete the dependency and destroy + * the resource done on rndis_ipa_create_rm_resource() + * @rndis_ipa_ctx: this driver context + * + * This function shall delete the dependency create between + * the Netdev to the USB. + * In addition the inactivity time shall be destroy and the resource shall + * be deleted. 
+ */ +static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + int result; + + RNDIS_IPA_LOG_ENTRY(); + + result = ipa_rm_delete_dependency + (DRV_RESOURCE_ID, + IPA_RM_RESOURCE_USB_CONS); + if (result && result != -EINPROGRESS) { + RNDIS_IPA_ERROR("Fail to delete RNDIS/USB dependency\n"); + goto bail; + } + RNDIS_IPA_DEBUG("RNDIS/USB dependency was successfully deleted\n"); + + result = ipa_rm_delete_dependency + (IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result == -EINPROGRESS) { + RNDIS_IPA_DEBUG("RM dependency deletion is in progress\n"); + } else if (result) { + RNDIS_IPA_ERROR("Fail to delete USB/APPS dependency\n"); + goto bail; + } else { + RNDIS_IPA_DEBUG("USB/APPS dependency was deleted\n"); + } + + result = ipa_rm_inactivity_timer_destroy(DRV_RESOURCE_ID); + if (result) { + RNDIS_IPA_ERROR("Fail to destroy inactivity timer\n"); + goto bail; + } + RNDIS_IPA_DEBUG("RM inactivity timer was successfully destroyed\n"); + + result = ipa_rm_delete_resource(DRV_RESOURCE_ID); + if (result) { + RNDIS_IPA_ERROR("resource deletion failed\n"); + goto bail; + } + RNDIS_IPA_DEBUG + ("Netdev RM resource was deleted (resid:%d)\n", + DRV_RESOURCE_ID); + + RNDIS_IPA_LOG_EXIT(); + +bail: + return result; +} + +static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + int result; + struct ipa_pm_register_params pm_reg; + + memset(&pm_reg, 0, sizeof(pm_reg)); + + pm_reg.name = rndis_ipa_ctx->net->name; + pm_reg.user_data = rndis_ipa_ctx; + pm_reg.callback = rndis_ipa_pm_cb; + pm_reg.group = IPA_PM_GROUP_APPS; + result = ipa_pm_register(&pm_reg, &rndis_ipa_ctx->pm_hdl); + if (result) { + RNDIS_IPA_ERROR("failed to create IPA PM client %d\n", result); + return result; + } + return 0; +} + +static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + ipa_pm_deactivate_sync(rndis_ipa_ctx->pm_hdl); + ipa_pm_deregister(rndis_ipa_ctx->pm_hdl); + rndis_ipa_ctx->pm_hdl = ~0; + return 0; +} + +/** + * resource_request() - request for the Netdev resource + * @rndis_ipa_ctx: main driver context + * + * This function shall ask the IPA resource manager inactivity timer to + * request a Grant for the Netdev producer resource. + * In case the resource is already Granted the function shall return immediately + * and "pet" the inactivity timer. + * In case the resource was not already Granted this function shall + * return EINPROGRESS and the Netdev shall stop the send queue until + * the IPA resource manager notifies it that the resource is + * granted (done in a deferred context) + */ +static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + int result = 0; + + if (!rm_enabled(rndis_ipa_ctx)) + return result; + + if (ipa_pm_is_used()) + return ipa_pm_activate(rndis_ipa_ctx->pm_hdl); + + return ipa_rm_inactivity_timer_request_resource( + DRV_RESOURCE_ID); + +} + +/** + * resource_release() - release the Netdev resource + * @rndis_ipa_ctx: main driver context + * + * Start the inactivity timer countdown by using the IPA resource + * manager inactivity timer. + * The actual resource release shall occur only if no new request is made + * during the INACTIVITY_MSEC_DELAY period.
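As a stand-alone toy model of the request/"deferred release" behavior described above: each release only arms a countdown, and a new request while the countdown is pending simply re-arms ("pets") it, so a burst of packets never toggles the underlying resource. Names and tick counts are illustrative; the real driver delegates this to ipa_rm_inactivity_timer_* or ipa_pm_deferred_deactivate().

#include <stdbool.h>
#include <stdio.h>

static bool granted;
static int release_countdown;	/* 0 means no release pending */

static void ex_request(void)
{
	release_countdown = 0;		/* pet the timer: cancel pending release */
	if (!granted) {
		granted = true;		/* in the driver this may be async (-EINPROGRESS) */
		printf("resource granted\n");
	}
}

static void ex_release(void)
{
	release_countdown = 3;		/* stands in for INACTIVITY_MSEC_DELAY */
}

static void ex_tick(void)
{
	if (release_countdown && --release_countdown == 0) {
		granted = false;
		printf("resource released after inactivity\n");
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++) {	/* a burst: request/release per packet */
		ex_request();
		ex_release();
		ex_tick();		/* countdown never expires inside the burst */
	}
	for (i = 0; i < 3; i++)		/* idle: countdown finally expires */
		ex_tick();
	return 0;
}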
+ */ +static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + if (!rm_enabled(rndis_ipa_ctx)) + return; + if (ipa_pm_is_used()) + ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl); + else + ipa_rm_inactivity_timer_release_resource(DRV_RESOURCE_ID); +} + +/** + * rndis_encapsulate_skb() - encapsulate the given Ethernet skb with + * an RNDIS header + * @skb: packet to be encapsulated with the RNDIS header + * @rndis_ipa_ctx: main driver context + * + * Shall use a template header for RNDIS and update it with the given + * skb values. + * Ethernet is expected to be already encapsulate the packet. + */ +static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb, + struct rndis_ipa_dev *rndis_ipa_ctx) +{ + struct rndis_pkt_hdr *rndis_hdr; + int payload_byte_len = skb->len; + + /* if there is no room in this skb, allocate a new one */ + if (unlikely(skb_headroom(skb) < sizeof(rndis_template_hdr))) { + struct sk_buff *new_skb = skb_copy_expand(skb, + sizeof(rndis_template_hdr), 0, GFP_ATOMIC); + if (!new_skb) { + RNDIS_IPA_ERROR("no memory for skb expand\n"); + return skb; + } + RNDIS_IPA_DEBUG("skb expanded. old %pK new %pK\n", + skb, new_skb); + dev_kfree_skb_any(skb); + skb = new_skb; + } + + if (rndis_ipa_ctx->is_vlan_mode) + if (unlikely(skb->protocol != htons(ETH_P_8021Q))) + RNDIS_IPA_DEBUG( + "ether_type != ETH_P_8021Q && vlan, prot = 0x%X\n" + , skb->protocol); + + /* make room at the head of the SKB to put the RNDIS header */ + rndis_hdr = (struct rndis_pkt_hdr *)skb_push(skb, + sizeof(rndis_template_hdr)); + + memcpy(rndis_hdr, &rndis_template_hdr, sizeof(*rndis_hdr)); + rndis_hdr->msg_len += payload_byte_len; + rndis_hdr->data_len += payload_byte_len; + + return skb; +} + +/** + * rx_filter() - logic that decide if the current skb is to be filtered out + * @skb: skb that may be sent up to the network stack + * + * This function shall do Rx packet filtering on the Netdev level. + */ +static bool rx_filter(struct sk_buff *skb) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev); + + return rndis_ipa_ctx->rx_filter; +} + +/** + * tx_filter() - logic that decide if the current skb is to be filtered out + * @skb: skb that may be sent to the USB core + * + * This function shall do Tx packet filtering on the Netdev level. + * ICMP filter bypass is possible to allow only ICMP packet to be + * sent (pings and etc) + */ + +static bool tx_filter(struct sk_buff *skb) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev); + bool is_icmp; + + if (likely(!rndis_ipa_ctx->tx_filter)) + return false; + + is_icmp = (skb->protocol == htons(ETH_P_IP) && + ip_hdr(skb)->protocol == IPPROTO_ICMP); + + if ((!rndis_ipa_ctx->icmp_filter) && is_icmp) + return false; + + return true; +} + +/** + * rm_enabled() - allow the use of resource manager Request/Release to + * be bypassed + * @rndis_ipa_ctx: main driver context + * + * By disabling the resource manager flag the Request for the Netdev resource + * shall be bypassed and the packet shall be sent. + * accordingly, Release request shall be bypass as well. 
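Stepping back to rndis_encapsulate_skb() above, its length bookkeeping is simple enough to show stand-alone: the template starts with msg_len == sizeof(header) and data_len == 0, and both are increased by the Ethernet-frame length, so msg_len always ends up as header-size plus payload. The zeroes[7] padding (and hence the 44-byte total) is an assumption based on the standard REMOTE_NDIS_PACKET_MSG layout; the driver's own struct rndis_pkt_hdr is authoritative.

#include <stdint.h>
#include <stdio.h>

struct ex_rndis_pkt_hdr {
	uint32_t msg_type;
	uint32_t msg_len;
	uint32_t data_ofst;
	uint32_t data_len;
	uint32_t zeroes[7];	/* assumed padding up to the 44-byte message */
};

int main(void)
{
	struct ex_rndis_pkt_hdr hdr = {
		.msg_type  = 0x00000001,		/* REMOTE_NDIS_PACKET_MSG */
		.msg_len   = sizeof(hdr),		/* template value */
		.data_ofst = sizeof(hdr) - 8,		/* offset measured from data_ofst field */
		.data_len  = 0,
	};
	uint32_t payload = 1514;			/* e.g. a full Ethernet frame */

	hdr.msg_len  += payload;			/* what the driver does per skb */
	hdr.data_len += payload;

	printf("hdr=%zu msg_len=%u data_len=%u\n",
	       sizeof(hdr), hdr.msg_len, hdr.data_len);
	return 0;
}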
+ */ +static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + return rndis_ipa_ctx->rm_enable; +} + +/** + * rndis_ipa_ep_registers_cfg() - configure the USB endpoints + * @usb_to_ipa_hdl: handle received from ipa_connect which represents + * the USB to IPA end-point + * @ipa_to_usb_hdl: handle received from ipa_connect which represents + * the IPA to USB end-point + * @max_xfer_size_bytes_to_dev: the maximum size, in bytes, that the device + * expects to receive from the host. supplied on REMOTE_NDIS_INITIALIZE_CMPLT. + * @max_xfer_size_bytes_to_host: the maximum size, in bytes, that the host + * expects to receive from the device. supplied on REMOTE_NDIS_INITIALIZE_MSG. + * @mtu: the netdev MTU size, in bytes + * @deaggr_enable: should deaggregation be enabled? + * @is_vlan_mode: should driver work in vlan mode? + * + * USB to IPA pipe: + * - de-aggregation + * - Remove Ethernet header + * - Remove RNDIS header + * - SRC NAT + * - Default routing(0) + * IPA to USB Pipe: + * - aggregation + * - Add Ethernet header + * - Add RNDIS header + */ +static int rndis_ipa_ep_registers_cfg( + u32 usb_to_ipa_hdl, + u32 ipa_to_usb_hdl, + u32 max_xfer_size_bytes_to_dev, + u32 max_xfer_size_bytes_to_host, + u32 mtu, + bool deaggr_enable, + bool is_vlan_mode) +{ + int result; + struct ipa_ep_cfg *usb_to_ipa_ep_cfg; + int add = 0; + + if (deaggr_enable) { + usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_en; + RNDIS_IPA_DEBUG("deaggregation enabled\n"); + } else { + usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_dis; + RNDIS_IPA_DEBUG("deaggregation disabled\n"); + add = sizeof(struct rndis_pkt_hdr); + } + + if (is_vlan_mode) { + usb_to_ipa_ep_cfg->hdr.hdr_len = + VLAN_ETH_HLEN + add; + ipa_to_usb_ep_cfg.hdr.hdr_len = + VLAN_ETH_HLEN + sizeof(struct rndis_pkt_hdr); + ipa_to_usb_ep_cfg.hdr.hdr_additional_const_len = VLAN_ETH_HLEN; + } else { + usb_to_ipa_ep_cfg->hdr.hdr_len = + ETH_HLEN + add; + ipa_to_usb_ep_cfg.hdr.hdr_len = + ETH_HLEN + sizeof(struct rndis_pkt_hdr); + ipa_to_usb_ep_cfg.hdr.hdr_additional_const_len = ETH_HLEN; + } + + usb_to_ipa_ep_cfg->deaggr.max_packet_len = max_xfer_size_bytes_to_dev; + result = ipa_cfg_ep(usb_to_ipa_hdl, usb_to_ipa_ep_cfg); + if (result) { + pr_err("failed to configure USB to IPA point\n"); + return result; + } + RNDIS_IPA_DEBUG("IPA<-USB end-point configured\n"); + + ipa_to_usb_ep_cfg.aggr.aggr_byte_limit = + (max_xfer_size_bytes_to_host - mtu) / 1024; + + if (ipa_to_usb_ep_cfg.aggr.aggr_byte_limit == 0) { + ipa_to_usb_ep_cfg.aggr.aggr_time_limit = 0; + ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit = 1; + } else { + ipa_to_usb_ep_cfg.aggr.aggr_time_limit = + DEFAULT_AGGR_TIME_LIMIT; + ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit = + DEFAULT_AGGR_PKT_LIMIT; + } + + RNDIS_IPA_DEBUG( + "RNDIS aggregation param: en=%d byte_limit=%d time_limit=%d pkt_limit=%d\n" + , ipa_to_usb_ep_cfg.aggr.aggr_en, + ipa_to_usb_ep_cfg.aggr.aggr_byte_limit, + ipa_to_usb_ep_cfg.aggr.aggr_time_limit, + ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit); + + /* enable hdr_metadata_reg_valid */ + usb_to_ipa_ep_cfg->hdr.hdr_metadata_reg_valid = true; + + result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg); + if (result) { + pr_err("failed to configure IPA to USB end-point\n"); + return result; + } + RNDIS_IPA_DEBUG("IPA->USB end-point configured\n"); + + return 0; +} + +/** + * rndis_ipa_set_device_ethernet_addr() - set device Ethernet address + * @dev_ethaddr: device Ethernet address + * + * Returns 0 for success, negative otherwise + */ +static int rndis_ipa_set_device_ethernet_addr( + u8 *dev_ethaddr, + u8 
device_ethaddr[]) +{ + if (!is_valid_ether_addr(device_ethaddr)) + return -EINVAL; + memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN); + + return 0; +} + +/** rndis_ipa_next_state - return the next state of the driver + * @current_state: the current state of the driver + * @operation: an enum which represent the operation being made on the driver + * by its API. + * + * This function implements the driver internal state machine. + * Its decisions are based on the driver current state and the operation + * being made. + * In case the operation is invalid this state machine will return + * the value RNDIS_IPA_INVALID to inform the caller for a forbidden sequence. + */ +static enum rndis_ipa_state rndis_ipa_next_state( + enum rndis_ipa_state current_state, + enum rndis_ipa_operation operation) +{ + int next_state = RNDIS_IPA_INVALID; + + switch (current_state) { + case RNDIS_IPA_UNLOADED: + if (operation == RNDIS_IPA_INITIALIZE) + next_state = RNDIS_IPA_INITIALIZED; + break; + case RNDIS_IPA_INITIALIZED: + if (operation == RNDIS_IPA_CONNECT) + next_state = RNDIS_IPA_CONNECTED; + else if (operation == RNDIS_IPA_OPEN) + next_state = RNDIS_IPA_UP; + else if (operation == RNDIS_IPA_CLEANUP) + next_state = RNDIS_IPA_UNLOADED; + break; + case RNDIS_IPA_CONNECTED: + if (operation == RNDIS_IPA_DISCONNECT) + next_state = RNDIS_IPA_INITIALIZED; + else if (operation == RNDIS_IPA_OPEN) + next_state = RNDIS_IPA_CONNECTED_AND_UP; + break; + case RNDIS_IPA_UP: + if (operation == RNDIS_IPA_STOP) + next_state = RNDIS_IPA_INITIALIZED; + else if (operation == RNDIS_IPA_CONNECT) + next_state = RNDIS_IPA_CONNECTED_AND_UP; + else if (operation == RNDIS_IPA_CLEANUP) + next_state = RNDIS_IPA_UNLOADED; + break; + case RNDIS_IPA_CONNECTED_AND_UP: + if (operation == RNDIS_IPA_STOP) + next_state = RNDIS_IPA_CONNECTED; + else if (operation == RNDIS_IPA_DISCONNECT) + next_state = RNDIS_IPA_UP; + break; + default: + RNDIS_IPA_ERROR("State is not supported\n"); + break; + } + + RNDIS_IPA_DEBUG + ("state transition ( %s -> %s )- %s\n", + rndis_ipa_state_string(current_state), + rndis_ipa_state_string(next_state), + next_state == RNDIS_IPA_INVALID ? 
+ "Forbidden" : "Allowed"); + + return next_state; +} + +/** + * rndis_ipa_state_string - return the state string representation + * @state: enum which describe the state + */ +static const char *rndis_ipa_state_string(enum rndis_ipa_state state) +{ + switch (state) { + case RNDIS_IPA_UNLOADED: + return "RNDIS_IPA_UNLOADED"; + case RNDIS_IPA_INITIALIZED: + return "RNDIS_IPA_INITIALIZED"; + case RNDIS_IPA_CONNECTED: + return "RNDIS_IPA_CONNECTED"; + case RNDIS_IPA_UP: + return "RNDIS_IPA_UP"; + case RNDIS_IPA_CONNECTED_AND_UP: + return "RNDIS_IPA_CONNECTED_AND_UP"; + default: + return "Not supported"; + } +} + +static void rndis_ipa_dump_skb(struct sk_buff *skb) +{ + int i; + u32 *cur = (u32 *)skb->data; + u8 *byte; + + RNDIS_IPA_DEBUG + ("packet dump start for skb->len=%d\n", + skb->len); + + for (i = 0; i < (skb->len / 4); i++) { + byte = (u8 *)(cur + i); + pr_info + ("%2d %08x %02x %02x %02x %02x\n", + i, *(cur + i), + byte[0], byte[1], byte[2], byte[3]); + } + RNDIS_IPA_DEBUG + ("packet dump ended for skb->len=%d\n", skb->len); +} + +#ifdef CONFIG_DEBUG_FS +/** + * Creates the root folder for the driver + */ +static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + const mode_t flags_read_write = 0666; + const mode_t flags_read_only = 0444; + const mode_t flags_write_only = 0222; + struct dentry *file; + struct dentry *aggr_directory; + + RNDIS_IPA_LOG_ENTRY(); + + if (!rndis_ipa_ctx) + return; + + rndis_ipa_ctx->directory = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); + if (!rndis_ipa_ctx->directory) { + RNDIS_IPA_ERROR("could not create debugfs directory entry\n"); + goto fail_directory; + } + + file = debugfs_create_bool + ("tx_filter", flags_read_write, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->tx_filter); + if (!file) { + RNDIS_IPA_ERROR("could not create debugfs tx_filter file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("rx_filter", flags_read_write, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->rx_filter); + if (!file) { + RNDIS_IPA_ERROR("could not create debugfs rx_filter file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("icmp_filter", flags_read_write, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->icmp_filter); + if (!file) { + RNDIS_IPA_ERROR("could not create debugfs icmp_filter file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("rm_enable", flags_read_write, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->rm_enable); + if (!file) { + RNDIS_IPA_ERROR("could not create debugfs rm file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("outstanding_high", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->outstanding_high); + if (!file) { + RNDIS_IPA_ERROR("could not create outstanding_high file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("outstanding_low", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->outstanding_low); + if (!file) { + RNDIS_IPA_ERROR("could not create outstanding_low file\n"); + goto fail_file; + } + + file = debugfs_create_file + ("outstanding", flags_read_only, + rndis_ipa_ctx->directory, + rndis_ipa_ctx, &rndis_ipa_debugfs_atomic_ops); + if (!file) { + RNDIS_IPA_ERROR("could not create outstanding file\n"); + goto fail_file; + } + + file = debugfs_create_u8 + ("state", flags_read_only, + rndis_ipa_ctx->directory, (u8 *)&rndis_ipa_ctx->state); + if (!file) { + RNDIS_IPA_ERROR("could not create state file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("tx_dropped", flags_read_only, + rndis_ipa_ctx->directory, 
&rndis_ipa_ctx->tx_dropped); + if (!file) { + RNDIS_IPA_ERROR("could not create tx_dropped file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("rx_dropped", flags_read_only, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->rx_dropped); + if (!file) { + RNDIS_IPA_ERROR("could not create rx_dropped file\n"); + goto fail_file; + } + + aggr_directory = debugfs_create_dir + (DEBUGFS_AGGR_DIR_NAME, + rndis_ipa_ctx->directory); + if (!aggr_directory) { + RNDIS_IPA_ERROR("could not create debugfs aggr entry\n"); + goto fail_directory; + } + + file = debugfs_create_file + ("aggr_value_set", flags_write_only, + aggr_directory, + rndis_ipa_ctx, &rndis_ipa_aggr_ops); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_value_set file\n"); + goto fail_file; + } + + file = debugfs_create_u8 + ("aggr_enable", flags_read_write, + aggr_directory, (u8 *)&ipa_to_usb_ep_cfg.aggr.aggr_en); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_enable file\n"); + goto fail_file; + } + + file = debugfs_create_u8 + ("aggr_type", flags_read_write, + aggr_directory, (u8 *)&ipa_to_usb_ep_cfg.aggr.aggr); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_type file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("aggr_byte_limit", flags_read_write, + aggr_directory, + &ipa_to_usb_ep_cfg.aggr.aggr_byte_limit); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_byte_limit file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("aggr_time_limit", flags_read_write, + aggr_directory, + &ipa_to_usb_ep_cfg.aggr.aggr_time_limit); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_time_limit file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("aggr_pkt_limit", flags_read_write, + aggr_directory, + &ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_pkt_limit file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("tx_dump_enable", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->tx_dump_enable); + if (!file) { + RNDIS_IPA_ERROR("fail to create tx_dump_enable file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("rx_dump_enable", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->rx_dump_enable); + if (!file) { + RNDIS_IPA_ERROR("fail to create rx_dump_enable file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("deaggregation_enable", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->deaggregation_enable); + if (!file) { + RNDIS_IPA_ERROR("fail to create deaggregation_enable file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("error_msec_sleep_time", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->error_msec_sleep_time); + if (!file) { + RNDIS_IPA_ERROR("fail to create error_msec_sleep_time file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("during_xmit_error", flags_read_only, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->during_xmit_error); + if (!file) { + RNDIS_IPA_ERROR("fail to create during_xmit_error file\n"); + goto fail_file; + } + + file = debugfs_create_bool("is_vlan_mode", flags_read_only, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->is_vlan_mode); + if (!file) { + RNDIS_IPA_ERROR("fail to create is_vlan_mode file\n"); + goto fail_file; + } + + RNDIS_IPA_DEBUG("debugfs entries were created\n"); + RNDIS_IPA_LOG_EXIT(); + + return; +fail_file: + debugfs_remove_recursive(rndis_ipa_ctx->directory); +fail_directory: + return; +} + +static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev 
*rndis_ipa_ctx) +{ + debugfs_remove_recursive(rndis_ipa_ctx->directory); +} + +#else /* !CONFIG_DEBUG_FS */ + +static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx) {} + +static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx) {} + +#endif /* CONFIG_DEBUG_FS*/ + +static int rndis_ipa_debugfs_aggr_open + (struct inode *inode, + struct file *file) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private; + + file->private_data = rndis_ipa_ctx; + + return 0; +} + +static ssize_t rndis_ipa_debugfs_aggr_write + (struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = NULL; + int result; + + if (file == NULL) + return -EFAULT; + rndis_ipa_ctx = file->private_data; + + result = ipa_cfg_ep(rndis_ipa_ctx->usb_to_ipa_hdl, &ipa_to_usb_ep_cfg); + if (result) { + pr_err("failed to re-configure USB to IPA point\n"); + return result; + } + pr_info("IPA<-USB end-point re-configured\n"); + + return count; +} + +static int rndis_ipa_debugfs_atomic_open(struct inode *inode, struct file *file) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private; + + RNDIS_IPA_LOG_ENTRY(); + + file->private_data = &rndis_ipa_ctx->outstanding_pkts; + + RNDIS_IPA_LOG_EXIT(); + + return 0; +} + +static ssize_t rndis_ipa_debugfs_atomic_read + (struct file *file, char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes; + u8 atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0}; + atomic_t *atomic_var = file->private_data; + + RNDIS_IPA_LOG_ENTRY(); + + nbytes = scnprintf + (atomic_str, sizeof(atomic_str), "%d\n", + atomic_read(atomic_var)); + + RNDIS_IPA_LOG_EXIT(); + + return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes); +} + +static int rndis_ipa_init_module(void) +{ + ipa_rndis_logbuf = ipc_log_context_create(IPA_RNDIS_IPC_LOG_PAGES, + "ipa_rndis", 0); + if (ipa_rndis_logbuf == NULL) + RNDIS_IPA_DEBUG("failed to create IPC log, continue...\n"); + + pr_info("RNDIS_IPA module is loaded.\n"); + return 0; +} + +static void rndis_ipa_cleanup_module(void) +{ + if (ipa_rndis_logbuf) + ipc_log_context_destroy(ipa_rndis_logbuf); + ipa_rndis_logbuf = NULL; + + pr_info("RNDIS_IPA module is unloaded.\n"); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("RNDIS_IPA network interface"); + +late_initcall(rndis_ipa_init_module); +module_exit(rndis_ipa_cleanup_module); diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h new file mode 100644 index 000000000000..856890a3c5b4 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rndis_ipa +#define TRACE_INCLUDE_FILE rndis_ipa_trace + +#if !defined(_RNDIS_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _RNDIS_IPA_TRACE_H + +#include + +TRACE_EVENT( + rndis_netif_ni, + + TP_PROTO(unsigned long proto), + + TP_ARGS(proto), + + TP_STRUCT__entry( + __field(unsigned long, proto) + ), + + TP_fast_assign( + __entry->proto = proto; + ), + + TP_printk("proto =%lu\n", __entry->proto) +); + +TRACE_EVENT( + rndis_tx_dp, + + TP_PROTO(unsigned long proto), + + TP_ARGS(proto), + + TP_STRUCT__entry( + __field(unsigned long, proto) + ), + + TP_fast_assign( + __entry->proto = proto; + ), + + TP_printk("proto =%lu\n", __entry->proto) +); + +TRACE_EVENT( + rndis_status_rcvd, + + TP_PROTO(unsigned long proto), + + TP_ARGS(proto), + + TP_STRUCT__entry( + __field(unsigned long, proto) + ), + + TP_fast_assign( + __entry->proto = proto; + ), + + TP_printk("proto =%lu\n", __entry->proto) +); + +#endif /* _RNDIS_IPA_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h new file mode 100644 index 000000000000..6f24fdabcb97 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -0,0 +1,439 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include + +#ifndef _IPA_COMMON_I_H_ +#define _IPA_COMMON_I_H_ +#include +#include +#include +#include +#include +#include + +#define WARNON_RATELIMIT_BURST 1 +#define IPA_RATELIMIT_BURST 1 + +#define __FILENAME__ \ + (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) + +#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = EP; \ + log_info.id_string = (client < 0 || client >= IPA_CLIENT_MAX) \ + ? 
"Invalid Client" : ipa_clients_strings[client] + +#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = SIMPLE; \ + log_info.id_string = __func__ + +#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = RESOURCE; \ + log_info.id_string = resource_name + +#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = SPECIAL; \ + log_info.id_string = id_str + +#define IPA_ACTIVE_CLIENTS_INC_EP(client) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + +/* + * Printing one warning message in 5 seconds if multiple warning messages + * are coming back to back. + */ + +#define WARN_ON_RATELIMIT_IPA(condition) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + WARNON_RATELIMIT_BURST); \ + int rtn = !!(condition); \ + \ + if (unlikely(rtn && __ratelimit(&_rs))) \ + WARN_ON(rtn); \ +}) + +/* + * Printing one error message in 5 seconds if multiple error messages + * are coming back to back. + */ + +#define pr_err_ratelimited_ipa(fmt, args...) 
\ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + IPA_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + pr_err(fmt, ## args); \ +}) + +#define ipa_assert_on(condition)\ +do {\ + if (unlikely(condition))\ + ipa_assert();\ +} while (0) + +#define IPA_CLIENT_IS_PROD(x) \ + (x < IPA_CLIENT_MAX && (x & 0x1) == 0) +#define IPA_CLIENT_IS_CONS(x) \ + (x < IPA_CLIENT_MAX && (x & 0x1) == 1) + +#define IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC (1000) +#define IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC (2000) + +enum ipa_active_client_log_type { + EP, + SIMPLE, + RESOURCE, + SPECIAL, + INVALID +}; + +struct ipa_active_client_logging_info { + const char *id_string; + char *file; + int line; + enum ipa_active_client_log_type type; +}; + +/** + * struct ipa_mem_buffer - IPA memory buffer + * @base: base + * @phys_base: physical base address + * @size: size of memory buffer + */ +struct ipa_mem_buffer { + void *base; + dma_addr_t phys_base; + u32 size; +}; + +/** + * enum ipa3_mhi_burst_mode - MHI channel burst mode state + * + * Values are according to MHI specification + * @IPA_MHI_BURST_MODE_DEFAULT: burst mode enabled for HW channels, + * disabled for SW channels + * @IPA_MHI_BURST_MODE_RESERVED: + * @IPA_MHI_BURST_MODE_DISABLE: Burst mode is disabled for this channel + * @IPA_MHI_BURST_MODE_ENABLE: Burst mode is enabled for this channel + * + */ +enum ipa3_mhi_burst_mode { + IPA_MHI_BURST_MODE_DEFAULT, + IPA_MHI_BURST_MODE_RESERVED, + IPA_MHI_BURST_MODE_DISABLE, + IPA_MHI_BURST_MODE_ENABLE, +}; + +/** + * enum ipa_hw_mhi_channel_states - MHI channel state machine + * + * Values are according to MHI specification + * @IPA_HW_MHI_CHANNEL_STATE_DISABLE: Channel is disabled and not processed by + * the host or device. + * @IPA_HW_MHI_CHANNEL_STATE_ENABLE: A channel is enabled after being + * initialized and configured by host, including its channel context and + * associated transfer ring. While this state, the channel is not active + * and the device does not process transfer. + * @IPA_HW_MHI_CHANNEL_STATE_RUN: The device processes transfers and doorbell + * for channels. + * @IPA_HW_MHI_CHANNEL_STATE_SUSPEND: Used to halt operations on the channel. + * The device does not process transfers for the channel in this state. + * This state is typically used to synchronize the transition to low power + * modes. + * @IPA_HW_MHI_CHANNEL_STATE_STOP: Used to halt operations on the channel. + * The device does not process transfers for the channel in this state. + * @IPA_HW_MHI_CHANNEL_STATE_ERROR: The device detected an error in an element + * from the transfer ring associated with the channel. + * @IPA_HW_MHI_CHANNEL_STATE_INVALID: Invalid state. Shall not be in use in + * operational scenario. + */ +enum ipa_hw_mhi_channel_states { + IPA_HW_MHI_CHANNEL_STATE_DISABLE = 0, + IPA_HW_MHI_CHANNEL_STATE_ENABLE = 1, + IPA_HW_MHI_CHANNEL_STATE_RUN = 2, + IPA_HW_MHI_CHANNEL_STATE_SUSPEND = 3, + IPA_HW_MHI_CHANNEL_STATE_STOP = 4, + IPA_HW_MHI_CHANNEL_STATE_ERROR = 5, + IPA_HW_MHI_CHANNEL_STATE_INVALID = 0xFF +}; + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO + * command. Parameters are sent as 32b immediate parameters. 
+ * @isDlUlSyncEnabled: Flag to indicate if DL UL Syncronization is enabled + * @UlAccmVal: UL Timer Accumulation value (Period after which device will poll + * for UL data) + * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events + * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events + */ +union IpaHwMhiDlUlSyncCmdData_t { + struct IpaHwMhiDlUlSyncCmdParams_t { + u32 isDlUlSyncEnabled:8; + u32 UlAccmVal:8; + u32 ulMsiEventThreshold:8; + u32 dlMsiEventThreshold:8; + } params; + u32 raw32b; +}; + +struct ipa_mhi_ch_ctx { + u8 chstate;/*0-7*/ + u8 brstmode:2;/*8-9*/ + u8 pollcfg:6;/*10-15*/ + u16 rsvd;/*16-31*/ + u32 chtype; + u32 erindex; + u64 rbase; + u64 rlen; + u64 rp; + u64 wp; +} __packed; + +struct ipa_mhi_ev_ctx { + u32 intmodc:16; + u32 intmodt:16; + u32 ertype; + u32 msivec; + u64 rbase; + u64 rlen; + u64 rp; + u64 wp; +} __packed; + +struct ipa_mhi_init_uc_engine { + struct ipa_mhi_msi_info *msi; + u32 mmio_addr; + u32 host_ctrl_addr; + u32 host_data_addr; + u32 first_ch_idx; + u32 first_er_idx; + union IpaHwMhiDlUlSyncCmdData_t *ipa_cached_dl_ul_sync_info; +}; + +struct ipa_mhi_init_gsi_engine { + u32 first_ch_idx; +}; + +struct ipa_mhi_init_engine { + struct ipa_mhi_init_uc_engine uC; + struct ipa_mhi_init_gsi_engine gsi; +}; + +struct start_gsi_channel { + enum ipa_hw_mhi_channel_states state; + struct ipa_mhi_msi_info *msi; + struct ipa_mhi_ev_ctx *ev_ctx_host; + u64 event_context_addr; + struct ipa_mhi_ch_ctx *ch_ctx_host; + u64 channel_context_addr; + void (*ch_err_cb)(struct gsi_chan_err_notify *notify); + void (*ev_err_cb)(struct gsi_evt_err_notify *notify); + void *channel; + bool assert_bit40; + struct gsi_mhi_channel_scratch *mhi; + unsigned long *cached_gsi_evt_ring_hdl; + uint8_t evchid; +}; + +struct start_uc_channel { + enum ipa_hw_mhi_channel_states state; + u8 index; + u8 id; +}; + +struct start_mhi_channel { + struct start_uc_channel uC; + struct start_gsi_channel gsi; +}; + +struct ipa_mhi_connect_params_internal { + struct ipa_sys_connect_params *sys; + u8 channel_id; + struct start_mhi_channel start; +}; + +/** + * struct ipa_hdr_offset_entry - IPA header offset entry + * @link: entry's link in global header offset entries list + * @offset: the offset + * @bin: bin + * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa_hdr_offset_entry { + struct list_head link; + u32 offset; + u32 bin; + bool ipacm_installed; +}; + +extern const char *ipa_clients_strings[]; + +#define IPA_IPC_LOGGING(buf, fmt, args...) 
\ + do { \ + if (buf) \ + ipc_log_string((buf), fmt, __func__, __LINE__, \ + ## args); \ + } while (0) + +void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id); +void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id); +int ipa_inc_client_enable_clks_no_block( + struct ipa_active_client_logging_info *id); +int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource); +int ipa_resume_resource(enum ipa_rm_resource_name name); +int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource); +int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps); +void *ipa_get_ipc_logbuf(void); +void *ipa_get_ipc_logbuf_low(void); +void ipa_assert(void); + +/* MHI */ +int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params); +int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl); +int ipa_disconnect_mhi_pipe(u32 clnt_hdl); +bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client); +int ipa_qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req); +int ipa_qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req); +int ipa_generate_tag_process(void); +int ipa_disable_sps_pipe(enum ipa_client_type client); +int ipa_mhi_reset_channel_internal(enum ipa_client_type client); +int ipa_mhi_start_channel_internal(enum ipa_client_type client); +bool ipa_mhi_sps_channel_empty(enum ipa_client_type client); +int ipa_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index); +int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req); +int ipa_mhi_query_ch_info(enum ipa_client_type client, + struct gsi_chan_info *ch_info); +int ipa_mhi_destroy_channel(enum ipa_client_type client); +int ipa_mhi_is_using_dma(bool *flag); +const char *ipa_mhi_get_state_str(int state); + +/* MHI uC */ +int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd); +int ipa_uc_mhi_init + (void (*ready_cb)(void), void (*wakeup_request_cb)(void)); +void ipa_uc_mhi_cleanup(void); +int ipa_uc_mhi_reset_channel(int channelHandle); +int ipa_uc_mhi_suspend_channel(int channelHandle); +int ipa_uc_mhi_stop_event_update_channel(int channelHandle); +int ipa_uc_mhi_print_stats(char *dbg_buff, int size); + +/* uC */ +int ipa_uc_state_check(void); + +/* general */ +void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb); +void ipa_set_tag_process_before_gating(bool val); +bool ipa_has_open_aggr_frame(enum ipa_client_type client); +int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp); + +int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl, + struct ipa_ntn_conn_in_params *params); +u8 *ipa_write_64(u64 w, u8 *dest); +u8 *ipa_write_32(u32 w, u8 *dest); +u8 *ipa_write_16(u16 hw, u8 *dest); +u8 *ipa_write_8(u8 b, u8 *dest); +u8 *ipa_pad_to_64(u8 *dest); +u8 *ipa_pad_to_32(u8 *dest); +int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data), + void *user_data); +void ipa_ntn_uc_dereg_rdyCB(void); + +int ipa_conn_wdi_pipes(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out, + ipa_wdi_meter_notifier_cb wdi_notify); + +int ipa_disconn_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); + +int ipa_enable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); + +int 
ipa_disable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); + +const char *ipa_get_version_string(enum ipa_hw_type ver); +int ipa_start_gsi_channel(u32 clnt_hdl); + +bool ipa_pm_is_used(void); + +int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr, + struct sg_table *in_sgt_ptr); +int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr); + +int ipa_ut_module_init(void); +void ipa_ut_module_exit(void); + +#endif /* _IPA_COMMON_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c new file mode 100644 index 000000000000..310363fa5492 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm.c @@ -0,0 +1,1184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include "ipa_rm_dependency_graph.h" +#include "ipa_rm_i.h" +#include "ipa_common_i.h" + +static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = { + __stringify(IPA_RM_RESOURCE_Q6_PROD), + __stringify(IPA_RM_RESOURCE_Q6_CONS), + __stringify(IPA_RM_RESOURCE_USB_PROD), + __stringify(IPA_RM_RESOURCE_USB_CONS), + __stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD), + __stringify(IPA_RM_RESOURCE_USB_DPL_CONS), + __stringify(IPA_RM_RESOURCE_HSIC_PROD), + __stringify(IPA_RM_RESOURCE_HSIC_CONS), + __stringify(IPA_RM_RESOURCE_STD_ECM_PROD), + __stringify(IPA_RM_RESOURCE_APPS_CONS), + __stringify(IPA_RM_RESOURCE_RNDIS_PROD), + __stringify(RESERVED_CONS_11), + __stringify(IPA_RM_RESOURCE_WWAN_0_PROD), + __stringify(RESERVED_CONS_13), + __stringify(IPA_RM_RESOURCE_WLAN_PROD), + __stringify(IPA_RM_RESOURCE_WLAN_CONS), + __stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD), + __stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS), + __stringify(IPA_RM_RESOURCE_MHI_PROD), + __stringify(IPA_RM_RESOURCE_MHI_CONS), + __stringify(IPA_RM_RESOURCE_ETHERNET_PROD), + __stringify(IPA_RM_RESOURCE_ETHERNET_CONS), +}; + +struct ipa_rm_profile_vote_type { + enum ipa_voltage_level volt[IPA_RM_RESOURCE_MAX]; + enum ipa_voltage_level curr_volt; + u32 bw_resources[IPA_RM_RESOURCE_MAX]; + u32 curr_bw; +}; + +struct ipa_rm_context_type { + struct ipa_rm_dep_graph *dep_graph; + struct workqueue_struct *ipa_rm_wq; + spinlock_t ipa_rm_lock; + struct ipa_rm_profile_vote_type prof_vote; +}; +static struct ipa_rm_context_type *ipa_rm_ctx; + +struct ipa_rm_notify_ipa_work_type { + struct work_struct work; + enum ipa_voltage_level volt; + u32 bandwidth_mbps; +}; + +/** + * ipa_rm_create_resource() - create resource + * @create_params: [in] parameters needed + * for resource initialization + * + * Returns: 0 on success, negative on failure + * + * This function is called by IPA RM client to initialize client's resources. + * This API should be called before any other IPA RM API on a given resource + * name. 
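For context, a minimal hypothetical client sketch (not part of this patch) of the creation step this kerneldoc describes: the resource is created once, before any other IPA RM call on that name. The helper name example_create_rm_resource, the choice of IPA_RM_RESOURCE_USB_PROD, and the IPA_VOLTAGE_SVS floor level are illustrative assumptions; only .name and .floor_voltage are shown because those are the fields validated by ipa_rm_create_resource() below.

#include <linux/ipa.h>
#include <linux/printk.h>

/* Hypothetical client: register a producer resource with IPA RM once at init. */
static int example_create_rm_resource(void)
{
	struct ipa_rm_create_params create_params = {
		.name = IPA_RM_RESOURCE_USB_PROD,
		.floor_voltage = IPA_VOLTAGE_SVS, /* assumed ipa_voltage_level value */
	};
	int ret;

	ret = ipa_rm_create_resource(&create_params);
	if (ret)
		pr_err("ipa_rm_create_resource failed %d\n", ret);
	return ret;
}
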
+ * + */ +int ipa_rm_create_resource(struct ipa_rm_create_params *create_params) +{ + struct ipa_rm_resource *resource; + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + if (!create_params) { + IPA_RM_ERR("invalid args\n"); + return -EINVAL; + } + IPA_RM_DBG("%s\n", ipa_rm_resource_str(create_params->name)); + + if (create_params->floor_voltage < 0 || + create_params->floor_voltage >= IPA_VOLTAGE_MAX) { + IPA_RM_ERR("invalid voltage %d\n", + create_params->floor_voltage); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + create_params->name, + &resource) == 0) { + IPA_RM_ERR("resource already exists\n"); + result = -EEXIST; + goto bail; + } + result = ipa_rm_resource_create(create_params, + &resource); + if (result) { + IPA_RM_ERR("ipa_rm_resource_create() failed\n"); + goto bail; + } + result = ipa_rm_dep_graph_add(ipa_rm_ctx->dep_graph, resource); + if (result) { + IPA_RM_ERR("ipa_rm_dep_graph_add() failed\n"); + ipa_rm_resource_delete(resource); + goto bail; + } +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_create_resource); + +/** + * ipa_rm_delete_resource() - delete resource + * @resource_name: name of resource to be deleted + * + * Returns: 0 on success, negative on failure + * + * This function is called by IPA RM client to delete client's resources. + * + */ +int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name) +{ + struct ipa_rm_resource *resource; + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exist\n"); + result = -EINVAL; + goto bail; + } + result = ipa_rm_resource_delete(resource); + if (result) { + IPA_RM_ERR("ipa_rm_resource_delete() failed\n"); + goto bail; + } + result = ipa_rm_dep_graph_remove(ipa_rm_ctx->dep_graph, + resource_name); + if (result) { + IPA_RM_ERR("ipa_rm_dep_graph_remove() failed\n"); + goto bail; + } +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_delete_resource); + +static int _ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspace_dep) +{ + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + result = ipa_rm_dep_graph_add_dependency( + ipa_rm_ctx->dep_graph, + resource_name, + depends_on_name, + userspace_dep); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_add_dependency() - create dependency between 2 resources + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * Returns: 0 on success, negative on failure + * + * Side effects: IPA_RM_RESORCE_GRANTED could be generated + * in case 
client registered with IPA RM + */ +int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_add_dependency(resource_name, depends_on_name, false); +} +EXPORT_SYMBOL(ipa_rm_add_dependency); + +/** + * ipa_rm_add_dependency_from_ioctl() - create dependency between 2 resources + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * This function is expected to be called from IOCTL and the dependency will be + * marked as is was added by the userspace. + * + * Returns: 0 on success, negative on failure + * + * Side effects: IPA_RM_RESORCE_GRANTED could be generated + * in case client registered with IPA RM + */ +int ipa_rm_add_dependency_from_ioctl(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_add_dependency(resource_name, depends_on_name, true); +} + +static int _ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspsace_dep) +{ + int result; + struct ipa_rm_resource *consumer; + unsigned long time; + unsigned long flags; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + result = ipa_rm_dep_graph_add_dependency( + ipa_rm_ctx->dep_graph, + resource_name, + depends_on_name, + userspsace_dep); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + if (result == -EINPROGRESS) { + ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + depends_on_name, + &consumer); + IPA_RM_DBG("%s waits for GRANT of %s.\n", + ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name)); + time = wait_for_completion_timeout( + &((struct ipa_rm_resource_cons *)consumer)-> + request_consumer_in_progress, + HZ * 5); + result = 0; + if (!time) { + IPA_RM_ERR("TIMEOUT waiting for %s GRANT event.", + ipa_rm_resource_str(depends_on_name)); + result = -ETIME; + } else { + IPA_RM_DBG("%s waited for %s GRANT %lu time.\n", + ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name), + time); + } + } + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +/** + * ipa_rm_add_dependency_sync() - Create a dependency between 2 resources + * in a synchronized fashion. In case a producer resource is in GRANTED state + * and the newly added consumer resource is in RELEASED state, the consumer + * entity will be requested and the function will block until the consumer + * is granted. + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * This function is expected to be called from IOCTL and the dependency will be + * marked as is was added by the userspace. + * + * Returns: 0 on success, negative on failure + * + * Side effects: May block. See documentation above. + */ +int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_add_dependency_sync(resource_name, depends_on_name, + false); +} +EXPORT_SYMBOL(ipa_rm_add_dependency_sync); + +/** + * ipa_rm_add_dependency_sync_from_ioctl() - Create a dependency between 2 + * resources in a synchronized fashion. 
In case a producer resource is in + * GRANTED state and the newly added consumer resource is in RELEASED state, + * the consumer entity will be requested and the function will block until + * the consumer is granted. + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * Returns: 0 on success, negative on failure + * + * Side effects: May block. See documentation above. + */ +int ipa_rm_add_dependency_sync_from_ioctl( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_add_dependency_sync(resource_name, depends_on_name, + true); +} + +static int _ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspace_dep) +{ + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + result = ipa_rm_dep_graph_delete_dependency( + ipa_rm_ctx->dep_graph, + resource_name, + depends_on_name, + userspace_dep); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_delete_dependency() - delete dependency between 2 resources + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * Returns: 0 on success, negative on failure + * + * Side effects: IPA_RM_RESORCE_GRANTED could be generated + * in case client registered with IPA RM + */ +int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_delete_dependency(resource_name, depends_on_name, false); +} +EXPORT_SYMBOL(ipa_rm_delete_dependency); + +/** + * ipa_rm_delete_dependency_fron_ioctl() - delete dependency between 2 resources + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * This function is expected to be called from IOCTL and the dependency will be + * marked as is was added by the userspace. + * + * Returns: 0 on success, negative on failure + * + * Side effects: IPA_RM_RESORCE_GRANTED could be generated + * in case client registered with IPA RM + */ +int ipa_rm_delete_dependency_from_ioctl(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_delete_dependency(resource_name, depends_on_name, true); +} + +/** + * ipa_rm_request_resource() - request resource + * @resource_name: [in] name of the requested resource + * + * Returns: 0 on success, negative on failure + * + * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED + * on successful completion of this operation. 
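To make the grant flow above concrete, a hypothetical caller sketch, not code from this patch: treating -EINPROGRESS as "grant pending, wait for the IPA_RM_RESOURCE_GRANTED notification registered through ipa_rm_register()" is an assumption about the producer request path, and the resource name is an arbitrary example.

#include <linux/errno.h>
#include <linux/ipa.h>
#include <linux/printk.h>

/* Hypothetical producer-side request; the grant may complete asynchronously. */
static void example_request_usb_prod(void)
{
	int ret = ipa_rm_request_resource(IPA_RM_RESOURCE_USB_PROD);

	if (ret == -EINPROGRESS) {
		/* Assumed: not granted yet; wait for IPA_RM_RESOURCE_GRANTED
		 * on the callback registered via ipa_rm_register().
		 */
		return;
	}
	if (ret)
		pr_err("ipa_rm_request_resource failed %d\n", ret);
}
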
+ */ +int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name) +{ + struct ipa_rm_resource *resource; + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exist\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_request( + (struct ipa_rm_resource_prod *)resource); + +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} +EXPORT_SYMBOL(ipa_rm_request_resource); + +void delayed_release_work_func(struct work_struct *work) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_delayed_release_work_type *rwork = container_of( + to_delayed_work(work), + struct ipa_rm_delayed_release_work_type, + work); + + if (!IPA_RM_RESORCE_IS_CONS(rwork->resource_name)) { + IPA_RM_ERR("can be called on CONS only\n"); + kfree(rwork); + return; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + rwork->resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + goto bail; + } + + ipa_rm_resource_consumer_release( + (struct ipa_rm_resource_cons *)resource, rwork->needed_bw, + rwork->dec_usage_count); + +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + kfree(rwork); + +} + +/** + * ipa_rm_request_resource_with_timer() - requests the specified consumer + * resource and releases it after 1 second + * @resource_name: name of the requested resource + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_delayed_release_work_type *release_work; + int result; + + if (!IPA_RM_RESORCE_IS_CONS(resource_name)) { + IPA_RM_ERR("can be called on CONS only\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_consumer_request( + (struct ipa_rm_resource_cons *)resource, 0, false, true); + if (result != 0 && result != -EINPROGRESS) { + IPA_RM_ERR("consumer request returned error %d\n", result); + result = -EPERM; + goto bail; + } + + release_work = kzalloc(sizeof(*release_work), GFP_ATOMIC); + if (!release_work) { + result = -ENOMEM; + goto bail; + } + release_work->resource_name = resource->name; + release_work->needed_bw = 0; + release_work->dec_usage_count = false; + INIT_DELAYED_WORK(&release_work->work, delayed_release_work_func); + schedule_delayed_work(&release_work->work, + msecs_to_jiffies(IPA_RM_RELEASE_DELAY_IN_MSEC)); + result = 0; +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} + +/** + * ipa_rm_release_resource() - release resource + * @resource_name: [in] name of the requested resource + * + * Returns: 0 on success, negative on failure + * + * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED + * on successful completion of this operation. 
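The matching teardown, again as a hypothetical sketch rather than code from this patch: releasing before deleting is an assumed ordering, and the RELEASED event is the notification mentioned in this kerneldoc.

#include <linux/ipa.h>
#include <linux/printk.h>

/* Hypothetical teardown: release the producer, then remove it from RM. */
static void example_teardown_usb_prod(void)
{
	int ret = ipa_rm_release_resource(IPA_RM_RESOURCE_USB_PROD);

	if (ret)
		pr_err("ipa_rm_release_resource failed %d\n", ret);

	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_USB_PROD);
	if (ret)
		pr_err("ipa_rm_delete_resource failed %d\n", ret);
}
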
+ */ +int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_release( + (struct ipa_rm_resource_prod *)resource); + +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} +EXPORT_SYMBOL(ipa_rm_release_resource); + +/** + * ipa_rm_register() - register for event + * @resource_name: resource name + * @reg_params: [in] registration parameters + * + * Returns: 0 on success, negative on failure + * + * Registration parameters provided here should be the same + * as provided later in ipa_rm_deregister() call. + */ +int ipa_rm_register(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + int result; + unsigned long flags; + struct ipa_rm_resource *resource; + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_register( + (struct ipa_rm_resource_prod *)resource, + reg_params, + true); +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_register); + +/** + * ipa_rm_deregister() - cancel the registration + * @resource_name: resource name + * @reg_params: [in] registration parameters + * + * Returns: 0 on success, negative on failure + * + * Registration parameters provided here should be the same + * as provided in ipa_rm_register() call. + */ +int ipa_rm_deregister(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + int result; + unsigned long flags; + struct ipa_rm_resource *resource; + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_deregister( + (struct ipa_rm_resource_prod *)resource, + reg_params); +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_deregister); + +/** + * ipa_rm_set_perf_profile() - set performance profile + * @resource_name: resource name + * @profile: [in] profile information. + * + * Returns: 0 on success, negative on failure + * + * Set resource performance profile. + * Updates IPA driver if performance level changed. 
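A short hypothetical sketch (not part of this patch) of the vote itself: max_supported_bandwidth_mbps is the field printed and aggregated by the RM code below; the helper name and the resource chosen are illustrative assumptions.

#include <linux/ipa.h>
#include <linux/printk.h>

/* Hypothetical bandwidth vote on behalf of a producer resource. */
static void example_vote_bandwidth(u32 mbps)
{
	struct ipa_rm_perf_profile profile = {
		.max_supported_bandwidth_mbps = mbps,
	};
	int ret;

	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_USB_PROD, &profile);
	if (ret)
		pr_err("ipa_rm_set_perf_profile failed %d\n", ret);
}
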
+ */ +int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name, + struct ipa_rm_perf_profile *profile) +{ + int result; + unsigned long flags; + struct ipa_rm_resource *resource; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + if (profile) + IPA_RM_DBG("BW: %d\n", profile->max_supported_bandwidth_mbps); + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_set_perf_profile(resource, profile); + if (result) { + IPA_RM_ERR("ipa_rm_resource_set_perf_profile failed %d\n", + result); + goto bail; + } + + result = 0; +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_set_perf_profile); + +/** + * ipa_rm_notify_completion() - + * consumer driver notification for + * request_resource / release_resource operations + * completion + * @event: notified event + * @resource_name: resource name + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_notify_completion(enum ipa_rm_event event, + enum ipa_rm_resource_name resource_name) +{ + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("event %d on %s\n", event, + ipa_rm_resource_str(resource_name)); + if (!IPA_RM_RESORCE_IS_CONS(resource_name)) { + IPA_RM_ERR("can be called on CONS only\n"); + result = -EINVAL; + goto bail; + } + ipa_rm_wq_send_cmd(IPA_RM_WQ_RESOURCE_CB, + resource_name, + event, + false); + result = 0; +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_notify_completion); + +static void ipa_rm_wq_handler(struct work_struct *work) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_wq_work_type *ipa_rm_work = + container_of(work, + struct ipa_rm_wq_work_type, + work); + IPA_RM_DBG_LOW("%s cmd=%d event=%d notify_registered_only=%d\n", + ipa_rm_resource_str(ipa_rm_work->resource_name), + ipa_rm_work->wq_cmd, + ipa_rm_work->event, + ipa_rm_work->notify_registered_only); + switch (ipa_rm_work->wq_cmd) { + case IPA_RM_WQ_NOTIFY_PROD: + if (!IPA_RM_RESORCE_IS_PROD(ipa_rm_work->resource_name)) { + IPA_RM_ERR("resource is not PROD\n"); + goto free_work; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + goto free_work; + } + ipa_rm_resource_producer_notify_clients( + (struct ipa_rm_resource_prod *)resource, + ipa_rm_work->event, + ipa_rm_work->notify_registered_only); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + break; + case IPA_RM_WQ_NOTIFY_CONS: + break; + case IPA_RM_WQ_RESOURCE_CB: + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + goto free_work; + } + ipa_rm_resource_consumer_handle_cb( + (struct ipa_rm_resource_cons *)resource, + ipa_rm_work->event); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + 
break; + default: + break; + } + +free_work: + kfree((void *) work); +} + +static void ipa_rm_wq_resume_handler(struct work_struct *work) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work = + container_of(work, + struct ipa_rm_wq_suspend_resume_work_type, + work); + IPA_RM_DBG_LOW("resume work handler: %s", + ipa_rm_resource_str(ipa_rm_work->resource_name)); + + if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) { + IPA_RM_ERR("resource is not CONS\n"); + return; + } + IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa_rm_resource_str( + ipa_rm_work->resource_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str( + ipa_rm_work->resource_name)); + goto bail; + } + ipa_rm_resource_consumer_request_work( + (struct ipa_rm_resource_cons *)resource, + ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true, + ipa_rm_work->inc_usage_count); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); +bail: + kfree(ipa_rm_work); +} + + +static void ipa_rm_wq_suspend_handler(struct work_struct *work) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work = + container_of(work, + struct ipa_rm_wq_suspend_resume_work_type, + work); + IPA_RM_DBG_LOW("suspend work handler: %s", + ipa_rm_resource_str(ipa_rm_work->resource_name)); + + if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) { + IPA_RM_ERR("resource is not CONS\n"); + return; + } + ipa_suspend_resource_sync(ipa_rm_work->resource_name); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + return; + } + ipa_rm_resource_consumer_release_work( + (struct ipa_rm_resource_cons *)resource, + ipa_rm_work->prev_state, + true); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + kfree(ipa_rm_work); +} + +/** + * ipa_rm_wq_send_cmd() - send a command for deferred work + * @wq_cmd: command that should be executed + * @resource_name: resource on which command should be executed + * @notify_registered_only: notify only clients registered by + * ipa_rm_register() + * + * Returns: 0 on success, negative otherwise + */ +int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_event event, + bool notify_registered_only) +{ + int result = -ENOMEM; + struct ipa_rm_wq_work_type *work = kzalloc(sizeof(*work), GFP_ATOMIC); + + if (work) { + INIT_WORK((struct work_struct *)work, ipa_rm_wq_handler); + work->wq_cmd = wq_cmd; + work->resource_name = resource_name; + work->event = event; + work->notify_registered_only = notify_registered_only; + result = queue_work(ipa_rm_ctx->ipa_rm_wq, + (struct work_struct *)work); + } + + return result; +} + +int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw) +{ + int result = -ENOMEM; + struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work), + GFP_ATOMIC); + if (work) { + INIT_WORK((struct work_struct *)work, + ipa_rm_wq_suspend_handler); + work->resource_name = resource_name; 
+ work->prev_state = prev_state; + work->needed_bw = needed_bw; + result = queue_work(ipa_rm_ctx->ipa_rm_wq, + (struct work_struct *)work); + } + + return result; +} + +int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw, + bool inc_usage_count) +{ + int result = -ENOMEM; + struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work), + GFP_ATOMIC); + if (work) { + INIT_WORK((struct work_struct *)work, ipa_rm_wq_resume_handler); + work->resource_name = resource_name; + work->prev_state = prev_state; + work->needed_bw = needed_bw; + work->inc_usage_count = inc_usage_count; + result = queue_work(ipa_rm_ctx->ipa_rm_wq, + (struct work_struct *)work); + } else { + IPA_RM_ERR("no mem\n"); + } + + return result; +} +/** + * ipa_rm_initialize() - initialize IPA RM component + * + * Returns: 0 on success, negative otherwise + */ +int ipa_rm_initialize(void) +{ + int result; + + ipa_rm_ctx = kzalloc(sizeof(*ipa_rm_ctx), GFP_KERNEL); + if (!ipa_rm_ctx) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + ipa_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq"); + if (!ipa_rm_ctx->ipa_rm_wq) { + IPA_RM_ERR("create workqueue failed\n"); + result = -ENOMEM; + goto create_wq_fail; + } + result = ipa_rm_dep_graph_create(&(ipa_rm_ctx->dep_graph)); + if (result) { + IPA_RM_ERR("create dependency graph failed\n"); + goto graph_alloc_fail; + } + spin_lock_init(&ipa_rm_ctx->ipa_rm_lock); + IPA_RM_DBG("SUCCESS\n"); + + return 0; +graph_alloc_fail: + destroy_workqueue(ipa_rm_ctx->ipa_rm_wq); +create_wq_fail: + kfree(ipa_rm_ctx); +bail: + return result; +} + +/** + * ipa_rm_stat() - print RM stat + * @buf: [in] The user buff used to print + * @size: [in] The size of buf + * Returns: number of bytes used on success, negative on failure + * + * This function is called by ipa_debugfs in order to receive + * a full picture of the current state of the RM + */ + +int ipa_rm_stat(char *buf, int size) +{ + unsigned long flags; + int i, cnt = 0, result = EINVAL; + struct ipa_rm_resource *resource = NULL; + u32 sum_bw_prod = 0; + u32 sum_bw_cons = 0; + + if (!buf || size < 0) + return result; + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + for (i = 0; i < IPA_RM_RESOURCE_MAX; ++i) { + if (!IPA_RM_RESORCE_IS_PROD(i)) + continue; + result = ipa_rm_dep_graph_get_resource( + ipa_rm_ctx->dep_graph, + i, + &resource); + if (!result) { + result = ipa_rm_resource_producer_print_stat( + resource, buf + cnt, + size-cnt); + if (result < 0) + goto bail; + cnt += result; + } + } + + for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { + if (IPA_RM_RESORCE_IS_PROD(i)) + sum_bw_prod += ipa_rm_ctx->prof_vote.bw_resources[i]; + else + sum_bw_cons += ipa_rm_ctx->prof_vote.bw_resources[i]; + } + + result = scnprintf(buf + cnt, size - cnt, + "All prod bandwidth: %d, All cons bandwidth: %d\n", + sum_bw_prod, sum_bw_cons); + cnt += result; + + result = scnprintf(buf + cnt, size - cnt, + "Voting: voltage %d, bandwidth %d\n", + ipa_rm_ctx->prof_vote.curr_volt, + ipa_rm_ctx->prof_vote.curr_bw); + cnt += result; + + result = cnt; +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} + +/** + * ipa_rm_resource_str() - returns string that represent the resource + * @resource_name: [in] resource name + */ +const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name) +{ + if (resource_name < 0 || resource_name >= IPA_RM_RESOURCE_MAX) + return "INVALID RESOURCE"; + + return 
resource_name_to_str[resource_name]; +}; + +static void ipa_rm_perf_profile_notify_to_ipa_work(struct work_struct *work) +{ + struct ipa_rm_notify_ipa_work_type *notify_work = container_of(work, + struct ipa_rm_notify_ipa_work_type, + work); + int res; + + IPA_RM_DBG_LOW("calling to IPA driver. voltage %d bandwidth %d\n", + notify_work->volt, notify_work->bandwidth_mbps); + + res = ipa_set_required_perf_profile(notify_work->volt, + notify_work->bandwidth_mbps); + if (res) { + IPA_RM_ERR("ipa_set_required_perf_profile failed %d\n", res); + goto bail; + } + + IPA_RM_DBG_LOW("IPA driver notified\n"); +bail: + kfree(notify_work); +} + +static void ipa_rm_perf_profile_notify_to_ipa(enum ipa_voltage_level volt, + u32 bandwidth) +{ + struct ipa_rm_notify_ipa_work_type *work; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + INIT_WORK(&work->work, ipa_rm_perf_profile_notify_to_ipa_work); + work->volt = volt; + work->bandwidth_mbps = bandwidth; + queue_work(ipa_rm_ctx->ipa_rm_wq, &work->work); +} + +/** + * ipa_rm_perf_profile_change() - change performance profile vote for resource + * @resource_name: [in] resource name + * + * change bandwidth and voltage vote based on resource state. + */ +void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name) +{ + enum ipa_voltage_level old_volt; + u32 *bw_ptr; + u32 old_bw; + struct ipa_rm_resource *resource; + int i; + u32 sum_bw_prod = 0; + u32 sum_bw_cons = 0; + + IPA_RM_DBG_LOW("%s\n", ipa_rm_resource_str(resource_name)); + + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + WARN_ON(1); + return; + } + + old_volt = ipa_rm_ctx->prof_vote.curr_volt; + old_bw = ipa_rm_ctx->prof_vote.curr_bw; + + bw_ptr = &ipa_rm_ctx->prof_vote.bw_resources[resource_name]; + + switch (resource->state) { + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + IPA_RM_DBG_LOW("max_bw = %d, needed_bw = %d\n", + resource->max_bw, resource->needed_bw); + *bw_ptr = min(resource->max_bw, resource->needed_bw); + ipa_rm_ctx->prof_vote.volt[resource_name] = + resource->floor_voltage; + break; + + case IPA_RM_RELEASE_IN_PROGRESS: + case IPA_RM_RELEASED: + *bw_ptr = 0; + ipa_rm_ctx->prof_vote.volt[resource_name] = 0; + break; + + default: + IPA_RM_ERR("unknown state %d\n", resource->state); + WARN_ON(1); + return; + } + IPA_RM_DBG_LOW("resource bandwidth: %d voltage: %d\n", *bw_ptr, + resource->floor_voltage); + + ipa_rm_ctx->prof_vote.curr_volt = IPA_VOLTAGE_UNSPECIFIED; + for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { + if (ipa_rm_ctx->prof_vote.volt[i] > + ipa_rm_ctx->prof_vote.curr_volt) { + ipa_rm_ctx->prof_vote.curr_volt = + ipa_rm_ctx->prof_vote.volt[i]; + } + } + + for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { + if (IPA_RM_RESORCE_IS_PROD(i)) + sum_bw_prod += ipa_rm_ctx->prof_vote.bw_resources[i]; + else + sum_bw_cons += ipa_rm_ctx->prof_vote.bw_resources[i]; + } + + IPA_RM_DBG_LOW("all prod bandwidth: %d all cons bandwidth: %d\n", + sum_bw_prod, sum_bw_cons); + ipa_rm_ctx->prof_vote.curr_bw = min(sum_bw_prod, sum_bw_cons); + + if (ipa_rm_ctx->prof_vote.curr_volt == old_volt && + ipa_rm_ctx->prof_vote.curr_bw == old_bw) { + IPA_RM_DBG_LOW("same voting\n"); + return; + } + + IPA_RM_DBG_LOW("new voting: voltage %d bandwidth %d\n", + ipa_rm_ctx->prof_vote.curr_volt, + ipa_rm_ctx->prof_vote.curr_bw); + + ipa_rm_perf_profile_notify_to_ipa(ipa_rm_ctx->prof_vote.curr_volt, + ipa_rm_ctx->prof_vote.curr_bw); + + return; +}; +/** + * ipa_rm_exit() - free all IPA 
RM resources + */ +void ipa_rm_exit(void) +{ + IPA_RM_DBG("ENTER\n"); + ipa_rm_dep_graph_delete(ipa_rm_ctx->dep_graph); + destroy_workqueue(ipa_rm_ctx->ipa_rm_wq); + kfree(ipa_rm_ctx); + ipa_rm_ctx = NULL; + IPA_RM_DBG("EXIT\n"); +} diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c new file mode 100644 index 000000000000..df582f5934e1 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include "ipa_rm_dependency_graph.h" +#include "ipa_rm_i.h" + +static int ipa_rm_dep_get_index(enum ipa_rm_resource_name resource_name) +{ + int resource_index = IPA_RM_INDEX_INVALID; + + if (IPA_RM_RESORCE_IS_PROD(resource_name)) + resource_index = ipa_rm_prod_index(resource_name); + else if (IPA_RM_RESORCE_IS_CONS(resource_name)) + resource_index = ipa_rm_cons_index(resource_name); + + return resource_index; +} + +/** + * ipa_rm_dep_graph_create() - creates graph + * @dep_graph: [out] created dependency graph + * + * Returns: dependency graph on success, NULL on failure + */ +int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph) +{ + int result = 0; + + *dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL); + if (!*dep_graph) + result = -ENOMEM; + return result; +} + +/** + * ipa_rm_dep_graph_delete() - destroyes the graph + * @graph: [in] dependency graph + * + * Frees all resources. + */ +void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph) +{ + int resource_index; + + if (!graph) { + IPA_RM_ERR("invalid params\n"); + return; + } + for (resource_index = 0; + resource_index < IPA_RM_RESOURCE_MAX; + resource_index++) + kfree(graph->resource_table[resource_index]); + memset(graph->resource_table, 0, sizeof(graph->resource_table)); +} + +/** + * ipa_rm_dep_graph_get_resource() - provides a resource by name + * @graph: [in] dependency graph + * @name: [in] name of the resource + * @resource: [out] resource in case of success + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_get_resource( + struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + struct ipa_rm_resource **resource) +{ + int result; + int resource_index; + + if (!graph) { + result = -EINVAL; + goto bail; + } + resource_index = ipa_rm_dep_get_index(resource_name); + if (resource_index == IPA_RM_INDEX_INVALID) { + result = -EINVAL; + goto bail; + } + *resource = graph->resource_table[resource_index]; + if (!*resource) { + result = -EINVAL; + goto bail; + } + result = 0; +bail: + return result; +} + +/** + * ipa_rm_dep_graph_add() - adds resource to graph + * @graph: [in] dependency graph + * @resource: [in] resource to add + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph, + struct ipa_rm_resource *resource) +{ + int result = 0; + int resource_index; + + if (!graph || !resource) { + result = -EINVAL; + goto bail; + } + resource_index = ipa_rm_dep_get_index(resource->name); + if (resource_index == IPA_RM_INDEX_INVALID) { + result = -EINVAL; + goto bail; + } + graph->resource_table[resource_index] = resource; +bail: + return result; +} + +/** + * ipa_rm_dep_graph_remove() - removes resource from graph + * @graph: [in] dependency graph + * @resource: [in] resource to add + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph, + enum 
ipa_rm_resource_name resource_name) +{ + if (!graph) + return -EINVAL; + graph->resource_table[resource_name] = NULL; + + return 0; +} + +/** + * ipa_rm_dep_graph_add_dependency() - adds dependency between + * two nodes in graph + * @graph: [in] dependency graph + * @resource_name: [in] resource to add + * @depends_on_name: [in] resource to add + * @userspace_dep: [in] operation requested by userspace ? + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspace_dep) +{ + struct ipa_rm_resource *dependent = NULL; + struct ipa_rm_resource *dependency = NULL; + int result; + + if (!graph || + !IPA_RM_RESORCE_IS_PROD(resource_name) || + !IPA_RM_RESORCE_IS_CONS(depends_on_name)) { + IPA_RM_ERR("invalid params\n"); + result = -EINVAL; + goto bail; + } + if (ipa_rm_dep_graph_get_resource(graph, + resource_name, + &dependent)) { + IPA_RM_ERR("%s does not exist\n", + ipa_rm_resource_str(resource_name)); + result = -EINVAL; + goto bail; + } + if (ipa_rm_dep_graph_get_resource(graph, + depends_on_name, + &dependency)) { + IPA_RM_ERR("%s does not exist\n", + ipa_rm_resource_str(depends_on_name)); + result = -EINVAL; + goto bail; + } + result = ipa_rm_resource_add_dependency(dependent, dependency, + userspace_dep); +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_dep_graph_delete_dependency() - deleted dependency between + * two nodes in graph + * @graph: [in] dependency graph + * @resource_name: [in] resource to delete + * @depends_on_name: [in] resource to delete + * @userspace_dep: [in] operation requested by userspace ? + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspace_dep) +{ + struct ipa_rm_resource *dependent = NULL; + struct ipa_rm_resource *dependency = NULL; + int result; + + if (!graph || + !IPA_RM_RESORCE_IS_PROD(resource_name) || + !IPA_RM_RESORCE_IS_CONS(depends_on_name)) { + IPA_RM_ERR("invalid params\n"); + result = -EINVAL; + goto bail; + } + + if (ipa_rm_dep_graph_get_resource(graph, + resource_name, + &dependent)) { + IPA_RM_DBG("%s does not exist\n", + ipa_rm_resource_str(resource_name)); + result = -EINVAL; + goto bail; + } + + if (ipa_rm_dep_graph_get_resource(graph, + depends_on_name, + &dependency)) { + IPA_RM_DBG("%s does not exist\n", + ipa_rm_resource_str(depends_on_name)); + result = -EINVAL; + goto bail; + } + + result = ipa_rm_resource_delete_dependency(dependent, dependency, + userspace_dep); +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h new file mode 100644 index 000000000000..6fae81ac97cc --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_ +#define _IPA_RM_DEPENDENCY_GRAPH_H_ + +#include +#include +#include "ipa_rm_resource.h" + +struct ipa_rm_dep_graph { + struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX]; +}; + +int ipa_rm_dep_graph_get_resource( + struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name name, + struct ipa_rm_resource **resource); + +int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph); + +void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph); + +int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph, + struct ipa_rm_resource *resource); + +int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name); + +int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspsace_dep); + +int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspsace_dep); + +#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_rm_i.h new file mode 100644 index 000000000000..b0af3416b34e --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_i.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_RM_I_H_ +#define _IPA_RM_I_H_ + +#include +#include +#include "ipa_rm_resource.h" +#include "ipa_common_i.h" + +#define IPA_RM_DRV_NAME "ipa_rm" + +#define IPA_RM_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) +#define IPA_RM_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_RM_ERR(fmt, args...) \ + do { \ + pr_err(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_RM_RESORCE_IS_PROD(x) \ + (x < IPA_RM_RESOURCE_MAX && (x & 0x1) == 0) +#define IPA_RM_RESORCE_IS_CONS(x) \ + (x < IPA_RM_RESOURCE_MAX && (x & 0x1) == 1) +#define IPA_RM_INDEX_INVALID (-1) +#define IPA_RM_RELEASE_DELAY_IN_MSEC 1000 + +int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name); +int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name); + +/** + * struct ipa_rm_delayed_release_work_type - IPA RM delayed resource release + * work type + * @delayed_work: work struct + * @ipa_rm_resource_name: name of the resource on which this work should be done + * @needed_bw: bandwidth required for resource in Mbps + * @dec_usage_count: decrease usage count on release ? 
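+ *
+ * Note (assumption): instances of this type are presumably queued as delayed
+ * work and processed by delayed_release_work_func(), declared below.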
+ */ +struct ipa_rm_delayed_release_work_type { + struct delayed_work work; + enum ipa_rm_resource_name resource_name; + u32 needed_bw; + bool dec_usage_count; + +}; + +/** + * enum ipa_rm_wq_cmd - workqueue commands + */ +enum ipa_rm_wq_cmd { + IPA_RM_WQ_NOTIFY_PROD, + IPA_RM_WQ_NOTIFY_CONS, + IPA_RM_WQ_RESOURCE_CB +}; + +/** + * struct ipa_rm_wq_work_type - IPA RM worqueue specific + * work type + * @work: work struct + * @wq_cmd: command that should be processed in workqueue context + * @resource_name: name of the resource on which this work + * should be done + * @dep_graph: data structure to search for resource if exists + * @event: event to notify + * @notify_registered_only: notify only clients registered by + * ipa_rm_register() + */ +struct ipa_rm_wq_work_type { + struct work_struct work; + enum ipa_rm_wq_cmd wq_cmd; + enum ipa_rm_resource_name resource_name; + enum ipa_rm_event event; + bool notify_registered_only; +}; + +/** + * struct ipa_rm_wq_suspend_resume_work_type - IPA RM worqueue resume or + * suspend work type + * @work: work struct + * @resource_name: name of the resource on which this work + * should be done + * @prev_state: + * @needed_bw: + */ +struct ipa_rm_wq_suspend_resume_work_type { + struct work_struct work; + enum ipa_rm_resource_name resource_name; + enum ipa_rm_resource_state prev_state; + u32 needed_bw; + bool inc_usage_count; + +}; + +int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_event event, + bool notify_registered_only); + +int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw, + bool inc_usage_count); + +int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw); + +int ipa_rm_initialize(void); + +int ipa_rm_stat(char *buf, int size); + +const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name); + +void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name); + +int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name); + +void delayed_release_work_func(struct work_struct *work); + +int ipa_rm_add_dependency_from_ioctl(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +int ipa_rm_delete_dependency_from_ioctl(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +void ipa_rm_exit(void); + +#endif /* _IPA_RM_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c new file mode 100644 index 000000000000..d65d54c8ee61 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_rm_i.h" + +#define MAX_WS_NAME 20 + +/** + * struct ipa_rm_it_private - IPA RM Inactivity Timer private + * data + * @initied: indicates if instance was initialized + * @lock - spinlock for mutual exclusion + * @resource_name - resource name + * @work: delayed work object for running delayed releas + * function + * @resource_requested: boolean flag indicates if resource was requested + * @reschedule_work: boolean flag indicates to not release and to + * reschedule the release work. 
+ * @work_in_progress: boolean flag indicates is release work was scheduled. + * @jiffies: number of jiffies for timeout + * + * WWAN private - holds all relevant info about WWAN driver + */ +struct ipa_rm_it_private { + bool initied; + enum ipa_rm_resource_name resource_name; + spinlock_t lock; + struct delayed_work work; + bool resource_requested; + bool reschedule_work; + bool work_in_progress; + unsigned long jiffies; + struct wakeup_source w_lock; + char w_lock_name[MAX_WS_NAME]; +}; + +static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX]; + +/** + * ipa_rm_inactivity_timer_func() - called when timer expired in + * the context of the shared workqueue. Checks internally if + * reschedule_work flag is set. In case it is not set this function calls to + * ipa_rm_release_resource(). In case reschedule_work is set this function + * reschedule the work. This flag is cleared cleared when + * calling to ipa_rm_inactivity_timer_release_resource(). + * + * @work: work object provided by the work queue + * + * Return codes: + * None + */ +static void ipa_rm_inactivity_timer_func(struct work_struct *work) +{ + + struct ipa_rm_it_private *me = container_of(to_delayed_work(work), + struct ipa_rm_it_private, + work); + unsigned long flags; + + IPA_RM_DBG_LOW("timer expired for resource %d\n", me->resource_name); + + spin_lock_irqsave( + &ipa_rm_it_handles[me->resource_name].lock, flags); + if (ipa_rm_it_handles[me->resource_name].reschedule_work) { + IPA_RM_DBG_LOW("setting delayed work\n"); + ipa_rm_it_handles[me->resource_name].reschedule_work = false; + queue_delayed_work(system_unbound_wq, + &ipa_rm_it_handles[me->resource_name].work, + ipa_rm_it_handles[me->resource_name].jiffies); + } else if (ipa_rm_it_handles[me->resource_name].resource_requested) { + IPA_RM_DBG_LOW("not calling release\n"); + ipa_rm_it_handles[me->resource_name].work_in_progress = false; + } else { + IPA_RM_DBG_LOW("calling release_resource on resource %d\n", + me->resource_name); + __pm_relax(&ipa_rm_it_handles[me->resource_name].w_lock); + ipa_rm_release_resource(me->resource_name); + ipa_rm_it_handles[me->resource_name].work_in_progress = false; + } + spin_unlock_irqrestore( + &ipa_rm_it_handles[me->resource_name].lock, flags); +} + +/** + * ipa_rm_inactivity_timer_init() - Init function for IPA RM + * inactivity timer. This function shall be called prior calling + * any other API of IPA RM inactivity timer. + * + * @resource_name: Resource name. @see ipa_rm.h + * @msecs: time in miliseccond, that IPA RM inactivity timer + * shall wait prior calling to ipa_rm_release_resource(). 
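+ *
+ * An illustrative usage sketch (the resource name and the 1000 ms timeout
+ * are examples only):
+ *
+ *	ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_USB_PROD, 1000);
+ *	ipa_rm_inactivity_timer_request_resource(IPA_RM_RESOURCE_USB_PROD);
+ *	/* ... use the resource ... */
+ *	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_USB_PROD);
+ *	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_USB_PROD);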
+ * + * Return codes: + * 0: success + * -EINVAL: invalid parameters + */ +int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name, + unsigned long msecs) +{ + struct wakeup_source *pwlock; + char *name; + + IPA_RM_DBG_LOW("resource %d\n", resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("Invalid parameter\n"); + return -EINVAL; + } + + if (ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("resource %d already inited\n", resource_name); + return -EINVAL; + } + + spin_lock_init(&ipa_rm_it_handles[resource_name].lock); + ipa_rm_it_handles[resource_name].resource_name = resource_name; + ipa_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs); + ipa_rm_it_handles[resource_name].resource_requested = false; + ipa_rm_it_handles[resource_name].reschedule_work = false; + ipa_rm_it_handles[resource_name].work_in_progress = false; + pwlock = &(ipa_rm_it_handles[resource_name].w_lock); + name = ipa_rm_it_handles[resource_name].w_lock_name; + snprintf(name, MAX_WS_NAME, "IPA_RM%d\n", resource_name); + wakeup_source_init(pwlock, name); + INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work, + ipa_rm_inactivity_timer_func); + ipa_rm_it_handles[resource_name].initied = true; + + return 0; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_init); + +/** + * ipa_rm_inactivity_timer_destroy() - De-Init function for IPA + * RM inactivity timer. + * @resource_name: Resource name. @see ipa_rm.h + * Return codes: + * 0: success + * -EINVAL: invalid parameters + */ +int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name) +{ + struct wakeup_source *pwlock; + + IPA_RM_DBG_LOW("resource %d\n", resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("Invalid parameter\n"); + return -EINVAL; + } + + if (!ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("resource %d already inited\n", + resource_name); + return -EINVAL; + } + + cancel_delayed_work_sync(&ipa_rm_it_handles[resource_name].work); + pwlock = &(ipa_rm_it_handles[resource_name].w_lock); + wakeup_source_trash(pwlock); + + memset(&ipa_rm_it_handles[resource_name], 0, + sizeof(struct ipa_rm_it_private)); + + return 0; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_destroy); + +/** + * ipa_rm_inactivity_timer_request_resource() - Same as + * ipa_rm_request_resource(), with a difference that calling to + * this function will also cancel the inactivity timer, if + * ipa_rm_inactivity_timer_release_resource() was called earlier. + * + * @resource_name: Resource name. 
@see ipa_rm.h + * + * Return codes: + * 0: success + * -EINVAL: invalid parameters + */ +int ipa_rm_inactivity_timer_request_resource( + enum ipa_rm_resource_name resource_name) +{ + int ret; + unsigned long flags; + + IPA_RM_DBG_LOW("resource %d\n", resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("Invalid parameter\n"); + return -EINVAL; + } + + if (!ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("Not initialized\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags); + ipa_rm_it_handles[resource_name].resource_requested = true; + spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags); + ret = ipa_rm_request_resource(resource_name); + IPA_RM_DBG_LOW("resource %d: returning %d\n", resource_name, ret); + + return ret; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_request_resource); + +/** + * ipa_rm_inactivity_timer_release_resource() - Sets the + * inactivity timer to the timeout set by + * ipa_rm_inactivity_timer_init(). When the timeout expires, IPA + * RM inactivity timer will call to ipa_rm_release_resource(). + * If a call to ipa_rm_inactivity_timer_request_resource() was + * made BEFORE the timeout has expired, rge timer will be + * cancelled. + * + * @resource_name: Resource name. @see ipa_rm.h + * + * Return codes: + * 0: success + * -EINVAL: invalid parameters + */ +int ipa_rm_inactivity_timer_release_resource( + enum ipa_rm_resource_name resource_name) +{ + unsigned long flags; + + IPA_RM_DBG_LOW("resource %d\n", resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("Invalid parameter\n"); + return -EINVAL; + } + + if (!ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("Not initialized\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags); + ipa_rm_it_handles[resource_name].resource_requested = false; + if (ipa_rm_it_handles[resource_name].work_in_progress) { + IPA_RM_DBG_LOW("Timer already set, no sched again %d\n", + resource_name); + ipa_rm_it_handles[resource_name].reschedule_work = true; + spin_unlock_irqrestore( + &ipa_rm_it_handles[resource_name].lock, flags); + return 0; + } + ipa_rm_it_handles[resource_name].work_in_progress = true; + ipa_rm_it_handles[resource_name].reschedule_work = false; + __pm_stay_awake(&ipa_rm_it_handles[resource_name].w_lock); + IPA_RM_DBG_LOW("setting delayed work\n"); + queue_delayed_work(system_unbound_wq, + &ipa_rm_it_handles[resource_name].work, + ipa_rm_it_handles[resource_name].jiffies); + spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags); + + return 0; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_release_resource); + diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_rm_peers_list.c new file mode 100644 index 000000000000..9b621a17deed --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.c @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include "ipa_rm_i.h" + +/** + * ipa_rm_peers_list_get_resource_index() - resource name to index + * of this resource in corresponding peers list + * @resource_name: [in] resource name + * + * Returns: resource index mapping, IPA_RM_INDEX_INVALID + * in case provided resource name isn't contained in enum + * ipa_rm_resource_name. 
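+ * Note: for a valid name the returned index is the enum value itself (see
+ * ipa_rm_prod_index()/ipa_rm_cons_index()); producers and consumers are
+ * told apart by the low bit of the value, per IPA_RM_RESORCE_IS_PROD/_CONS.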
+ * + */ +static int ipa_rm_peers_list_get_resource_index( + enum ipa_rm_resource_name resource_name) +{ + int resource_index = IPA_RM_INDEX_INVALID; + + if (IPA_RM_RESORCE_IS_PROD(resource_name)) + resource_index = ipa_rm_prod_index(resource_name); + else if (IPA_RM_RESORCE_IS_CONS(resource_name)) + resource_index = ipa_rm_cons_index(resource_name); + + return resource_index; +} + +static bool ipa_rm_peers_list_check_index(int index, + struct ipa_rm_peers_list *peers_list) +{ + return !(index > peers_list->max_peers || index < 0); +} + +/** + * ipa_rm_peers_list_create() - creates the peers list + * + * @max_peers: maximum number of peers in new list + * @peers_list: [out] newly created peers list + * + * Returns: 0 in case of SUCCESS, negative otherwise + */ +int ipa_rm_peers_list_create(int max_peers, + struct ipa_rm_peers_list **peers_list) +{ + int result; + + *peers_list = kzalloc(sizeof(**peers_list), GFP_ATOMIC); + if (!*peers_list) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + + (*peers_list)->max_peers = max_peers; + (*peers_list)->peers = kzalloc((*peers_list)->max_peers * + sizeof(*((*peers_list)->peers)), GFP_ATOMIC); + if (!((*peers_list)->peers)) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto list_alloc_fail; + } + + return 0; + +list_alloc_fail: + kfree(*peers_list); +bail: + return result; +} + +/** + * ipa_rm_peers_list_delete() - deletes the peers list + * + * @peers_list: peers list + * + */ +void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list) +{ + if (peers_list) { + kfree(peers_list->peers); + kfree(peers_list); + } +} + +/** + * ipa_rm_peers_list_remove_peer() - removes peer from the list + * + * @peers_list: peers list + * @resource_name: name of the resource to remove + * + */ +void ipa_rm_peers_list_remove_peer( + struct ipa_rm_peers_list *peers_list, + enum ipa_rm_resource_name resource_name) +{ + if (!peers_list) + return; + + peers_list->peers[ipa_rm_peers_list_get_resource_index( + resource_name)].resource = NULL; + peers_list->peers[ipa_rm_peers_list_get_resource_index( + resource_name)].userspace_dep = false; + peers_list->peers_count--; +} + +/** + * ipa_rm_peers_list_add_peer() - adds peer to the list + * + * @peers_list: peers list + * @resource: resource to add + * + */ +void ipa_rm_peers_list_add_peer( + struct ipa_rm_peers_list *peers_list, + struct ipa_rm_resource *resource, + bool userspace_dep) +{ + if (!peers_list || !resource) + return; + + peers_list->peers[ipa_rm_peers_list_get_resource_index( + resource->name)].resource = resource; + peers_list->peers[ipa_rm_peers_list_get_resource_index( + resource->name)].userspace_dep = userspace_dep; + peers_list->peers_count++; +} + +/** + * ipa_rm_peers_list_is_empty() - checks + * if resource peers list is empty + * + * @peers_list: peers list + * + * Returns: true if the list is empty, false otherwise + */ +bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list) +{ + bool result = true; + + if (!peers_list) + goto bail; + + if (peers_list->peers_count > 0) + result = false; +bail: + return result; +} + +/** + * ipa_rm_peers_list_has_last_peer() - checks + * if resource peers list has exactly one peer + * + * @peers_list: peers list + * + * Returns: true if the list has exactly one peer, false otherwise + */ +bool ipa_rm_peers_list_has_last_peer( + struct ipa_rm_peers_list *peers_list) +{ + bool result = false; + + if (!peers_list) + goto bail; + + if (peers_list->peers_count == 1) + result = true; +bail: + return result; +} + +/** + * 
ipa_rm_peers_list_check_dependency() - check dependency + * between 2 peer lists + * @resource_peers: first peers list + * @resource_name: first peers list resource name + * @depends_on_peers: second peers list + * @depends_on_name: second peers list resource name + * @userspace_dep: [out] dependency was created by userspace + * + * Returns: true if there is dependency, false otherwise + * + */ +bool ipa_rm_peers_list_check_dependency( + struct ipa_rm_peers_list *resource_peers, + enum ipa_rm_resource_name resource_name, + struct ipa_rm_peers_list *depends_on_peers, + enum ipa_rm_resource_name depends_on_name, + bool *userspace_dep) +{ + bool result = false; + int resource_index; + struct ipa_rm_resource_peer *peer_ptr; + + if (!resource_peers || !depends_on_peers || !userspace_dep) + return result; + + resource_index = ipa_rm_peers_list_get_resource_index(depends_on_name); + peer_ptr = &resource_peers->peers[resource_index]; + if (peer_ptr->resource != NULL) { + result = true; + *userspace_dep = peer_ptr->userspace_dep; + } + + resource_index = ipa_rm_peers_list_get_resource_index(resource_name); + peer_ptr = &depends_on_peers->peers[resource_index]; + if (peer_ptr->resource != NULL) { + result = true; + *userspace_dep = peer_ptr->userspace_dep; + } + + return result; +} + +/** + * ipa_rm_peers_list_get_resource() - get resource by + * resource index + * @resource_index: resource index + * @resource_peers: peers list + * + * Returns: the resource if found, NULL otherwise + */ +struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index, + struct ipa_rm_peers_list *resource_peers) +{ + struct ipa_rm_resource *result = NULL; + + if (!ipa_rm_peers_list_check_index(resource_index, resource_peers)) + goto bail; + + result = resource_peers->peers[resource_index].resource; +bail: + return result; +} + +/** + * ipa_rm_peers_list_get_userspace_dep() - returns whether resource dependency + * was added by userspace + * @resource_index: resource index + * @resource_peers: peers list + * + * Returns: true if dependency was added by userspace, false by kernel + */ +bool ipa_rm_peers_list_get_userspace_dep(int resource_index, + struct ipa_rm_peers_list *resource_peers) +{ + bool result = false; + + if (!ipa_rm_peers_list_check_index(resource_index, resource_peers)) + goto bail; + + result = resource_peers->peers[resource_index].userspace_dep; +bail: + return result; +} + +/** + * ipa_rm_peers_list_get_size() - get peers list sise + * + * @peers_list: peers list + * + * Returns: the size of the peers list + */ +int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list) +{ + return peers_list->max_peers; +} diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.h b/drivers/platform/msm/ipa/ipa_rm_peers_list.h new file mode 100644 index 000000000000..a79143a9dd56 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _IPA_RM_PEERS_LIST_H_ +#define _IPA_RM_PEERS_LIST_H_ + +#include "ipa_rm_resource.h" + +struct ipa_rm_resource_peer { + struct ipa_rm_resource *resource; + bool userspace_dep; +}; + +/** + * struct ipa_rm_peers_list - IPA RM resource peers list + * @peers: the list of references to resources dependent on this resource + * in case of producer or list of dependencies in case of consumer + * @max_peers: maximum number of peers for this resource + * @peers_count: actual number of peers for this resource + */ +struct ipa_rm_peers_list { + struct ipa_rm_resource_peer *peers; + int max_peers; + int peers_count; +}; + +int ipa_rm_peers_list_create(int max_peers, + struct ipa_rm_peers_list **peers_list); +void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list); +void ipa_rm_peers_list_remove_peer( + struct ipa_rm_peers_list *peers_list, + enum ipa_rm_resource_name resource_name); +void ipa_rm_peers_list_add_peer( + struct ipa_rm_peers_list *peers_list, + struct ipa_rm_resource *resource, + bool userspace_dep); +bool ipa_rm_peers_list_check_dependency( + struct ipa_rm_peers_list *resource_peers, + enum ipa_rm_resource_name resource_name, + struct ipa_rm_peers_list *depends_on_peers, + enum ipa_rm_resource_name depends_on_name, + bool *userspace_dep); +struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index, + struct ipa_rm_peers_list *peers_list); +bool ipa_rm_peers_list_get_userspace_dep(int resource_index, + struct ipa_rm_peers_list *resource_peers); +int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list); +bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list); +bool ipa_rm_peers_list_has_last_peer( + struct ipa_rm_peers_list *peers_list); + + +#endif /* _IPA_RM_PEERS_LIST_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c new file mode 100644 index 000000000000..732c8b219772 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_resource.c @@ -0,0 +1,1204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include "ipa_rm_resource.h" +#include "ipa_rm_i.h" +#include "ipa_common_i.h" +/** + * ipa_rm_dep_prod_index() - producer name to producer index mapping + * @resource_name: [in] resource name (should be of producer) + * + * Returns: resource index mapping, IPA_RM_INDEX_INVALID + * in case provided resource name isn't contained + * in enum ipa_rm_resource_name or is not of producers. + * + */ +int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name) +{ + int result = resource_name; + + switch (resource_name) { + case IPA_RM_RESOURCE_Q6_PROD: + case IPA_RM_RESOURCE_USB_PROD: + case IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD: + case IPA_RM_RESOURCE_HSIC_PROD: + case IPA_RM_RESOURCE_STD_ECM_PROD: + case IPA_RM_RESOURCE_RNDIS_PROD: + case IPA_RM_RESOURCE_WWAN_0_PROD: + case IPA_RM_RESOURCE_WLAN_PROD: + case IPA_RM_RESOURCE_ODU_ADAPT_PROD: + case IPA_RM_RESOURCE_MHI_PROD: + case IPA_RM_RESOURCE_ETHERNET_PROD: + break; + default: + result = IPA_RM_INDEX_INVALID; + break; + } + + return result; +} + +/** + * ipa_rm_cons_index() - consumer name to consumer index mapping + * @resource_name: [in] resource name (should be of consumer) + * + * Returns: resource index mapping, IPA_RM_INDEX_INVALID + * in case provided resource name isn't contained + * in enum ipa_rm_resource_name or is not of consumers. 
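+ * For example, IPA_RM_RESOURCE_USB_CONS maps to itself, while a producer
+ * name such as IPA_RM_RESOURCE_USB_PROD falls through to the default case
+ * and yields IPA_RM_INDEX_INVALID.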
+ * + */ +int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name) +{ + int result = resource_name; + + switch (resource_name) { + case IPA_RM_RESOURCE_Q6_CONS: + case IPA_RM_RESOURCE_USB_CONS: + case IPA_RM_RESOURCE_HSIC_CONS: + case IPA_RM_RESOURCE_WLAN_CONS: + case IPA_RM_RESOURCE_APPS_CONS: + case IPA_RM_RESOURCE_ODU_ADAPT_CONS: + case IPA_RM_RESOURCE_MHI_CONS: + case IPA_RM_RESOURCE_USB_DPL_CONS: + case IPA_RM_RESOURCE_ETHERNET_CONS: + break; + default: + result = IPA_RM_INDEX_INVALID; + break; + } + + return result; +} + +int ipa_rm_resource_consumer_release_work( + struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + bool notify_completion) +{ + int driver_result; + + IPA_RM_DBG_LOW("calling driver CB\n"); + driver_result = consumer->release_resource(); + IPA_RM_DBG_LOW("driver CB returned with %d\n", driver_result); + /* + * Treat IPA_RM_RELEASE_IN_PROGRESS as IPA_RM_RELEASED + * for CONS which remains in RELEASE_IN_PROGRESS. + */ + if (driver_result == -EINPROGRESS) + driver_result = 0; + if (driver_result != 0 && driver_result != -EINPROGRESS) { + IPA_RM_ERR("driver CB returned error %d\n", driver_result); + consumer->resource.state = prev_state; + goto bail; + } + if (driver_result == 0) { + if (notify_completion) + ipa_rm_resource_consumer_handle_cb(consumer, + IPA_RM_RESOURCE_RELEASED); + else + consumer->resource.state = IPA_RM_RELEASED; + } + complete_all(&consumer->request_consumer_in_progress); + + ipa_rm_perf_profile_change(consumer->resource.name); +bail: + return driver_result; +} + +int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + u32 prod_needed_bw, + bool notify_completion, + bool dec_client_on_err) +{ + int driver_result; + + IPA_RM_DBG_LOW("calling driver CB\n"); + driver_result = consumer->request_resource(); + IPA_RM_DBG_LOW("driver CB returned with %d\n", driver_result); + if (driver_result == 0) { + if (notify_completion) { + ipa_rm_resource_consumer_handle_cb(consumer, + IPA_RM_RESOURCE_GRANTED); + } else { + consumer->resource.state = IPA_RM_GRANTED; + ipa_rm_perf_profile_change(consumer->resource.name); + ipa_resume_resource(consumer->resource.name); + } + } else if (driver_result != -EINPROGRESS) { + consumer->resource.state = prev_state; + consumer->resource.needed_bw -= prod_needed_bw; + if (dec_client_on_err) + consumer->usage_count--; + } + + return driver_result; +} + +int ipa_rm_resource_consumer_request( + struct ipa_rm_resource_cons *consumer, + u32 prod_needed_bw, + bool inc_usage_count, + bool wake_client) +{ + int result = 0; + enum ipa_rm_resource_state prev_state; + struct ipa_active_client_logging_info log_info; + + IPA_RM_DBG_LOW("%s state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + + prev_state = consumer->resource.state; + consumer->resource.needed_bw += prod_needed_bw; + switch (consumer->resource.state) { + case IPA_RM_RELEASED: + case IPA_RM_RELEASE_IN_PROGRESS: + reinit_completion(&consumer->request_consumer_in_progress); + consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS; + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, + ipa_rm_resource_str(consumer->resource.name)); + if (prev_state == IPA_RM_RELEASE_IN_PROGRESS || + ipa_inc_client_enable_clks_no_block(&log_info) != 0) { + IPA_RM_DBG_LOW("async resume work for %s\n", + ipa_rm_resource_str(consumer->resource.name)); + ipa_rm_wq_send_resume_cmd(consumer->resource.name, + prev_state, + prod_needed_bw, + inc_usage_count); + result = 
-EINPROGRESS; + break; + } + result = ipa_rm_resource_consumer_request_work(consumer, + prev_state, + prod_needed_bw, + false, + inc_usage_count); + break; + case IPA_RM_GRANTED: + if (wake_client) { + result = ipa_rm_resource_consumer_request_work( + consumer, prev_state, prod_needed_bw, false, + inc_usage_count); + break; + } + ipa_rm_perf_profile_change(consumer->resource.name); + break; + case IPA_RM_REQUEST_IN_PROGRESS: + result = -EINPROGRESS; + break; + default: + consumer->resource.needed_bw -= prod_needed_bw; + result = -EPERM; + goto bail; + } + if (inc_usage_count) + consumer->usage_count++; +bail: + IPA_RM_DBG_LOW("%s new state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + IPA_RM_DBG_LOW("EXIT with %d\n", result); + + return result; +} + +int ipa_rm_resource_consumer_release( + struct ipa_rm_resource_cons *consumer, + u32 prod_needed_bw, + bool dec_usage_count) +{ + int result = 0; + enum ipa_rm_resource_state save_state; + + IPA_RM_DBG_LOW("%s state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + save_state = consumer->resource.state; + consumer->resource.needed_bw -= prod_needed_bw; + switch (consumer->resource.state) { + case IPA_RM_RELEASED: + break; + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + if (dec_usage_count && consumer->usage_count > 0) + consumer->usage_count--; + if (consumer->usage_count == 0) { + consumer->resource.state = IPA_RM_RELEASE_IN_PROGRESS; + if (save_state == IPA_RM_REQUEST_IN_PROGRESS || + ipa_suspend_resource_no_block( + consumer->resource.name) != 0) { + ipa_rm_wq_send_suspend_cmd( + consumer->resource.name, + save_state, + prod_needed_bw); + result = -EINPROGRESS; + goto bail; + } + result = ipa_rm_resource_consumer_release_work(consumer, + save_state, false); + goto bail; + } else if (consumer->resource.state == IPA_RM_GRANTED) { + ipa_rm_perf_profile_change(consumer->resource.name); + } + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (dec_usage_count && consumer->usage_count > 0) + consumer->usage_count--; + result = -EINPROGRESS; + break; + default: + result = -EPERM; + goto bail; + } +bail: + IPA_RM_DBG_LOW("%s new state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + IPA_RM_DBG_LOW("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_resource_producer_notify_clients() - notify + * all registered clients of given producer + * @producer: producer + * @event: event to notify + * @notify_registered_only: notify only clients registered by + * ipa_rm_register() + */ +void ipa_rm_resource_producer_notify_clients( + struct ipa_rm_resource_prod *producer, + enum ipa_rm_event event, + bool notify_registered_only) +{ + struct ipa_rm_notification_info *reg_info; + + IPA_RM_DBG_LOW("%s event: %d notify_registered_only: %d\n", + ipa_rm_resource_str(producer->resource.name), + event, + notify_registered_only); + + list_for_each_entry(reg_info, &(producer->event_listeners), link) { + if (notify_registered_only && !reg_info->explicit) + continue; + + IPA_RM_DBG_LOW("Notifying %s event: %d\n", + ipa_rm_resource_str(producer->resource.name), event); + reg_info->reg_params.notify_cb(reg_info->reg_params.user_data, + event, + 0); + IPA_RM_DBG_LOW("back from client CB\n"); + } +} + +static int ipa_rm_resource_producer_create(struct ipa_rm_resource **resource, + struct ipa_rm_resource_prod **producer, + struct ipa_rm_create_params *create_params, + int *max_peers) +{ + int result = 0; + + *producer = 
kzalloc(sizeof(**producer), GFP_ATOMIC); + if (*producer == NULL) { + result = -ENOMEM; + goto bail; + } + + INIT_LIST_HEAD(&((*producer)->event_listeners)); + result = ipa_rm_resource_producer_register(*producer, + &(create_params->reg_params), + false); + if (result) { + IPA_RM_ERR("ipa_rm_resource_producer_register() failed\n"); + goto register_fail; + } + + (*resource) = (struct ipa_rm_resource *) (*producer); + (*resource)->type = IPA_RM_PRODUCER; + *max_peers = IPA_RM_RESOURCE_MAX; + goto bail; +register_fail: + kfree(*producer); +bail: + return result; +} + +static void ipa_rm_resource_producer_delete( + struct ipa_rm_resource_prod *producer) +{ + struct ipa_rm_notification_info *reg_info; + struct list_head *pos, *q; + + ipa_rm_resource_producer_release(producer); + list_for_each_safe(pos, q, &(producer->event_listeners)) { + reg_info = list_entry(pos, + struct ipa_rm_notification_info, + link); + list_del(pos); + kfree(reg_info); + } +} + +static int ipa_rm_resource_consumer_create(struct ipa_rm_resource **resource, + struct ipa_rm_resource_cons **consumer, + struct ipa_rm_create_params *create_params, + int *max_peers) +{ + int result = 0; + + *consumer = kzalloc(sizeof(**consumer), GFP_ATOMIC); + if (*consumer == NULL) { + result = -ENOMEM; + goto bail; + } + + (*consumer)->request_resource = create_params->request_resource; + (*consumer)->release_resource = create_params->release_resource; + (*resource) = (struct ipa_rm_resource *) (*consumer); + (*resource)->type = IPA_RM_CONSUMER; + init_completion(&((*consumer)->request_consumer_in_progress)); + *max_peers = IPA_RM_RESOURCE_MAX; +bail: + return result; +} + +/** + * ipa_rm_resource_create() - creates resource + * @create_params: [in] parameters needed + * for resource initialization with IPA RM + * @resource: [out] created resource + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_create( + struct ipa_rm_create_params *create_params, + struct ipa_rm_resource **resource) +{ + struct ipa_rm_resource_cons *consumer; + struct ipa_rm_resource_prod *producer; + int max_peers; + int result = 0; + + if (!create_params) { + result = -EINVAL; + goto bail; + } + + if (IPA_RM_RESORCE_IS_PROD(create_params->name)) { + result = ipa_rm_resource_producer_create(resource, + &producer, + create_params, + &max_peers); + if (result) { + IPA_RM_ERR("ipa_rm_resource_producer_create failed\n"); + goto bail; + } + } else if (IPA_RM_RESORCE_IS_CONS(create_params->name)) { + result = ipa_rm_resource_consumer_create(resource, + &consumer, + create_params, + &max_peers); + if (result) { + IPA_RM_ERR("ipa_rm_resource_producer_create failed\n"); + goto bail; + } + } else { + IPA_RM_ERR("invalid resource\n"); + result = -EPERM; + goto bail; + } + + result = ipa_rm_peers_list_create(max_peers, + &((*resource)->peers_list)); + if (result) { + IPA_RM_ERR("ipa_rm_peers_list_create failed\n"); + goto peers_alloc_fail; + } + (*resource)->name = create_params->name; + (*resource)->floor_voltage = create_params->floor_voltage; + (*resource)->state = IPA_RM_RELEASED; + goto bail; + +peers_alloc_fail: + ipa_rm_resource_delete(*resource); +bail: + return result; +} + +/** + * ipa_rm_resource_delete() - deletes resource + * @resource: [in] resource + * for resource initialization with IPA RM + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_delete(struct ipa_rm_resource *resource) +{ + struct ipa_rm_resource *consumer; + struct ipa_rm_resource *producer; + int peers_index; + int result = 0; + int list_size; + bool 
userspace_dep; + + if (!resource) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + IPA_RM_DBG("ENTER with resource %d\n", resource->name); + if (resource->type == IPA_RM_PRODUCER) { + if (resource->peers_list) { + list_size = ipa_rm_peers_list_get_size( + resource->peers_list); + for (peers_index = 0; + peers_index < list_size; + peers_index++) { + consumer = ipa_rm_peers_list_get_resource( + peers_index, + resource->peers_list); + if (consumer) { + userspace_dep = + ipa_rm_peers_list_get_userspace_dep( + peers_index, + resource->peers_list); + ipa_rm_resource_delete_dependency( + resource, + consumer, + userspace_dep); + } + } + } + + ipa_rm_resource_producer_delete( + (struct ipa_rm_resource_prod *) resource); + } else if (resource->type == IPA_RM_CONSUMER) { + if (resource->peers_list) { + list_size = ipa_rm_peers_list_get_size( + resource->peers_list); + for (peers_index = 0; + peers_index < list_size; + peers_index++){ + producer = ipa_rm_peers_list_get_resource( + peers_index, + resource->peers_list); + if (producer) { + userspace_dep = + ipa_rm_peers_list_get_userspace_dep( + peers_index, + resource->peers_list); + ipa_rm_resource_delete_dependency( + producer, + resource, + userspace_dep); + } + } + } + } + ipa_rm_peers_list_delete(resource->peers_list); + kfree(resource); + return result; +} + +/** + * ipa_rm_resource_register() - register resource + * @resource: [in] resource + * @reg_params: [in] registration parameters + * @explicit: [in] registered explicitly by ipa_rm_register() + * + * Returns: 0 on success, negative on failure + * + * Producer resource is expected for this call. + * + */ +int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params, + bool explicit) +{ + int result = 0; + struct ipa_rm_notification_info *reg_info; + struct list_head *pos; + + if (!producer || !reg_params) { + IPA_RM_ERR("invalid params\n"); + result = -EPERM; + goto bail; + } + + list_for_each(pos, &(producer->event_listeners)) { + reg_info = list_entry(pos, + struct ipa_rm_notification_info, + link); + if (reg_info->reg_params.notify_cb == + reg_params->notify_cb) { + IPA_RM_ERR("already registered\n"); + result = -EPERM; + goto bail; + } + + } + + reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC); + if (reg_info == NULL) { + result = -ENOMEM; + goto bail; + } + + reg_info->reg_params.user_data = reg_params->user_data; + reg_info->reg_params.notify_cb = reg_params->notify_cb; + reg_info->explicit = explicit; + INIT_LIST_HEAD(®_info->link); + list_add(®_info->link, &producer->event_listeners); +bail: + return result; +} + +/** + * ipa_rm_resource_deregister() - register resource + * @resource: [in] resource + * @reg_params: [in] registration parameters + * + * Returns: 0 on success, negative on failure + * + * Producer resource is expected for this call. + * This function deleted only single instance of + * registration info. 
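+ * The entry to delete is matched by the notify_cb pointer in @reg_params,
+ * so the same callback that was passed to
+ * ipa_rm_resource_producer_register() must be supplied here.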
+ * + */ +int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params) +{ + int result = -EINVAL; + struct ipa_rm_notification_info *reg_info; + struct list_head *pos, *q; + + if (!producer || !reg_params) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + list_for_each_safe(pos, q, &(producer->event_listeners)) { + reg_info = list_entry(pos, + struct ipa_rm_notification_info, + link); + if (reg_info->reg_params.notify_cb == + reg_params->notify_cb) { + list_del(pos); + kfree(reg_info); + result = 0; + goto bail; + } + } +bail: + return result; +} + +/** + * ipa_rm_resource_add_dependency() - add dependency between two + * given resources + * @resource: [in] resource resource + * @depends_on: [in] depends_on resource + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on, + bool userspace_dep) +{ + int result = 0; + int consumer_result; + bool add_dep_by_userspace; + + if (!resource || !depends_on) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + if (ipa_rm_peers_list_check_dependency(resource->peers_list, + resource->name, + depends_on->peers_list, + depends_on->name, + &add_dep_by_userspace)) { + IPA_RM_ERR("dependency already exists, added by %s\n", + add_dep_by_userspace ? "userspace" : "kernel"); + return -EEXIST; + } + + ipa_rm_peers_list_add_peer(resource->peers_list, depends_on, + userspace_dep); + ipa_rm_peers_list_add_peer(depends_on->peers_list, resource, + userspace_dep); + IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + + resource->needed_bw += depends_on->max_bw; + switch (resource->state) { + case IPA_RM_RELEASED: + case IPA_RM_RELEASE_IN_PROGRESS: + break; + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + { + enum ipa_rm_resource_state prev_state = resource->state; + + resource->state = IPA_RM_REQUEST_IN_PROGRESS; + ((struct ipa_rm_resource_prod *) + resource)->pending_request++; + consumer_result = ipa_rm_resource_consumer_request( + (struct ipa_rm_resource_cons *)depends_on, + resource->max_bw, + true, false); + if (consumer_result != -EINPROGRESS) { + resource->state = prev_state; + ((struct ipa_rm_resource_prod *) + resource)->pending_request--; + ipa_rm_perf_profile_change(resource->name); + } + result = consumer_result; + break; + } + default: + IPA_RM_ERR("invalid state\n"); + result = -EPERM; + goto bail; + } +bail: + IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_resource_delete_dependency() - add dependency between two + * given resources + * @resource: [in] resource resource + * @depends_on: [in] depends_on resource + * + * Returns: 0 on success, negative on failure + * In case the resource state was changed, a notification + * will be sent to the RM client + */ +int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on, + bool userspace_dep) +{ + int result = 0; + bool state_changed = false; + bool release_consumer = false; + enum ipa_rm_event evt; + bool add_dep_by_userspace; + + if (!resource || !depends_on) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + if (!ipa_rm_peers_list_check_dependency(resource->peers_list, + resource->name, + depends_on->peers_list, + depends_on->name, + &add_dep_by_userspace)) { + IPA_RM_ERR("dependency does 
not exist\n"); + return -EINVAL; + } + + /* + * to avoid race conditions between kernel and userspace + * need to check that the dependency was added by same entity + */ + if (add_dep_by_userspace != userspace_dep) { + IPA_RM_DBG("dependency was added by %s\n", + add_dep_by_userspace ? "userspace" : "kernel"); + IPA_RM_DBG("ignore request to delete dependency by %s\n", + userspace_dep ? "userspace" : "kernel"); + return 0; + } + + IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + + resource->needed_bw -= depends_on->max_bw; + switch (resource->state) { + case IPA_RM_RELEASED: + break; + case IPA_RM_GRANTED: + ipa_rm_perf_profile_change(resource->name); + release_consumer = true; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (((struct ipa_rm_resource_prod *) + resource)->pending_release > 0) + ((struct ipa_rm_resource_prod *) + resource)->pending_release--; + if (depends_on->state == IPA_RM_RELEASE_IN_PROGRESS && + ((struct ipa_rm_resource_prod *) + resource)->pending_release == 0) { + resource->state = IPA_RM_RELEASED; + state_changed = true; + evt = IPA_RM_RESOURCE_RELEASED; + ipa_rm_perf_profile_change(resource->name); + } + break; + case IPA_RM_REQUEST_IN_PROGRESS: + release_consumer = true; + if (((struct ipa_rm_resource_prod *) + resource)->pending_request > 0) + ((struct ipa_rm_resource_prod *) + resource)->pending_request--; + if (depends_on->state == IPA_RM_REQUEST_IN_PROGRESS && + ((struct ipa_rm_resource_prod *) + resource)->pending_request == 0) { + resource->state = IPA_RM_GRANTED; + state_changed = true; + evt = IPA_RM_RESOURCE_GRANTED; + ipa_rm_perf_profile_change(resource->name); + } + break; + default: + result = -EINVAL; + goto bail; + } + if (state_changed) { + (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, + resource->name, + evt, + false); + } + IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + ipa_rm_peers_list_remove_peer(resource->peers_list, + depends_on->name); + ipa_rm_peers_list_remove_peer(depends_on->peers_list, + resource->name); + if (release_consumer) + (void) ipa_rm_resource_consumer_release( + (struct ipa_rm_resource_cons *)depends_on, + resource->max_bw, + true); +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_resource_producer_request() - producer resource request + * @producer: [in] producer + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer) +{ + int peers_index; + int result = 0; + struct ipa_rm_resource *consumer; + int consumer_result; + enum ipa_rm_resource_state state; + + state = producer->resource.state; + switch (producer->resource.state) { + case IPA_RM_RELEASED: + case IPA_RM_RELEASE_IN_PROGRESS: + producer->resource.state = IPA_RM_REQUEST_IN_PROGRESS; + break; + case IPA_RM_GRANTED: + goto unlock_and_bail; + case IPA_RM_REQUEST_IN_PROGRESS: + result = -EINPROGRESS; + goto unlock_and_bail; + default: + result = -EINVAL; + goto unlock_and_bail; + } + + producer->pending_request = 0; + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + producer->resource.peers_list); + peers_index++) { + consumer = ipa_rm_peers_list_get_resource(peers_index, + producer->resource.peers_list); + if (consumer) { + producer->pending_request++; + consumer_result = ipa_rm_resource_consumer_request( + (struct ipa_rm_resource_cons *)consumer, + producer->resource.max_bw, + true, false); + if (consumer_result == -EINPROGRESS) { + result = -EINPROGRESS; 
+ } else { + producer->pending_request--; + if (consumer_result != 0) { + result = consumer_result; + goto bail; + } + } + } + } + + if (producer->pending_request == 0) { + producer->resource.state = IPA_RM_GRANTED; + ipa_rm_perf_profile_change(producer->resource.name); + (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, + producer->resource.name, + IPA_RM_RESOURCE_GRANTED, + true); + result = 0; + } +unlock_and_bail: + if (state != producer->resource.state) + IPA_RM_DBG_LOW("%s state changed %d->%d\n", + ipa_rm_resource_str(producer->resource.name), + state, + producer->resource.state); +bail: + return result; +} + +/** + * ipa_rm_resource_producer_release() - producer resource release + * producer: [in] producer resource + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer) +{ + int peers_index; + int result = 0; + struct ipa_rm_resource *consumer; + int consumer_result; + enum ipa_rm_resource_state state; + + state = producer->resource.state; + switch (producer->resource.state) { + case IPA_RM_RELEASED: + goto bail; + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + producer->resource.state = IPA_RM_RELEASE_IN_PROGRESS; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + result = -EINPROGRESS; + goto bail; + default: + result = -EPERM; + goto bail; + } + + producer->pending_release = 0; + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + producer->resource.peers_list); + peers_index++) { + consumer = ipa_rm_peers_list_get_resource(peers_index, + producer->resource.peers_list); + if (consumer) { + producer->pending_release++; + consumer_result = ipa_rm_resource_consumer_release( + (struct ipa_rm_resource_cons *)consumer, + producer->resource.max_bw, + true); + producer->pending_release--; + } + } + + if (producer->pending_release == 0) { + producer->resource.state = IPA_RM_RELEASED; + ipa_rm_perf_profile_change(producer->resource.name); + (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, + producer->resource.name, + IPA_RM_RESOURCE_RELEASED, + true); + } +bail: + if (state != producer->resource.state) + IPA_RM_DBG_LOW("%s state changed %d->%d\n", + ipa_rm_resource_str(producer->resource.name), + state, + producer->resource.state); + + return result; +} + +static void ipa_rm_resource_producer_handle_cb( + struct ipa_rm_resource_prod *producer, + enum ipa_rm_event event) +{ + IPA_RM_DBG_LOW("%s state: %d event: %d pending_request: %d\n", + ipa_rm_resource_str(producer->resource.name), + producer->resource.state, + event, + producer->pending_request); + + switch (producer->resource.state) { + case IPA_RM_REQUEST_IN_PROGRESS: + if (event != IPA_RM_RESOURCE_GRANTED) + goto unlock_and_bail; + if (producer->pending_request > 0) { + producer->pending_request--; + if (producer->pending_request == 0) { + producer->resource.state = + IPA_RM_GRANTED; + ipa_rm_perf_profile_change( + producer->resource.name); + ipa_rm_resource_producer_notify_clients( + producer, + IPA_RM_RESOURCE_GRANTED, + false); + goto bail; + } + } + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (event != IPA_RM_RESOURCE_RELEASED) + goto unlock_and_bail; + if (producer->pending_release > 0) { + producer->pending_release--; + if (producer->pending_release == 0) { + producer->resource.state = + IPA_RM_RELEASED; + ipa_rm_perf_profile_change( + producer->resource.name); + ipa_rm_resource_producer_notify_clients( + producer, + IPA_RM_RESOURCE_RELEASED, + false); + goto bail; + } + } + break; + case IPA_RM_GRANTED: + case 
IPA_RM_RELEASED: + default: + goto unlock_and_bail; + } +unlock_and_bail: + IPA_RM_DBG_LOW("%s new state: %d\n", + ipa_rm_resource_str(producer->resource.name), + producer->resource.state); +bail: + return; +} + +/** + * ipa_rm_resource_consumer_handle_cb() - propagates resource + * notification to all dependent producers + * @consumer: [in] notifying resource + * + */ +void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_event event) +{ + int peers_index; + struct ipa_rm_resource *producer; + + if (!consumer) { + IPA_RM_ERR("invalid params\n"); + return; + } + IPA_RM_DBG_LOW("%s state: %d event: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state, + event); + + switch (consumer->resource.state) { + case IPA_RM_REQUEST_IN_PROGRESS: + if (event == IPA_RM_RESOURCE_RELEASED) + goto bail; + consumer->resource.state = IPA_RM_GRANTED; + ipa_rm_perf_profile_change(consumer->resource.name); + ipa_resume_resource(consumer->resource.name); + complete_all(&consumer->request_consumer_in_progress); + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (event == IPA_RM_RESOURCE_GRANTED) + goto bail; + consumer->resource.state = IPA_RM_RELEASED; + break; + case IPA_RM_GRANTED: + case IPA_RM_RELEASED: + default: + goto bail; + } + + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + consumer->resource.peers_list); + peers_index++) { + producer = ipa_rm_peers_list_get_resource(peers_index, + consumer->resource.peers_list); + if (producer) + ipa_rm_resource_producer_handle_cb( + (struct ipa_rm_resource_prod *) + producer, + event); + } + + return; +bail: + IPA_RM_DBG_LOW("%s new state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); +} + +/* + * ipa_rm_resource_set_perf_profile() - sets the performance profile to + * resource. 
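+ *
+ * For an active producer or any consumer, the new
+ * max_supported_bandwidth_mbps replaces this resource's previous max_bw
+ * contribution in each peer's needed_bw, and the bandwidth vote is refreshed
+ * for peers (and for the resource itself) that are currently granted.
+ *
+ * Illustrative call (the bandwidth value is an example only):
+ *
+ *	struct ipa_rm_perf_profile profile;
+ *
+ *	profile.max_supported_bandwidth_mbps = 800;
+ *	ipa_rm_resource_set_perf_profile(resource, &profile);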
+ * + * @resource: [in] resource + * @profile: [in] profile to be set + * + * Sets the profile for the given resource. In case the resource is + * granted, updates the bandwidth vote of the resource. + */ +int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource, + struct ipa_rm_perf_profile *profile) +{ + int peers_index; + struct ipa_rm_resource *peer; + + if (!resource || !profile) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + if (profile->max_supported_bandwidth_mbps == resource->max_bw) { + IPA_RM_DBG_LOW("same profile\n"); + return 0; + } + + if ((resource->type == IPA_RM_PRODUCER && + (resource->state == IPA_RM_GRANTED || + resource->state == IPA_RM_REQUEST_IN_PROGRESS)) || + resource->type == IPA_RM_CONSUMER) { + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + resource->peers_list); + peers_index++) { + peer = ipa_rm_peers_list_get_resource(peers_index, + resource->peers_list); + if (!peer) + continue; + peer->needed_bw -= resource->max_bw; + peer->needed_bw += + profile->max_supported_bandwidth_mbps; + if (peer->state == IPA_RM_GRANTED) + ipa_rm_perf_profile_change(peer->name); + } + } + + resource->max_bw = profile->max_supported_bandwidth_mbps; + if (resource->state == IPA_RM_GRANTED) + ipa_rm_perf_profile_change(resource->name); + + return 0; +} + + +/* + * ipa_rm_resource_producer_print_stat() - print the + * resource status and all its dependencies + * + * @resource: [in] producer resource + * @buf: [in] the buffer used for printing + * @size: [in] buffer size + * + * Returns: number of bytes used on success, negative on failure + */ +int ipa_rm_resource_producer_print_stat( + struct ipa_rm_resource *resource, + char *buf, + int size) +{ + + int i; + int nbytes; + int cnt = 0; + struct ipa_rm_resource *consumer; + + if (!buf || size < 0) + return -EINVAL; + + nbytes = scnprintf(buf + cnt, size - cnt, + ipa_rm_resource_str(resource->name)); + cnt += nbytes; + nbytes = scnprintf(buf + cnt, size - cnt, "[%d, ", resource->max_bw); + cnt += nbytes; + + switch (resource->state) { + case IPA_RM_RELEASED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Released] -> "); + cnt += nbytes; + break; + case IPA_RM_REQUEST_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Request In Progress] -> "); + cnt += nbytes; + break; + case IPA_RM_GRANTED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Granted] -> "); + cnt += nbytes; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Release In Progress] -> "); + cnt += nbytes; + break; + default: + return -EPERM; + } + + for (i = 0; i < resource->peers_list->max_peers; ++i) { + consumer = + ipa_rm_peers_list_get_resource( + i, + resource->peers_list); + if (consumer) { + nbytes = scnprintf(buf + cnt, size - cnt, + ipa_rm_resource_str(consumer->name)); + cnt += nbytes; + nbytes = scnprintf(buf + cnt, size - cnt, "[%d, ", + consumer->max_bw); + cnt += nbytes; + + switch (consumer->state) { + case IPA_RM_RELEASED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Released], "); + cnt += nbytes; + break; + case IPA_RM_REQUEST_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Request In Progress], "); + cnt += nbytes; + break; + case IPA_RM_GRANTED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Granted], "); + cnt += nbytes; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Release In Progress], "); + cnt += nbytes; + break; + default: + return -EPERM; + } + } + } + nbytes = scnprintf(buf + cnt, size - cnt, "\n"); + cnt +=
nbytes; + + return cnt; +} diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_rm_resource.h new file mode 100644 index 000000000000..98cf31a23f28 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_resource.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_RM_RESOURCE_H_ +#define _IPA_RM_RESOURCE_H_ + +#include +#include +#include "ipa_rm_peers_list.h" + +/** + * enum ipa_rm_resource_state - resource state + */ +enum ipa_rm_resource_state { + IPA_RM_RELEASED, + IPA_RM_REQUEST_IN_PROGRESS, + IPA_RM_GRANTED, + IPA_RM_RELEASE_IN_PROGRESS +}; + +/** + * enum ipa_rm_resource_type - IPA resource manager resource type + */ +enum ipa_rm_resource_type { + IPA_RM_PRODUCER, + IPA_RM_CONSUMER +}; + +/** + * struct ipa_rm_notification_info - notification information + * of IPA RM client + * @reg_params: registration parameters + * @explicit: registered explicitly by ipa_rm_register() + * @link: link to the list of all registered clients information + */ +struct ipa_rm_notification_info { + struct ipa_rm_register_params reg_params; + bool explicit; + struct list_head link; +}; + +/** + * struct ipa_rm_resource - IPA RM resource + * @name: name identifying resource + * @type: type of resource (PRODUCER or CONSUMER) + * @floor_voltage: minimum voltage level for operation + * @max_bw: maximum bandwidth required for resource in Mbps + * @state: state of the resource + * @peers_list: list of the peers of the resource + */ +struct ipa_rm_resource { + enum ipa_rm_resource_name name; + enum ipa_rm_resource_type type; + enum ipa_voltage_level floor_voltage; + u32 max_bw; + u32 needed_bw; + enum ipa_rm_resource_state state; + struct ipa_rm_peers_list *peers_list; +}; + +/** + * struct ipa_rm_resource_cons - IPA RM consumer + * @resource: resource + * @usage_count: number of producers in GRANTED / REQUESTED state + * using this consumer + * @request_consumer_in_progress: when set, the consumer is during its request + * phase + * @request_resource: function which should be called to request resource + * from resource manager + * @release_resource: function which should be called to release resource + * from resource manager + * Add new fields after @resource only. + */ +struct ipa_rm_resource_cons { + struct ipa_rm_resource resource; + int usage_count; + struct completion request_consumer_in_progress; + int (*request_resource)(void); + int (*release_resource)(void); +}; + +/** + * struct ipa_rm_resource_prod - IPA RM producer + * @resource: resource + * @event_listeners: clients registered with this producer + * for notifications in resource state + * list Add new fields after @resource only. 
+ */ +struct ipa_rm_resource_prod { + struct ipa_rm_resource resource; + struct list_head event_listeners; + int pending_request; + int pending_release; +}; + +int ipa_rm_resource_create( + struct ipa_rm_create_params *create_params, + struct ipa_rm_resource **resource); + +int ipa_rm_resource_delete(struct ipa_rm_resource *resource); + +int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params, + bool explicit); + +int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params); + +int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on, + bool userspace_dep); + +int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on, + bool userspace_dep); + +int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer); + +int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer); + +int ipa_rm_resource_consumer_request(struct ipa_rm_resource_cons *consumer, + u32 needed_bw, + bool inc_usage_count, + bool wake_client); + +int ipa_rm_resource_consumer_release(struct ipa_rm_resource_cons *consumer, + u32 needed_bw, + bool dec_usage_count); + +int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource, + struct ipa_rm_perf_profile *profile); + +void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_event event); + +void ipa_rm_resource_producer_notify_clients( + struct ipa_rm_resource_prod *producer, + enum ipa_rm_event event, + bool notify_registered_only); + +int ipa_rm_resource_producer_print_stat( + struct ipa_rm_resource *resource, + char *buf, + int size); + +int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + u32 needed_bw, + bool notify_completion, + bool dec_client_on_err); + +int ipa_rm_resource_consumer_release_work( + struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + bool notify_completion); + +#endif /* _IPA_RM_RESOURCE_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h new file mode 100644 index 000000000000..51a298922671 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include + +#ifndef _IPA_UC_OFFLOAD_COMMON_I_H_ +#define _IPA_UC_OFFLOAD_COMMON_I_H_ + +int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp); + +int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl, + struct ipa_ntn_conn_in_params *params); + +int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data), + void *user_data); +void ipa_ntn_uc_dereg_rdyCB(void); +#endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile new file mode 100644 index 000000000000..f0275ea947a6 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_IPA3) += ipahal/ + +obj-$(CONFIG_IPA3) += ipat.o +ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \ + ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \ + ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \ + ipa_hw_stats.o ipa_pm.o ipa_wdi3_i.o ipa_odl.o + +ipat-$(CONFIG_IPA_EMULATION) += ipa_dt_replacement.o + +obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o + +obj-$(CONFIG_IPA3_MHI_PROXY) += ipa_mhi_proxy.o diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c new file mode 100644 index 000000000000..e2cfea1cae89 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -0,0 +1,7118 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_ARM64 + +/* Outer caches unsupported on ARM64 platforms */ +#define outer_flush_range(x, y) +#define __cpuc_flush_dcache_area __flush_dcache_area + +#endif + +#define IPA_SUBSYSTEM_NAME "ipa_fws" + +#include "ipa_i.h" +#include "../ipa_rm_i.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" + +#define CREATE_TRACE_POINTS +#include "ipa_trace.h" +#include "ipa_odl.h" + +/* + * The following for adding code (ie. for EMULATION) not found on x86. 
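 * Editor's note, illustrative only (the stubs header itself is not shown
 * in this hunk): such a header typically collapses calls that do not
 * exist on the x86/emulation target into no-ops, in the same spirit as
 * the ARM64 fallbacks defined above, e.g. something like:
 *
 *	#define outer_flush_range(start, end) do { } while (0)
 *	static inline void __flush_dcache_area(void *addr, size_t len) { }
 *
 * The names below are assumptions for illustration, not a description of
 * the real contents of ipa_emulation_stubs.h.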
+ */ +#if defined(CONFIG_IPA_EMULATION) +# include "ipa_emulation_stubs.h" +#endif + +#ifdef CONFIG_COMPAT +/** + * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation + * properties + * @dev_name: input parameter, the name of table + * @size: input parameter, size of table in bytes + * @offset: output parameter, offset into page in case of system memory + */ +struct ipa3_ioc_nat_alloc_mem32 { + char dev_name[IPA_RESOURCE_NAME_MAX]; + compat_size_t size; + compat_off_t offset; +}; + +/** + * struct ipa_ioc_nat_ipv6ct_table_alloc32 - table memory allocation + * properties + * @size: input parameter, size of table in bytes + * @offset: output parameter, offset into page in case of system memory + */ +struct ipa_ioc_nat_ipv6ct_table_alloc32 { + compat_size_t size; + compat_off_t offset; +}; +#endif /* #ifdef CONFIG_COMPAT */ + +#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311 +#define TZ_MEM_PROTECT_REGION_ID 0x10 + +struct tz_smmu_ipa_protect_region_iovec_s { + u64 input_addr; + u64 output_addr; + u64 size; + u32 attr; +} __packed; + +struct tz_smmu_ipa_protect_region_s { + phys_addr_t iovec_buf; + u32 size_bytes; +} __packed; + +static void ipa3_start_tag_process(struct work_struct *work); +static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process); + +static void ipa3_transport_release_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work, + ipa3_transport_release_resource); +static void ipa_gsi_notify_cb(struct gsi_per_notify *notify); + +static int ipa3_attach_to_smmu(void); +static int ipa3_alloc_pkt_init(void); + +static void ipa3_load_ipa_fw(struct work_struct *work); +static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw); + +static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work); +static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work, + ipa_dec_clients_disable_clks_on_wq); + +static struct ipa3_plat_drv_res ipa3_res = {0, }; + +static struct clk *ipa3_clk; + +struct ipa3_context *ipa3_ctx; + +static struct { + bool present[IPA_SMMU_CB_MAX]; + bool arm_smmu; + bool fast_map; + bool s1_bypass_arr[IPA_SMMU_CB_MAX]; + bool use_64_bit_dma_mask; + u32 ipa_base; + u32 ipa_size; +} smmu_info; + +static char *active_clients_table_buf; + +int ipa3_active_clients_log_print_buffer(char *buf, int size) +{ + int i; + int nbytes; + int cnt = 0; + int start_idx; + int end_idx; + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags); + start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) % + IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head; + for (i = start_idx; i != end_idx; + i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) { + nbytes = scnprintf(buf + cnt, size - cnt, "%s\n", + ipa3_ctx->ipa3_active_clients_logging + .log_buffer[i]); + cnt += nbytes; + } + spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock, + flags); + + return cnt; +} + +int ipa3_active_clients_log_print_table(char *buf, int size) +{ + int i; + struct ipa3_active_client_htable_entry *iterator; + int cnt = 0; + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags); + cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n"); + hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i, + iterator, list) { + switch (iterator->type) { + case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d ENDPOINT\n", + iterator->id_string, 
iterator->count); + break; + case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d SIMPLE\n", + iterator->id_string, iterator->count); + break; + case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d RESOURCE\n", + iterator->id_string, iterator->count); + break; + case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d SPECIAL\n", + iterator->id_string, iterator->count); + break; + default: + IPAERR("Trying to print illegal active_clients type"); + break; + } + } + cnt += scnprintf(buf + cnt, size - cnt, + "\nTotal active clients count: %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock, + flags); + + return cnt; +} + +static int ipa3_clean_modem_rule(void) +{ + struct ipa_install_fltr_rule_req_msg_v01 *req; + struct ipa_install_fltr_rule_req_ex_msg_v01 *req_ex; + int val = 0; + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v3_0) { + req = kzalloc( + sizeof(struct ipa_install_fltr_rule_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAERR("mem allocated failed!\n"); + return -ENOMEM; + } + req->filter_spec_list_valid = false; + req->filter_spec_list_len = 0; + req->source_pipe_index_valid = 0; + val = ipa3_qmi_filter_request_send(req); + kfree(req); + } else { + req_ex = kzalloc( + sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01), + GFP_KERNEL); + if (!req_ex) { + IPAERR("mem allocated failed!\n"); + return -ENOMEM; + } + req_ex->filter_spec_ex_list_valid = false; + req_ex->filter_spec_ex_list_len = 0; + req_ex->source_pipe_index_valid = 0; + val = ipa3_qmi_filter_request_ex_send(req_ex); + kfree(req_ex); + } + + return val; +} + +static int ipa3_active_clients_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + ipa3_active_clients_log_print_table(active_clients_table_buf, + IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE); + IPAERR("%s\n", active_clients_table_buf); + + return NOTIFY_DONE; +} + +static struct notifier_block ipa3_active_clients_panic_blk = { + .notifier_call = ipa3_active_clients_panic_notifier, +}; + +static int ipa3_active_clients_log_insert(const char *string) +{ + int head; + int tail; + + if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy) + return -EPERM; + + head = ipa3_ctx->ipa3_active_clients_logging.log_head; + tail = ipa3_ctx->ipa3_active_clients_logging.log_tail; + + memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_', + IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN); + strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string, + (size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN); + head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + if (tail == head) + tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + + ipa3_ctx->ipa3_active_clients_logging.log_tail = tail; + ipa3_ctx->ipa3_active_clients_logging.log_head = head; + + return 0; +} + +static int ipa3_active_clients_log_init(void) +{ + int i; + + spin_lock_init(&ipa3_ctx->ipa3_active_clients_logging.lock); + ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kcalloc( + IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES, + sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]), + GFP_KERNEL); + active_clients_table_buf = kzalloc(sizeof( + char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL); + if (ipa3_ctx->ipa3_active_clients_logging.log_buffer == NULL) { + pr_err("Active Clients Logging memory allocation failed\n"); + goto bail; + } + for (i = 0; i < 
IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) { + ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] = + ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] + + (IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i); + } + ipa3_ctx->ipa3_active_clients_logging.log_head = 0; + ipa3_ctx->ipa3_active_clients_logging.log_tail = + IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; + hash_init(ipa3_ctx->ipa3_active_clients_logging.htable); + atomic_notifier_chain_register(&panic_notifier_list, + &ipa3_active_clients_panic_blk); + ipa3_ctx->ipa3_active_clients_logging.log_rdy = true; + + return 0; + +bail: + return -ENOMEM; +} + +void ipa3_active_clients_log_clear(void) +{ + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags); + ipa3_ctx->ipa3_active_clients_logging.log_head = 0; + ipa3_ctx->ipa3_active_clients_logging.log_tail = + IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; + spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock, + flags); +} + +static void ipa3_active_clients_log_destroy(void) +{ + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags); + ipa3_ctx->ipa3_active_clients_logging.log_rdy = false; + kfree(active_clients_table_buf); + active_clients_table_buf = NULL; + kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]); + ipa3_ctx->ipa3_active_clients_logging.log_head = 0; + ipa3_ctx->ipa3_active_clients_logging.log_tail = + IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; + spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock, + flags); +} + +static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX]; + +struct iommu_domain *ipa3_get_smmu_domain(void) +{ + if (smmu_cb[IPA_SMMU_CB_AP].valid) + return smmu_cb[IPA_SMMU_CB_AP].mapping->domain; + + IPAERR("CB not valid\n"); + + return NULL; +} + +struct iommu_domain *ipa3_get_uc_smmu_domain(void) +{ + if (smmu_cb[IPA_SMMU_CB_UC].valid) + return smmu_cb[IPA_SMMU_CB_UC].mapping->domain; + + IPAERR("CB not valid\n"); + + return NULL; +} + +struct iommu_domain *ipa3_get_wlan_smmu_domain(void) +{ + if (smmu_cb[IPA_SMMU_CB_WLAN].valid) + return smmu_cb[IPA_SMMU_CB_WLAN].iommu; + + IPAERR("CB not valid\n"); + + return NULL; +} + +struct iommu_domain *ipa3_get_smmu_domain_by_type(enum ipa_smmu_cb_type cb_type) +{ + + if (cb_type == IPA_SMMU_CB_WLAN && smmu_cb[IPA_SMMU_CB_WLAN].valid) + return smmu_cb[IPA_SMMU_CB_WLAN].iommu; + + if (smmu_cb[cb_type].valid) + return smmu_cb[cb_type].mapping->domain; + + IPAERR("CB#%d not valid\n", cb_type); + + return NULL; +} + +struct device *ipa3_get_dma_dev(void) +{ + return ipa3_ctx->pdev; +} + +/** + * ipa3_get_smmu_ctx()- Return smmu context for the given cb_type + * + * Return value: pointer to smmu context address + */ +struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type cb_type) +{ + return &smmu_cb[cb_type]; +} + +static int ipa3_open(struct inode *inode, struct file *filp) +{ + IPADBG_LOW("ENTER\n"); + filp->private_data = ipa3_ctx; + + return 0; +} + +static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAERR("Null buffer\n"); + return; + } + + if (type != WAN_UPSTREAM_ROUTE_ADD && + type != WAN_UPSTREAM_ROUTE_DEL && + type != WAN_EMBMS_CONNECT) { + IPAERR("Wrong type given. 
buff %pK type %d\n", buff, type); + return; + } + + kfree(buff); +} + +static int ipa3_send_wan_msg(unsigned long usr_param, + uint8_t msg_type, bool is_cache) +{ + int retval; + struct ipa_wan_msg *wan_msg; + struct ipa_msg_meta msg_meta; + struct ipa_wan_msg cache_wan_msg; + + wan_msg = kzalloc(sizeof(*wan_msg), GFP_KERNEL); + if (!wan_msg) + return -ENOMEM; + + if (copy_from_user(wan_msg, (const void __user *)usr_param, + sizeof(struct ipa_wan_msg))) { + kfree(wan_msg); + return -EFAULT; + } + + memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg)); + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = msg_type; + msg_meta.msg_len = sizeof(struct ipa_wan_msg); + retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb); + if (retval) { + IPAERR_RL("ipa3_send_msg failed: %d\n", retval); + kfree(wan_msg); + return retval; + } + + if (is_cache) { + mutex_lock(&ipa3_ctx->ipa_cne_evt_lock); + + /* cache the cne event */ + memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[ + ipa3_ctx->num_ipa_cne_evt_req].wan_msg, + &cache_wan_msg, + sizeof(cache_wan_msg)); + + memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[ + ipa3_ctx->num_ipa_cne_evt_req].msg_meta, + &msg_meta, + sizeof(struct ipa_msg_meta)); + + ipa3_ctx->num_ipa_cne_evt_req++; + ipa3_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE; + mutex_unlock(&ipa3_ctx->ipa_cne_evt_lock); + } + + return 0; +} + +static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAERR("Null buffer\n"); + return; + } + + switch (type) { + case ADD_VLAN_IFACE: + case DEL_VLAN_IFACE: + case ADD_L2TP_VLAN_MAPPING: + case DEL_L2TP_VLAN_MAPPING: + case ADD_BRIDGE_VLAN_MAPPING: + case DEL_BRIDGE_VLAN_MAPPING: + break; + default: + IPAERR("Wrong type given. buff %pK type %d\n", buff, type); + return; + } + + kfree(buff); +} + +static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type) +{ + int retval; + struct ipa_ioc_vlan_iface_info *vlan_info; + struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info; + struct ipa_ioc_bridge_vlan_mapping_info *bridge_vlan_info; + struct ipa_msg_meta msg_meta; + void *buff; + + IPADBG("type %d\n", msg_type); + + memset(&msg_meta, 0, sizeof(msg_meta)); + msg_meta.msg_type = msg_type; + + if ((msg_type == ADD_VLAN_IFACE) || + (msg_type == DEL_VLAN_IFACE)) { + vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info), + GFP_KERNEL); + if (!vlan_info) + return -ENOMEM; + + if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param, + sizeof(struct ipa_ioc_vlan_iface_info))) { + kfree(vlan_info); + return -EFAULT; + } + + msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info); + buff = vlan_info; + } else if ((msg_type == ADD_L2TP_VLAN_MAPPING) || + (msg_type == DEL_L2TP_VLAN_MAPPING)) { + mapping_info = kzalloc(sizeof(struct + ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL); + if (!mapping_info) + return -ENOMEM; + + if (copy_from_user((u8 *)mapping_info, + (void __user *)usr_param, + sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) { + kfree(mapping_info); + return -EFAULT; + } + + msg_meta.msg_len = sizeof(struct + ipa_ioc_l2tp_vlan_mapping_info); + buff = mapping_info; + } else if ((msg_type == ADD_BRIDGE_VLAN_MAPPING) || + (msg_type == DEL_BRIDGE_VLAN_MAPPING)) { + bridge_vlan_info = kzalloc( + sizeof(struct ipa_ioc_bridge_vlan_mapping_info), + GFP_KERNEL); + if (!bridge_vlan_info) + return -ENOMEM; + + if (copy_from_user((u8 *)bridge_vlan_info, + (void __user *)usr_param, + sizeof(struct ipa_ioc_bridge_vlan_mapping_info))) { + kfree(bridge_vlan_info); + IPAERR("copy 
from user failed\n"); + return -EFAULT; + } + + msg_meta.msg_len = sizeof(struct + ipa_ioc_bridge_vlan_mapping_info); + buff = bridge_vlan_info; + } else { + IPAERR("Unexpected event\n"); + return -EFAULT; + } + + retval = ipa3_send_msg(&msg_meta, buff, + ipa3_vlan_l2tp_msg_free_cb); + if (retval) { + IPAERR("ipa3_send_msg failed: %d, msg_type %d\n", + retval, + msg_type); + kfree(buff); + return retval; + } + IPADBG("exit\n"); + + return 0; +} + +static void ipa3_gsb_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAERR("Null buffer\n"); + return; + } + + switch (type) { + case IPA_GSB_CONNECT: + case IPA_GSB_DISCONNECT: + break; + default: + IPAERR("Wrong type given. buff %pK type %d\n", buff, type); + return; + } + + kfree(buff); +} + +static int ipa3_send_gsb_msg(unsigned long usr_param, uint8_t msg_type) +{ + int retval; + struct ipa_ioc_gsb_info *gsb_info; + struct ipa_msg_meta msg_meta; + void *buff; + + IPADBG("type %d\n", msg_type); + + memset(&msg_meta, 0, sizeof(msg_meta)); + msg_meta.msg_type = msg_type; + + if ((msg_type == IPA_GSB_CONNECT) || + (msg_type == IPA_GSB_DISCONNECT)) { + gsb_info = kzalloc(sizeof(struct ipa_ioc_gsb_info), + GFP_KERNEL); + if (!gsb_info) { + IPAERR("no memory\n"); + return -ENOMEM; + } + + if (copy_from_user((u8 *)gsb_info, (void __user *)usr_param, + sizeof(struct ipa_ioc_gsb_info))) { + kfree(gsb_info); + return -EFAULT; + } + + msg_meta.msg_len = sizeof(struct ipa_ioc_gsb_info); + buff = gsb_info; + } else { + IPAERR("Unexpected event\n"); + return -EFAULT; + } + + retval = ipa3_send_msg(&msg_meta, buff, + ipa3_gsb_msg_free_cb); + if (retval) { + IPAERR("ipa3_send_msg failed: %d, msg_type %d\n", + retval, + msg_type); + kfree(buff); + return retval; + } + IPADBG("exit\n"); + + return 0; +} + +static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int retval = 0; + u32 pyld_sz; + u8 header[128] = { 0 }; + u8 *param = NULL; + bool is_vlan_mode; + struct ipa_ioc_nat_alloc_mem nat_mem; + struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc; + struct ipa_ioc_v4_nat_init nat_init; + struct ipa_ioc_ipv6ct_init ipv6ct_init; + struct ipa_ioc_v4_nat_del nat_del; + struct ipa_ioc_nat_ipv6ct_table_del table_del; + struct ipa_ioc_nat_pdn_entry mdfy_pdn; + struct ipa_ioc_rm_dependency rm_depend; + struct ipa_ioc_nat_dma_cmd *table_dma_cmd; + struct ipa_ioc_get_vlan_mode vlan_mode; + size_t sz; + int pre_entry; + + IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd)); + + if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC) + return -ENOTTY; + + if (!ipa3_is_ready()) { + IPAERR("IPA not ready, waiting for init completion\n"); + wait_for_completion(&ipa3_ctx->init_completion_obj); + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + switch (cmd) { + case IPA_IOC_ALLOC_NAT_MEM: + if (copy_from_user(&nat_mem, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_alloc_mem))) { + retval = -EFAULT; + break; + } + /* null terminate the string */ + nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0'; + + if (ipa3_allocate_nat_device(&nat_mem)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, &nat_mem, + sizeof(struct ipa_ioc_nat_alloc_mem))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_ALLOC_NAT_TABLE: + if (copy_from_user(&table_alloc, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) { + retval = -EFAULT; + break; + } + + if (ipa3_allocate_nat_table(&table_alloc)) { + retval = -EFAULT; + break; + } + if (table_alloc.offset && + copy_to_user((void __user *)arg, &table_alloc, sizeof( + struct 
ipa_ioc_nat_ipv6ct_table_alloc))) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ALLOC_IPV6CT_TABLE: + if (copy_from_user(&table_alloc, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) { + retval = -EFAULT; + break; + } + + if (ipa3_allocate_ipv6ct_table(&table_alloc)) { + retval = -EFAULT; + break; + } + if (table_alloc.offset && + copy_to_user((void __user *)arg, &table_alloc, sizeof( + struct ipa_ioc_nat_ipv6ct_table_alloc))) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_V4_INIT_NAT: + if (copy_from_user(&nat_init, (const void __user *)arg, + sizeof(struct ipa_ioc_v4_nat_init))) { + retval = -EFAULT; + break; + } + if (ipa3_nat_init_cmd(&nat_init)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_INIT_IPV6CT_TABLE: + if (copy_from_user(&ipv6ct_init, (const void __user *)arg, + sizeof(struct ipa_ioc_ipv6ct_init))) { + retval = -EFAULT; + break; + } + if (ipa3_ipv6ct_init_cmd(&ipv6ct_init)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_TABLE_DMA_CMD: + table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)header; + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_dma_cmd))) { + retval = -EFAULT; + break; + } + pre_entry = table_dma_cmd->entries; + pyld_sz = sizeof(struct ipa_ioc_nat_dma_cmd) + + pre_entry * sizeof(struct ipa_ioc_nat_dma_one); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)param; + /* add check in case user-space module compromised */ + if (unlikely(table_dma_cmd->entries != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + table_dma_cmd->entries, pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_table_dma_cmd(table_dma_cmd)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_V4_DEL_NAT: + if (copy_from_user(&nat_del, (const void __user *)arg, + sizeof(struct ipa_ioc_v4_nat_del))) { + retval = -EFAULT; + break; + } + if (ipa3_nat_del_cmd(&nat_del)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_NAT_TABLE: + if (copy_from_user(&table_del, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) { + retval = -EFAULT; + break; + } + if (ipa3_del_nat_table(&table_del)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_IPV6CT_TABLE: + if (copy_from_user(&table_del, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) { + retval = -EFAULT; + break; + } + if (ipa3_del_ipv6ct_table(&table_del)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_NAT_MODIFY_PDN: + if (copy_from_user(&mdfy_pdn, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_pdn_entry))) { + retval = -EFAULT; + break; + } + if (ipa3_nat_mdfy_pdn(&mdfy_pdn)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_HDR: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_add_hdr))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_hdr *)header)->num_hdrs; + pyld_sz = + sizeof(struct ipa_ioc_add_hdr) + + pre_entry * sizeof(struct ipa_hdr_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_hdr *)param)->num_hdrs, + pre_entry); + retval = -EFAULT; + break; + } + if 
(ipa3_add_hdr_usr((struct ipa_ioc_add_hdr *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_HDR: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_del_hdr))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_hdr *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_hdr) + + pre_entry * sizeof(struct ipa_hdr_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_hdr *)param)->num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_del_hdr_by_user((struct ipa_ioc_del_hdr *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_RT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_add_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_rt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_rt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_RT_RULE_EXT: + if (copy_from_user(header, + (const void __user *)arg, + sizeof(struct ipa_ioc_add_rt_rule_ext))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_rt_rule_ext) + + pre_entry * sizeof(struct ipa_rt_rule_add_ext); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely( + ((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules + != pre_entry)) { + IPAERR(" prevent memory corruption(%d not match %d)\n", + ((struct ipa_ioc_add_rt_rule_ext *)param)-> + num_rules, + pre_entry); + retval = -EINVAL; + break; + } + if (ipa3_add_rt_rule_ext( + (struct ipa_ioc_add_rt_rule_ext *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_RT_RULE_AFTER: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_add_rt_rule_after))) { + + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_rt_rule_after) + + pre_entry * sizeof(struct ipa_rt_rule_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct 
ipa_ioc_add_rt_rule_after *)param)-> + num_rules != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_rt_rule_after *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_add_rt_rule_after( + (struct ipa_ioc_add_rt_rule_after *)param)) { + + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_MDFY_RT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_mdfy_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_mdfy_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_mdfy); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_mdfy_rt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_RT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_del_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_rt_rule *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_rt_rule *)param)->num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_FLT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_add_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_flt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_flt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_add_flt_rule_usr((struct ipa_ioc_add_flt_rule *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_FLT_RULE_AFTER: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_add_flt_rule_after))) { + + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_flt_rule_after *)header)-> + num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_flt_rule_after) + + pre_entry * sizeof(struct ipa_flt_rule_add); + 
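/*
 * Editor's note, illustrative sketch, not part of this snapshot: every
 * variable-length ioctl in this handler follows the same defensive
 * pattern. The fixed-size header is copied first, its element count is
 * used to size the full payload, the payload is copied in one piece with
 * memdup_user(), and the count inside the full copy is then compared
 * against the value read earlier. Condensed (struct hdr, struct elem and
 * uptr are placeholders, not real symbols):
 *
 *	if (copy_from_user(header, uptr, sizeof(struct hdr)))
 *		return -EFAULT;
 *	pre_entry = ((struct hdr *)header)->num_rules;
 *	pyld_sz = sizeof(struct hdr) + pre_entry * sizeof(struct elem);
 *	param = memdup_user(uptr, pyld_sz);
 *	if (IS_ERR(param))
 *		return PTR_ERR(param);
 *	if (((struct hdr *)param)->num_rules != pre_entry)
 *		return -EFAULT;
 *
 * The final re-check matters because userspace can rewrite its buffer
 * between the two copies (a time-of-check/time-of-use race), so the
 * count taken from the first copy must never be trusted on its own.
 */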
param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)-> + num_rules != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_flt_rule_after *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_add_flt_rule_after( + (struct ipa_ioc_add_flt_rule_after *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_FLT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_del_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_flt_rule *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_flt_rule *)param)-> + num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_MDFY_FLT_RULE: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_mdfy_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_mdfy_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_mdfy); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_mdfy_flt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_COMMIT_HDR: + retval = ipa3_commit_hdr(); + break; + case IPA_IOC_RESET_HDR: + retval = ipa3_reset_hdr(false); + break; + case IPA_IOC_COMMIT_RT: + retval = ipa3_commit_rt(arg); + break; + case IPA_IOC_RESET_RT: + retval = ipa3_reset_rt(arg, false); + break; + case IPA_IOC_COMMIT_FLT: + retval = ipa3_commit_flt(arg); + break; + case IPA_IOC_RESET_FLT: + retval = ipa3_reset_flt(arg, false); + break; + case IPA_IOC_GET_RT_TBL: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_get_rt_tbl))) { + retval = -EFAULT; + break; + } + if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_get_rt_tbl))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PUT_RT_TBL: + retval = ipa3_put_rt_tbl(arg); + break; + case IPA_IOC_GET_HDR: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_get_hdr))) { + retval = -EFAULT; + break; + } + if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) { + 
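/*
 * Editor's note (illustrative, not part of this snapshot): reaching
 * this branch means the lookup failed, i.e. no header with the given
 * name was previously installed. On success, ipa3_get_hdr() fills in
 * the handle of the named header and takes a reference on it, which
 * userspace is expected to drop later via IPA_IOC_PUT_HDR. A
 * hypothetical caller (field names assumed):
 *
 *	struct ipa_ioc_get_hdr lookup = { 0 };
 *	snprintf(lookup.name, sizeof(lookup.name), "rndis_eth0");
 *	if (ioctl(fd, IPA_IOC_GET_HDR, &lookup) == 0) {
 *		... use lookup.hdl ...
 *		ioctl(fd, IPA_IOC_PUT_HDR, lookup.hdl);
 *	}
 */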
retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_get_hdr))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PUT_HDR: + retval = ipa3_put_hdr(arg); + break; + case IPA_IOC_SET_FLT: + retval = ipa3_cfg_filter(arg); + break; + case IPA_IOC_COPY_HDR: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_copy_hdr))) { + retval = -EFAULT; + break; + } + if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_copy_hdr))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_query_intf))) { + retval = -EFAULT; + break; + } + if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) { + retval = -1; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_query_intf))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_TX_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_tx_props); + if (copy_from_user(header, (const void __user *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props + > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_query_intf_tx_props *) + header)->num_tx_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_tx_intf_prop); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_tx_props *) + param)->num_tx_props + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_tx_props *) + param)->num_tx_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_query_intf_tx_props( + (struct ipa_ioc_query_intf_tx_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_RX_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_rx_props); + if (copy_from_user(header, (const void __user *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props + > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_query_intf_rx_props *) + header)->num_rx_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_rx_intf_prop); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_rx_props *) + param)->num_rx_props != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_rx_props *) + param)->num_rx_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_query_intf_rx_props( + (struct ipa_ioc_query_intf_rx_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_EXT_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_ext_props); + if (copy_from_user(header, (const void __user *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_ext_props *) + header)->num_ext_props > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct 
ipa_ioc_query_intf_ext_props *) + header)->num_ext_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_ext_intf_prop); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_ext_props *) + param)->num_ext_props != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_ext_props *) + param)->num_ext_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_query_intf_ext_props( + (struct ipa_ioc_query_intf_ext_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PULL_MSG: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_msg_meta))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_msg_meta *)header)->msg_len; + pyld_sz = sizeof(struct ipa_msg_meta) + + pre_entry; + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_msg_meta *)param)->msg_len + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_msg_meta *)param)->msg_len, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_pull_msg((struct ipa_msg_meta *)param, + (char *)param + sizeof(struct ipa_msg_meta), + ((struct ipa_msg_meta *)param)->msg_len) != + ((struct ipa_msg_meta *)param)->msg_len) { + retval = -1; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_RM_ADD_DEPENDENCY: + /* deprecate if IPA PM is used */ + if (ipa3_ctx->use_ipa_pm) + return -EINVAL; + + if (copy_from_user(&rm_depend, (const void __user *)arg, + sizeof(struct ipa_ioc_rm_dependency))) { + retval = -EFAULT; + break; + } + retval = ipa_rm_add_dependency_from_ioctl( + rm_depend.resource_name, rm_depend.depends_on_name); + break; + case IPA_IOC_RM_DEL_DEPENDENCY: + /* deprecate if IPA PM is used */ + if (ipa3_ctx->use_ipa_pm) + return -EINVAL; + + if (copy_from_user(&rm_depend, (const void __user *)arg, + sizeof(struct ipa_ioc_rm_dependency))) { + retval = -EFAULT; + break; + } + retval = ipa_rm_delete_dependency_from_ioctl( + rm_depend.resource_name, rm_depend.depends_on_name); + break; + case IPA_IOC_GENERATE_FLT_EQ: + { + struct ipa_ioc_generate_flt_eq flt_eq; + + if (copy_from_user(&flt_eq, (const void __user *)arg, + sizeof(struct ipa_ioc_generate_flt_eq))) { + retval = -EFAULT; + break; + } + if (ipahal_flt_generate_equation(flt_eq.ip, + &flt_eq.attrib, &flt_eq.eq_attrib)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, &flt_eq, + sizeof(struct ipa_ioc_generate_flt_eq))) { + retval = -EFAULT; + break; + } + break; + } + case IPA_IOC_QUERY_EP_MAPPING: + { + retval = ipa3_get_ep_mapping(arg); + break; + } + case IPA_IOC_QUERY_RT_TBL_INDEX: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_get_rt_tbl_indx))) { + retval = -EFAULT; + break; + } + if (ipa3_query_rt_index( + (struct ipa_ioc_get_rt_tbl_indx *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_get_rt_tbl_indx))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_WRITE_QMAPID: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_write_qmapid))) { + retval = -EFAULT; + 
break; + } + if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, header, + sizeof(struct ipa_ioc_write_qmapid))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD: + retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true); + if (retval) { + IPAERR("ipa3_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL: + retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true); + if (retval) { + IPAERR("ipa3_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED: + retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT, false); + if (retval) { + IPAERR("ipa3_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_ADD_HDR_PROC_CTX: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_add_hdr_proc_ctx))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_hdr_proc_ctx *) + header)->num_proc_ctxs; + pyld_sz = + sizeof(struct ipa_ioc_add_hdr_proc_ctx) + + pre_entry * sizeof(struct ipa_hdr_proc_ctx_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *) + param)->num_proc_ctxs != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_hdr_proc_ctx *) + param)->num_proc_ctxs, pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_add_hdr_proc_ctx( + (struct ipa_ioc_add_hdr_proc_ctx *)param, true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_DEL_HDR_PROC_CTX: + if (copy_from_user(header, (const void __user *)arg, + sizeof(struct ipa_ioc_del_hdr_proc_ctx))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_hdr_proc_ctx) + + pre_entry * sizeof(struct ipa_hdr_proc_ctx_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *) + param)->num_hdls != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_hdr_proc_ctx *)param)-> + num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_del_hdr_proc_ctx_by_user( + (struct ipa_ioc_del_hdr_proc_ctx *)param, true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_GET_HW_VERSION: + pyld_sz = sizeof(enum ipa_hw_type); + param = kmemdup(&ipa3_ctx->ipa_hw_type, pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_GET_VLAN_MODE: + if (copy_from_user(&vlan_mode, (const void __user *)arg, + sizeof(struct ipa_ioc_get_vlan_mode))) { + retval = -EFAULT; + break; + } + retval = ipa3_is_vlan_mode( + vlan_mode.iface, + &is_vlan_mode); + if (retval) + break; + + vlan_mode.is_vlan_mode = is_vlan_mode; + + if (copy_to_user((void __user *)arg, + &vlan_mode, + sizeof(struct ipa_ioc_get_vlan_mode))) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_VLAN_IFACE: + 
if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_VLAN_IFACE: + if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_ADD_BRIDGE_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, ADD_BRIDGE_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_DEL_BRIDGE_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, DEL_BRIDGE_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_ADD_L2TP_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_L2TP_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_CLEANUP: + /*Route and filter rules will also be clean*/ + IPADBG("Got IPA_IOC_CLEANUP\n"); + retval = ipa3_reset_hdr(true); + memset(&nat_del, 0, sizeof(nat_del)); + nat_del.table_index = 0; + retval = ipa3_nat_del_cmd(&nat_del); + retval = ipa3_clean_modem_rule(); + break; + + case IPA_IOC_QUERY_WLAN_CLIENT: + IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n"); + retval = ipa3_resend_wlan_msg(); + break; + + case IPA_IOC_GSB_CONNECT: + IPADBG("Got IPA_IOC_GSB_CONNECT\n"); + if (ipa3_send_gsb_msg(arg, IPA_GSB_CONNECT)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_GSB_DISCONNECT: + IPADBG("Got IPA_IOC_GSB_DISCONNECT\n"); + if (ipa3_send_gsb_msg(arg, IPA_GSB_DISCONNECT)) { + retval = -EFAULT; + break; + } + break; + + default: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -ENOTTY; + } + kfree(param); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return retval; +} + +/** + * ipa3_setup_dflt_rt_tables() - Setup default routing tables + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +int ipa3_setup_dflt_rt_tables(void) +{ + struct ipa_ioc_add_rt_rule *rt_rule; + struct ipa_rt_rule_add *rt_rule_entry; + + rt_rule = + kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_add), GFP_KERNEL); + if (!rt_rule) + return -ENOMEM; + + /* setup a default v4 route to point to Apps */ + rt_rule->num_rules = 1; + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME, + IPA_RESOURCE_NAME_MAX); + + rt_rule_entry = &rt_rule->rules[0]; + rt_rule_entry->at_rear = 1; + rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS; + rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl; + rt_rule_entry->rule.retain_hdr = 1; + + if (ipa3_add_rt_rule(rt_rule)) { + IPAERR("fail to add dflt v4 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl; + + /* setup a default v6 route to point to A5 */ + rt_rule->ip = IPA_IP_v6; + if (ipa3_add_rt_rule(rt_rule)) { + IPAERR("fail to add dflt v6 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl; + + /* + * because these tables are the very first to be added, they will both + * have the same index (0) which is essential for programming the + * "route" end-point config + */ + + kfree(rt_rule); + + return 0; +} + +static int ipa3_setup_exception_path(void) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + struct ipahal_reg_route route = { 0 }; + int ret; + + /* install the basic exception header 
*/ + hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add), GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX); + hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH; + + if (ipa3_add_hdr(hdr)) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl; + + /* set the route register to pass exception packets to Apps */ + route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + route.route_frag_def_pipe = ipa3_get_ep_mapping( + IPA_CLIENT_APPS_LAN_CONS); + route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl; + route.route_def_retain_hdr = 1; + + if (ipa3_cfg_route(&route)) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + kfree(hdr); + return ret; +} + +static int ipa3_init_smem_region(int memory_region_size, + int memory_region_offset) +{ + struct ipahal_imm_cmd_dma_shared_mem cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipa3_desc desc; + struct ipa_mem_buffer mem; + int rc; + + if (memory_region_size == 0) + return 0; + + memset(&desc, 0, sizeof(desc)); + memset(&cmd, 0, sizeof(cmd)); + memset(&mem, 0, sizeof(mem)); + + mem.size = memory_region_size; + mem.base = dma_zalloc_coherent(ipa3_ctx->pdev, mem.size, + &mem.phys_base, GFP_KERNEL); + if (!mem.base) { + IPAERR("failed to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + memory_region_offset; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + return -ENOMEM; + } + ipa3_init_imm_cmd_desc(&desc, cmd_pyld); + + rc = ipa3_send_cmd(1, &desc); + if (rc) { + IPAERR("failed to send immediate command (error %d)\n", rc); + rc = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, + mem.phys_base); + + return rc; +} + +/** + * ipa3_init_q6_smem() - Initialize Q6 general memory and + * header memory regions in IPA. 
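+ * Each region is cleared by DMA'ing a zero-filled buffer into IPA local + * memory with a DMA_SHARED_MEM immediate command (ipa3_init_smem_region()).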
+ * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate dma memory + * -EFAULT: failed to send IPA command to initialize the memory + */ +int ipa3_init_q6_smem(void) +{ + int rc; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size), + IPA_MEM_PART(modem_ofst)); + if (rc) { + IPAERR("failed to initialize Modem RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size), + IPA_MEM_PART(modem_hdr_ofst)); + if (rc) { + IPAERR("failed to initialize Modem HDRs RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size), + IPA_MEM_PART(modem_hdr_proc_ctx_ofst)); + if (rc) { + IPAERR("failed to initialize Modem proc ctx RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size), + IPA_MEM_PART(modem_comp_decomp_ofst)); + if (rc) { + IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return rc; +} + +static void ipa3_destroy_imm(void *user1, int user2) +{ + ipahal_destroy_imm_cmd(user1); +} + +static void ipa3_q6_pipe_delay(bool delay) +{ + int client_idx; + int ep_idx; + struct ipa_ep_cfg_ctrl ep_ctrl; + + memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_ctrl.ipa_ep_delay = delay; + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + if (IPA_CLIENT_IS_Q6_PROD(client_idx)) { + ep_idx = ipa3_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, + ep_idx, &ep_ctrl); + } + } +} + +static void ipa3_q6_avoid_holb(void) +{ + int ep_idx; + int client_idx; + struct ipa_ep_cfg_ctrl ep_suspend; + struct ipa_ep_cfg_holb ep_holb; + + memset(&ep_suspend, 0, sizeof(ep_suspend)); + memset(&ep_holb, 0, sizeof(ep_holb)); + + ep_suspend.ipa_ep_suspend = true; + ep_holb.tmr_val = 0; + ep_holb.en = 1; + + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2) + ipa3_cal_ep_holb_scale_base_val(ep_holb.tmr_val, &ep_holb); + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + if (IPA_CLIENT_IS_Q6_CONS(client_idx)) { + ep_idx = ipa3_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + /* from IPA 4.0 pipe suspend is not supported */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_CTRL_n, + ep_idx, &ep_suspend); + + /* + * ipa3_cfg_ep_holb is not used here because we are + * setting HOLB on Q6 pipes, and from APPS perspective + * they are not valid, therefore, the above function + * will fail. 
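+			 * Instead, the HOLB timer and enable registers are + * written directly through ipahal for every Q6 + * consumer pipe.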
+ */ + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, + ep_idx, &ep_holb); + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_HOL_BLOCK_EN_n, + ep_idx, &ep_holb); + } + } +} + +static void ipa3_halt_q6_gsi_channels(bool prod) +{ + int ep_idx; + int client_idx; + const struct ipa_gsi_ep_config *gsi_ep_cfg; + int i; + int ret; + int code = 0; + + /* if prod flag is true, then we halt the producer channels also */ + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + if (IPA_CLIENT_IS_Q6_CONS(client_idx) + || (IPA_CLIENT_IS_Q6_PROD(client_idx) && prod)) { + ep_idx = ipa3_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx); + if (!gsi_ep_cfg) { + IPAERR("failed to get GSI config\n"); + ipa_assert(); + return; + } + + ret = gsi_halt_channel_ee( + gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee, + &code); + for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY && + ret == -GSI_STATUS_AGAIN; i++) { + IPADBG( + "ch %d ee %d with code %d is busy, trying again\n", + gsi_ep_cfg->ipa_gsi_chan_num, + gsi_ep_cfg->ee, + code); + usleep_range(IPA_GSI_CHANNEL_HALT_MIN_SLEEP, + IPA_GSI_CHANNEL_HALT_MAX_SLEEP); + ret = gsi_halt_channel_ee( + gsi_ep_cfg->ipa_gsi_chan_num, + gsi_ep_cfg->ee, &code); + } + if (ret == GSI_STATUS_SUCCESS) + IPADBG("halted gsi ch %d ee %d with code %d\n", + gsi_ep_cfg->ipa_gsi_chan_num, + gsi_ep_cfg->ee, + code); + else + IPAERR("failed to halt ch %d ee %d code %d\n", + gsi_ep_cfg->ipa_gsi_chan_num, + gsi_ep_cfg->ee, + code); + } + } +} + +static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip, + enum ipa_rule_type rlt) +{ + struct ipa3_desc *desc; + struct ipahal_imm_cmd_dma_shared_mem cmd = {0}; + struct ipahal_imm_cmd_pyld **cmd_pyld; + int retval = 0; + int pipe_idx; + int flt_idx = 0; + int num_cmds = 0; + int index; + u32 lcl_addr_mem_part; + u32 lcl_hdr_sz; + struct ipa_mem_buffer mem; + + IPADBG("Entry\n"); + + if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) { + IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt); + return -EINVAL; + } + + /* + * SRAM memory is not allocated for hash tables, so cleaning the hash + * tables is not supported.
+ */ + if (rlt == IPA_RULE_HASHABLE && ipa3_ctx->ipa_fltrt_not_hashable) { + IPADBG("Clean hashable rules not supported\n"); + return retval; + } + + /* There is one filtering table (one descriptor) per filtering pipe */ + desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc), + GFP_KERNEL); + if (!desc) + return -ENOMEM; + + cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num, + sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL); + if (!cmd_pyld) { + retval = -ENOMEM; + goto free_desc; + } + + if (ip == IPA_IP_v4) { + if (rlt == IPA_RULE_HASHABLE) { + lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size); + } else { + lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size); + } + } else { + if (rlt == IPA_RULE_HASHABLE) { + lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size); + } else { + lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size); + } + } + + retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz, + 0, &mem, true); + if (retval) { + IPAERR("failed to generate flt single tbl empty img\n"); + goto free_cmd_pyld; + } + + for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) { + if (!ipa_is_ep_support_flt(pipe_idx)) + continue; + + /* + * Clear only the filtering pipes that were not configured by + * the AP: pipes that are not valid or that are connected with + * skip_ep_cfg set. + */ + if (!ipa3_ctx->ep[pipe_idx].valid || + ipa3_ctx->ep[pipe_idx].skip_ep_cfg) { + + if (num_cmds >= ipa3_ctx->ep_flt_num) { + IPAERR("number of commands is out of range\n"); + retval = -ENOBUFS; + goto free_empty_img; + } + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = + ipa3_ctx->smem_restricted_bytes + + lcl_addr_mem_part + + ipahal_get_hw_tbl_hdr_width() + + flt_idx * ipahal_get_hw_tbl_hdr_width(); + cmd_pyld[num_cmds] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld[num_cmds]) { + IPAERR("fail construct dma_shared_mem cmd\n"); + retval = -ENOMEM; + goto free_empty_img; + } + ipa3_init_imm_cmd_desc(&desc[num_cmds], + cmd_pyld[num_cmds]); + ++num_cmds; + } + + ++flt_idx; + } + + IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds); + retval = ipa3_send_cmd(num_cmds, desc); + if (retval) { + IPAERR("failed to send immediate command (err %d)\n", retval); + retval = -EFAULT; + } + +free_empty_img: + ipahal_free_dma_mem(&mem); +free_cmd_pyld: + for (index = 0; index < num_cmds; index++) + ipahal_destroy_imm_cmd(cmd_pyld[index]); + kfree(cmd_pyld); +free_desc: + kfree(desc); + return retval; +} + +static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip, + enum ipa_rule_type rlt) +{ + struct ipa3_desc *desc; + struct ipahal_imm_cmd_dma_shared_mem cmd = {0}; + struct ipahal_imm_cmd_pyld *cmd_pyld = NULL; + int retval = 0; + u32 modem_rt_index_lo; + u32 modem_rt_index_hi; + u32 lcl_addr_mem_part; + u32 lcl_hdr_sz; + struct ipa_mem_buffer mem; + + IPADBG("Entry\n"); + + if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) { + IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt); + return -EINVAL; + } + + /* + * SRAM memory is not allocated for hash tables, so cleaning the hash + * tables is not supported.
+ */ + if (rlt == IPA_RULE_HASHABLE && ipa3_ctx->ipa_fltrt_not_hashable) { + IPADBG("Clean hashable rules not supported\n"); + return retval; + } + + if (ip == IPA_IP_v4) { + modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo); + modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi); + if (rlt == IPA_RULE_HASHABLE) { + lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size); + } else { + lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size); + } + } else { + modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo); + modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi); + if (rlt == IPA_RULE_HASHABLE) { + lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size); + } else { + lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size); + } + } + + retval = ipahal_rt_generate_empty_img( + modem_rt_index_hi - modem_rt_index_lo + 1, + lcl_hdr_sz, lcl_hdr_sz, &mem, true); + if (retval) { + IPAERR("fail generate empty rt img\n"); + return -ENOMEM; + } + + desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL); + if (!desc) { + IPAERR("failed to allocate memory\n"); + goto free_empty_img; + } + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + lcl_addr_mem_part + + modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width(); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + retval = -ENOMEM; + goto free_desc; + } + ipa3_init_imm_cmd_desc(desc, cmd_pyld); + + IPADBG("Sending 1 descriptor for rt tbl clearing\n"); + retval = ipa3_send_cmd(1, desc); + if (retval) { + IPAERR("failed to send immediate command (err %d)\n", retval); + retval = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); +free_desc: + kfree(desc); +free_empty_img: + ipahal_free_dma_mem(&mem); + return retval; +} + +static int ipa3_q6_clean_q6_tables(void) +{ + struct ipa3_desc *desc; + struct ipahal_imm_cmd_pyld *cmd_pyld = NULL; + struct ipahal_imm_cmd_register_write reg_write_cmd = {0}; + int retval = 0; + struct ipahal_reg_fltrt_hash_flush flush; + struct ipahal_reg_valmask valmask; + + IPADBG("Entry\n"); + + + if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) { + IPAERR("failed to clean q6 flt tbls (v4/hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) { + IPAERR("failed to clean q6 flt tbls (v6/hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) { + IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) { + IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n"); + return -EFAULT; + } + + if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) { + IPAERR("failed to clean q6 rt tbls (v4/hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) { + IPAERR("failed to clean q6 rt tbls (v6/hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) { + IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) { + 
IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n"); + return -EFAULT; + } + + /* + * SRAM memory not allocated to hash tables. Cleaning the of hash table + * operation not supported. + */ + if (ipa3_ctx->ipa_fltrt_not_hashable) + return retval; + /* Flush rules cache */ + desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + flush.v4_flt = true; + flush.v4_rt = true; + flush.v6_flt = true; + flush.v6_rt = true; + ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask); + reg_write_cmd.skip_pipeline_clear = false; + reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH); + reg_write_cmd.value = valmask.val; + reg_write_cmd.value_mask = valmask.mask; + cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + ®_write_cmd, false); + if (!cmd_pyld) { + IPAERR("fail construct register_write imm cmd\n"); + retval = -EFAULT; + goto bail_desc; + } + ipa3_init_imm_cmd_desc(desc, cmd_pyld); + + IPADBG("Sending 1 descriptor for tbls flush\n"); + retval = ipa3_send_cmd(1, desc); + if (retval) { + IPAERR("failed to send immediate command (err %d)\n", retval); + retval = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + +bail_desc: + kfree(desc); + IPADBG("Done - retval = %d\n", retval); + return retval; +} + +static int ipa3_q6_set_ex_path_to_apps(void) +{ + int ep_idx; + int client_idx; + struct ipa3_desc *desc; + int num_descs = 0; + int index; + struct ipahal_imm_cmd_register_write reg_write; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int retval; + + desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc), + GFP_KERNEL); + if (!desc) + return -ENOMEM; + + /* Set the exception path to AP */ + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + ep_idx = ipa3_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + /* disable statuses for all modem controlled prod pipes */ + if (IPA_CLIENT_IS_Q6_PROD(client_idx) || + (ipa3_ctx->ep[ep_idx].valid && + ipa3_ctx->ep[ep_idx].skip_ep_cfg) || + (ipa3_ctx->ep[ep_idx].client == IPA_CLIENT_APPS_WAN_PROD + && ipa3_ctx->modem_cfg_emb_pipe_flt)) { + ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes); + + ipa3_ctx->ep[ep_idx].status.status_en = false; + reg_write.skip_pipeline_clear = false; + reg_write.pipeline_clear_options = + IPAHAL_HPS_CLEAR; + reg_write.offset = + ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n, + ep_idx); + reg_write.value = 0; + reg_write.value_mask = ~0; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, ®_write, false); + if (!cmd_pyld) { + IPAERR("fail construct register_write cmd\n"); + ipa_assert(); + return -ENOMEM; + } + + ipa3_init_imm_cmd_desc(&desc[num_descs], cmd_pyld); + desc[num_descs].callback = ipa3_destroy_imm; + desc[num_descs].user1 = cmd_pyld; + ++num_descs; + } + } + + /* Will wait 500msecs for IPA tag process completion */ + retval = ipa3_tag_process(desc, num_descs, + msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT)); + if (retval) { + IPAERR("TAG process failed! (error %d)\n", retval); + /* For timeout error ipa3_destroy_imm cb will destroy user1 */ + if (retval != -ETIME) { + for (index = 0; index < num_descs; index++) + if (desc[index].callback) + desc[index].callback(desc[index].user1, + desc[index].user2); + retval = -EINVAL; + } + } + + kfree(desc); + + return retval; +} + +/** + * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration + * in IPA HW. This is performed in case of SSR. 
+ * + * This is a mandatory procedure, in case one of the steps fails, the + * AP needs to restart. + */ +void ipa3_q6_pre_shutdown_cleanup(void) +{ + IPADBG_LOW("ENTER\n"); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + ipa3_q6_pipe_delay(true); + ipa3_q6_avoid_holb(); + if (ipa3_ctx->ipa_config_is_mhi) + ipa3_set_reset_client_cons_pipe_sus_holb(true, + IPA_CLIENT_MHI_CONS); + if (ipa3_q6_clean_q6_tables()) { + IPAERR("Failed to clean Q6 tables\n"); + /* + * Indicates IPA hardware is stalled, unexpected + * hardware state. + */ + ipa_assert(); + } + if (ipa3_q6_set_ex_path_to_apps()) { + IPAERR("Failed to redirect exceptions to APPS\n"); + /* + * Indicates IPA hardware is stalled, unexpected + * hardware state. + */ + ipa_assert(); + } + /* Remove delay from Q6 PRODs to avoid pending descriptors + * on pipe reset procedure + */ + ipa3_q6_pipe_delay(false); + ipa3_set_reset_client_prod_pipe_delay(true, + IPA_CLIENT_USB_PROD); + if (ipa3_ctx->ipa_config_is_mhi) + ipa3_set_reset_client_prod_pipe_delay(true, + IPA_CLIENT_MHI_PROD); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG_LOW("Exit with success\n"); +} + +/* + * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup + * check if GSI channel related to Q6 producer client is empty. + * + * Q6 GSI channel emptiness is needed to garantee no descriptors with invalid + * info are injected into IPA RX from IPA_IF, while modem is restarting. + */ +void ipa3_q6_post_shutdown_cleanup(void) +{ + int client_idx; + int ep_idx; + bool prod = false; + + IPADBG_LOW("ENTER\n"); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + /* Handle the issue where SUSPEND was removed for some reason */ + ipa3_q6_avoid_holb(); + + /* halt both prod and cons channels starting at IPAv4 */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + prod = true; + ipa3_halt_q6_gsi_channels(prod); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG("Exit without consumer check\n"); + return; + } + + ipa3_halt_q6_gsi_channels(prod); + + if (!ipa3_ctx->uc_ctx.uc_loaded) { + IPAERR("uC is not loaded. Skipping\n"); + return; + } + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) + if (IPA_CLIENT_IS_Q6_PROD(client_idx)) { + ep_idx = ipa3_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + if (ipa3_uc_is_gsi_channel_empty(client_idx)) { + IPAERR("fail to validate Q6 ch emptiness %d\n", + client_idx); + /* + * Indicates GSI hardware is stalled, unexpected + * hardware state. + * Remove bug for adb reboot issue. + */ + } + } + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG_LOW("Exit with success\n"); +} + +static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset) +{ + /* Set 4 bytes of CANARY before the offset */ + sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL; +} + +/** + * _ipa_init_sram_v3() - Initialize IPA local SRAM. 
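+ * Maps the SW area of IPA SRAM and writes IPA_MEM_CANARY_VAL guard words + * just before each memory partition defined by IPA_MEM_PART().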
+ * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_sram_v3(void) +{ + u32 *ipa_sram_mmio; + unsigned long phys_addr; + + IPADBG( + "ipa_wrapper_base(0x%08X) ipa_reg_base_ofst(0x%08X) IPA_SW_AREA_RAM_DIRECT_ACCESS_n(0x%08X) smem_restricted_bytes(0x%08X) smem_sz(0x%08X)\n", + ipa3_ctx->ipa_wrapper_base, + ipa3_ctx->ctrl->ipa_reg_base_ofst, + ipahal_get_reg_n_ofst( + IPA_SW_AREA_RAM_DIRECT_ACCESS_n, + ipa3_ctx->smem_restricted_bytes / 4), + ipa3_ctx->smem_restricted_bytes, + ipa3_ctx->smem_sz); + + phys_addr = ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, + ipa3_ctx->smem_restricted_bytes / 4); + + ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + + /* Consult with ipa_i.h on the location of the CANARY values */ + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(v4_flt_nhash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(v6_flt_nhash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(modem_hdr_proc_ctx_ofst)); + if (ipa_get_hw_type() >= IPA_HW_v4_5) { + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(nat_tbl_ofst) - 12); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(nat_tbl_ofst) - 8); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(nat_tbl_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(nat_tbl_ofst)); + } + if (ipa_get_hw_type() >= IPA_HW_v4_0) { + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(pdn_config_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(pdn_config_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(stats_quota_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(stats_quota_ofst)); + } + if (ipa_get_hw_type() <= IPA_HW_v3_5 || + ipa_get_hw_type() >= IPA_HW_v4_5) { + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(modem_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst)); + } + ipa3_sram_set_canary(ipa_sram_mmio, + (ipa_get_hw_type() >= IPA_HW_v3_5) ? + IPA_MEM_PART(uc_descriptor_ram_ofst) : + IPA_MEM_PART(end_ofst)); + + iounmap(ipa_sram_mmio); + + return 0; +} + +/** + * _ipa_init_hdr_v3_0() - Initialize IPA header block. 
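+ * Zero-initializes the modem/apps header region with an HDR_INIT_LOCAL + * immediate command and the header proc-ctx region with a DMA_SHARED_MEM + * command, then programs IPA_LOCAL_PKT_PROC_CNTXT_BASE.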
+ * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_hdr_v3_0(void) +{ + struct ipa3_desc desc; + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_hdr_init_local cmd = {0}; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 }; + + mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size); + mem.base = dma_zalloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + cmd.hdr_table_addr = mem.phys_base; + cmd.size_hdr_table = mem.size; + cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(modem_hdr_ofst); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false); + if (!cmd_pyld) { + IPAERR("fail to construct hdr_init_local imm cmd\n"); + dma_free_coherent(ipa3_ctx->pdev, + mem.size, mem.base, + mem.phys_base); + return -EFAULT; + } + ipa3_init_imm_cmd_desc(&desc, cmd_pyld); + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + ipahal_destroy_imm_cmd(cmd_pyld); + dma_free_coherent(ipa3_ctx->pdev, + mem.size, mem.base, + mem.phys_base); + return -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + + mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) + + IPA_MEM_PART(apps_hdr_proc_ctx_size); + mem.base = dma_zalloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + dma_cmd.is_read = false; + dma_cmd.skip_pipeline_clear = false; + dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + dma_cmd.system_addr = mem.phys_base; + dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(modem_hdr_proc_ctx_ofst); + dma_cmd.size = mem.size; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false); + if (!cmd_pyld) { + IPAERR("fail to construct dma_shared_mem imm\n"); + dma_free_coherent(ipa3_ctx->pdev, + mem.size, mem.base, + mem.phys_base); + return -ENOMEM; + } + ipa3_init_imm_cmd_desc(&desc, cmd_pyld); + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + ipahal_destroy_imm_cmd(cmd_pyld); + dma_free_coherent(ipa3_ctx->pdev, + mem.size, + mem.base, + mem.phys_base); + return -EBUSY; + } + ipahal_destroy_imm_cmd(cmd_pyld); + + ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr); + + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + + return 0; +} + +/** + * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4. 
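+ * Marks the modem-owned table indices in the v4 routing bitmap and loads + * an empty routing image through an IP_V4_ROUTING_INIT immediate command.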
+ * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_rt4_v3(void) +{ + struct ipa3_desc desc; + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int i; + int rc = 0; + + for (i = IPA_MEM_PART(v4_modem_rt_index_lo); + i <= IPA_MEM_PART(v4_modem_rt_index_hi); + i++) + ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i); + IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]); + + rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index), + IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size), + &mem, false); + if (rc) { + IPAERR("fail generate empty v4 rt img\n"); + return rc; + } + + /* + * SRAM memory not allocated to hash tables. Initializing/Sending + * command to hash tables(filer/routing) operation not supported. + */ + if (ipa3_ctx->ipa_fltrt_not_hashable) { + v4_cmd.hash_rules_addr = 0; + v4_cmd.hash_rules_size = 0; + v4_cmd.hash_local_addr = 0; + } else { + v4_cmd.hash_rules_addr = mem.phys_base; + v4_cmd.hash_rules_size = mem.size; + v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_hash_ofst); + } + + v4_cmd.nhash_rules_addr = mem.phys_base; + v4_cmd.nhash_rules_size = mem.size; + v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_nhash_ofst); + IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n", + v4_cmd.hash_local_addr); + IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n", + v4_cmd.nhash_local_addr); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false); + if (!cmd_pyld) { + IPAERR("fail construct ip_v4_rt_init imm cmd\n"); + rc = -EPERM; + goto free_mem; + } + + ipa3_init_imm_cmd_desc(&desc, cmd_pyld); + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + +free_mem: + ipahal_free_dma_mem(&mem); + return rc; +} + +/** + * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6. + * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_rt6_v3(void) +{ + struct ipa3_desc desc; + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int i; + int rc = 0; + + for (i = IPA_MEM_PART(v6_modem_rt_index_lo); + i <= IPA_MEM_PART(v6_modem_rt_index_hi); + i++) + ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i); + IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]); + + rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index), + IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size), + &mem, false); + if (rc) { + IPAERR("fail generate empty v6 rt img\n"); + return rc; + } + + /* + * SRAM memory not allocated to hash tables. Initializing/Sending + * command to hash tables(filer/routing) operation not supported. 
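+	 * In that case the hash address and size fields of the init command + * are set to zero.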
+ */ + if (ipa3_ctx->ipa_fltrt_not_hashable) { + v6_cmd.hash_rules_addr = 0; + v6_cmd.hash_rules_size = 0; + v6_cmd.hash_local_addr = 0; + } else { + v6_cmd.hash_rules_addr = mem.phys_base; + v6_cmd.hash_rules_size = mem.size; + v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_hash_ofst); + } + + v6_cmd.nhash_rules_addr = mem.phys_base; + v6_cmd.nhash_rules_size = mem.size; + v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_nhash_ofst); + IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n", + v6_cmd.hash_local_addr); + IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n", + v6_cmd.nhash_local_addr); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false); + if (!cmd_pyld) { + IPAERR("fail construct ip_v6_rt_init imm cmd\n"); + rc = -EPERM; + goto free_mem; + } + + ipa3_init_imm_cmd_desc(&desc, cmd_pyld); + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + +free_mem: + ipahal_free_dma_mem(&mem); + return rc; +} + +/** + * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4. + * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_flt4_v3(void) +{ + struct ipa3_desc desc; + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int rc; + + rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num, + IPA_MEM_PART(v4_flt_hash_size), + IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap, + &mem, false); + if (rc) { + IPAERR("fail generate empty v4 flt img\n"); + return rc; + } + + /* + * SRAM memory not allocated to hash tables. Initializing/Sending + * command to hash tables(filer/routing) operation not supported. + */ + if (ipa3_ctx->ipa_fltrt_not_hashable) { + v4_cmd.hash_rules_addr = 0; + v4_cmd.hash_rules_size = 0; + v4_cmd.hash_local_addr = 0; + } else { + v4_cmd.hash_rules_addr = mem.phys_base; + v4_cmd.hash_rules_size = mem.size; + v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_hash_ofst); + } + + v4_cmd.nhash_rules_addr = mem.phys_base; + v4_cmd.nhash_rules_size = mem.size; + v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_nhash_ofst); + IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n", + v4_cmd.hash_local_addr); + IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n", + v4_cmd.nhash_local_addr); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false); + if (!cmd_pyld) { + IPAERR("fail construct ip_v4_flt_init imm cmd\n"); + rc = -EPERM; + goto free_mem; + } + + ipa3_init_imm_cmd_desc(&desc, cmd_pyld); + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + +free_mem: + ipahal_free_dma_mem(&mem); + return rc; +} + +/** + * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6. 
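+ * Loads an empty filter image covering all filtering-capable pipes through + * an IP_V6_FILTER_INIT immediate command.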
+ * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_flt6_v3(void) +{ + struct ipa3_desc desc; + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int rc; + + rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num, + IPA_MEM_PART(v6_flt_hash_size), + IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap, + &mem, false); + if (rc) { + IPAERR("fail generate empty v6 flt img\n"); + return rc; + } + + /* + * SRAM memory not allocated to hash tables. Initializing/Sending + * command to hash tables(filer/routing) operation not supported. + */ + if (ipa3_ctx->ipa_fltrt_not_hashable) { + v6_cmd.hash_rules_addr = 0; + v6_cmd.hash_rules_size = 0; + v6_cmd.hash_local_addr = 0; + } else { + v6_cmd.hash_rules_addr = mem.phys_base; + v6_cmd.hash_rules_size = mem.size; + v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_hash_ofst); + } + + v6_cmd.nhash_rules_addr = mem.phys_base; + v6_cmd.nhash_rules_size = mem.size; + v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_nhash_ofst); + IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n", + v6_cmd.hash_local_addr); + IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n", + v6_cmd.nhash_local_addr); + + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false); + if (!cmd_pyld) { + IPAERR("fail construct ip_v6_flt_init imm cmd\n"); + rc = -EPERM; + goto free_mem; + } + + ipa3_init_imm_cmd_desc(&desc, cmd_pyld); + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + +free_mem: + ipahal_free_dma_mem(&mem); + return rc; +} + +static int ipa3_setup_flt_hash_tuple(void) +{ + int pipe_idx; + struct ipahal_reg_hash_tuple tuple; + + memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple)); + + for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) { + if (!ipa_is_ep_support_flt(pipe_idx)) + continue; + + if (ipa_is_modem_pipe(pipe_idx)) + continue; + + if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) { + IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx); + return -EFAULT; + } + } + + return 0; +} + +static int ipa3_setup_rt_hash_tuple(void) +{ + int tbl_idx; + struct ipahal_reg_hash_tuple tuple; + + memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple)); + + for (tbl_idx = 0; + tbl_idx < max(IPA_MEM_PART(v6_rt_num_index), + IPA_MEM_PART(v4_rt_num_index)); + tbl_idx++) { + + if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) && + tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi)) + continue; + + if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) && + tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi)) + continue; + + if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) { + IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx); + return -EFAULT; + } + } + + return 0; +} + +static int ipa3_setup_apps_pipes(void) +{ + struct ipa_sys_connect_params sys_in; + int result = 0; + + if (ipa3_ctx->gsi_ch20_wa) { + IPADBG("Allocating GSI physical channel 20\n"); + result = ipa_gsi_ch20_wa(); + if (result) { + IPAERR("ipa_gsi_ch20_wa failed %d\n", result); + goto fail_ch20_wa; + } + } + + /* allocate the common PROD event ring */ + if (ipa3_alloc_common_event_ring()) { + IPAERR("ipa3_alloc_common_event_ring failed.\n"); + result = -EPERM; + goto fail_ch20_wa; + } + + /* CMD OUT (AP->IPA) */ + memset(&sys_in, 0, sizeof(struct 
ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_CMD_PROD; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS; + if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) { + IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n"); + result = -EPERM; + goto fail_ch20_wa; + } + IPADBG("Apps to IPA cmd pipe is connected\n"); + + IPADBG("Will initialize SRAM\n"); + ipa3_ctx->ctrl->ipa_init_sram(); + IPADBG("SRAM initialized\n"); + + IPADBG("Will initialize HDR\n"); + ipa3_ctx->ctrl->ipa_init_hdr(); + IPADBG("HDR initialized\n"); + + IPADBG("Will initialize V4 RT\n"); + ipa3_ctx->ctrl->ipa_init_rt4(); + IPADBG("V4 RT initialized\n"); + + IPADBG("Will initialize V6 RT\n"); + ipa3_ctx->ctrl->ipa_init_rt6(); + IPADBG("V6 RT initialized\n"); + + IPADBG("Will initialize V4 FLT\n"); + ipa3_ctx->ctrl->ipa_init_flt4(); + IPADBG("V4 FLT initialized\n"); + + IPADBG("Will initialize V6 FLT\n"); + ipa3_ctx->ctrl->ipa_init_flt6(); + IPADBG("V6 FLT initialized\n"); + + if (!ipa3_ctx->ipa_fltrt_not_hashable) { + if (ipa3_setup_flt_hash_tuple()) { + IPAERR(":fail to configure flt hash tuple\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + IPADBG("flt hash tuple is configured\n"); + + if (ipa3_setup_rt_hash_tuple()) { + IPAERR(":fail to configure rt hash tuple\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + IPADBG("rt hash tuple is configured\n"); + } + if (ipa3_setup_exception_path()) { + IPAERR(":fail to setup excp path\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + IPADBG("Exception path was successfully set"); + + if (ipa3_setup_dflt_rt_tables()) { + IPAERR(":fail to setup dflt routes\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + IPADBG("default routing was set\n"); + + /* LAN IN (IPA->AP) */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_LAN_CONS; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.notify = ipa3_lan_rx_cb; + sys_in.priv = NULL; + sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH; + sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD; + sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0; + sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2; + sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD; + + /** + * ipa_lan_rx_cb() intended to notify the source EP about packet + * being received on the LAN_CONS via calling the source EP call-back. + * There could be a race condition with calling this call-back. Other + * thread may nullify it - e.g. on EP disconnect. 
+ * This lock intended to protect the access to the source EP call-back + */ + spin_lock_init(&ipa3_ctx->disconnect_lock); + if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) { + IPAERR(":setup sys pipe (LAN_CONS) failed.\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + + /* LAN OUT (AP->IPA) */ + if (!ipa3_ctx->ipa_config_is_mhi) { + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_LAN_PROD; + sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; + if (ipa3_setup_sys_pipe(&sys_in, + &ipa3_ctx->clnt_hdl_data_out)) { + IPAERR(":setup sys pipe (LAN_PROD) failed.\n"); + result = -EPERM; + goto fail_lan_data_out; + } + } + + return 0; + +fail_lan_data_out: + ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in); +fail_flt_hash_tuple: + if (ipa3_ctx->dflt_v6_rt_rule_hdl) + __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl); + if (ipa3_ctx->dflt_v4_rt_rule_hdl) + __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl); + if (ipa3_ctx->excp_hdr_hdl) + __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false); + ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd); +fail_ch20_wa: + return result; +} + +static void ipa3_teardown_apps_pipes(void) +{ + if (!ipa3_ctx->ipa_config_is_mhi) + ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out); + ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in); + __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl); + __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl); + __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false); + ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd); +} + +#ifdef CONFIG_COMPAT + +static long compat_ipa3_nat_ipv6ct_alloc_table(unsigned long arg, + int (alloc_func)(struct ipa_ioc_nat_ipv6ct_table_alloc *)) +{ + long retval; + struct ipa_ioc_nat_ipv6ct_table_alloc32 table_alloc32; + struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc; + + retval = copy_from_user(&table_alloc32, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32)); + if (retval) + return retval; + + table_alloc.size = (size_t)table_alloc32.size; + table_alloc.offset = (off_t)table_alloc32.offset; + + retval = alloc_func(&table_alloc); + if (retval) + return retval; + + if (table_alloc.offset) { + table_alloc32.offset = (compat_off_t)table_alloc.offset; + retval = copy_to_user((void __user *)arg, &table_alloc32, + sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32)); + } + + return retval; +} + +long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long retval = 0; + struct ipa3_ioc_nat_alloc_mem32 nat_mem32; + struct ipa_ioc_nat_alloc_mem nat_mem; + + switch (cmd) { + case IPA_IOC_ADD_HDR32: + cmd = IPA_IOC_ADD_HDR; + break; + case IPA_IOC_DEL_HDR32: + cmd = IPA_IOC_DEL_HDR; + break; + case IPA_IOC_ADD_RT_RULE32: + cmd = IPA_IOC_ADD_RT_RULE; + break; + case IPA_IOC_DEL_RT_RULE32: + cmd = IPA_IOC_DEL_RT_RULE; + break; + case IPA_IOC_ADD_FLT_RULE32: + cmd = IPA_IOC_ADD_FLT_RULE; + break; + case IPA_IOC_DEL_FLT_RULE32: + cmd = IPA_IOC_DEL_FLT_RULE; + break; + case IPA_IOC_GET_RT_TBL32: + cmd = IPA_IOC_GET_RT_TBL; + break; + case IPA_IOC_COPY_HDR32: + cmd = IPA_IOC_COPY_HDR; + break; + case IPA_IOC_QUERY_INTF32: + cmd = IPA_IOC_QUERY_INTF; + break; + case IPA_IOC_QUERY_INTF_TX_PROPS32: + cmd = IPA_IOC_QUERY_INTF_TX_PROPS; + break; + case IPA_IOC_QUERY_INTF_RX_PROPS32: + cmd = IPA_IOC_QUERY_INTF_RX_PROPS; + break; + case IPA_IOC_QUERY_INTF_EXT_PROPS32: + cmd = IPA_IOC_QUERY_INTF_EXT_PROPS; + break; + case IPA_IOC_GET_HDR32: + cmd = IPA_IOC_GET_HDR; + break; + case 
IPA_IOC_ALLOC_NAT_MEM32: + retval = copy_from_user(&nat_mem32, (const void __user *)arg, + sizeof(struct ipa3_ioc_nat_alloc_mem32)); + if (retval) + return retval; + memcpy(nat_mem.dev_name, nat_mem32.dev_name, + IPA_RESOURCE_NAME_MAX); + nat_mem.size = (size_t)nat_mem32.size; + nat_mem.offset = (off_t)nat_mem32.offset; + + /* null terminate the string */ + nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0'; + + retval = ipa3_allocate_nat_device(&nat_mem); + if (retval) + return retval; + nat_mem32.offset = (compat_off_t)nat_mem.offset; + retval = copy_to_user((void __user *)arg, &nat_mem32, + sizeof(struct ipa3_ioc_nat_alloc_mem32)); + return retval; + case IPA_IOC_ALLOC_NAT_TABLE32: + return compat_ipa3_nat_ipv6ct_alloc_table(arg, + ipa3_allocate_nat_table); + case IPA_IOC_ALLOC_IPV6CT_TABLE32: + return compat_ipa3_nat_ipv6ct_alloc_table(arg, + ipa3_allocate_ipv6ct_table); + case IPA_IOC_V4_INIT_NAT32: + cmd = IPA_IOC_V4_INIT_NAT; + break; + case IPA_IOC_INIT_IPV6CT_TABLE32: + cmd = IPA_IOC_INIT_IPV6CT_TABLE; + break; + case IPA_IOC_TABLE_DMA_CMD32: + cmd = IPA_IOC_TABLE_DMA_CMD; + break; + case IPA_IOC_V4_DEL_NAT32: + cmd = IPA_IOC_V4_DEL_NAT; + break; + case IPA_IOC_DEL_NAT_TABLE32: + cmd = IPA_IOC_DEL_NAT_TABLE; + break; + case IPA_IOC_DEL_IPV6CT_TABLE32: + cmd = IPA_IOC_DEL_IPV6CT_TABLE; + break; + case IPA_IOC_NAT_MODIFY_PDN32: + cmd = IPA_IOC_NAT_MODIFY_PDN; + break; + case IPA_IOC_GET_NAT_OFFSET32: + cmd = IPA_IOC_GET_NAT_OFFSET; + break; + case IPA_IOC_PULL_MSG32: + cmd = IPA_IOC_PULL_MSG; + break; + case IPA_IOC_RM_ADD_DEPENDENCY32: + cmd = IPA_IOC_RM_ADD_DEPENDENCY; + break; + case IPA_IOC_RM_DEL_DEPENDENCY32: + cmd = IPA_IOC_RM_DEL_DEPENDENCY; + break; + case IPA_IOC_GENERATE_FLT_EQ32: + cmd = IPA_IOC_GENERATE_FLT_EQ; + break; + case IPA_IOC_QUERY_RT_TBL_INDEX32: + cmd = IPA_IOC_QUERY_RT_TBL_INDEX; + break; + case IPA_IOC_WRITE_QMAPID32: + cmd = IPA_IOC_WRITE_QMAPID; + break; + case IPA_IOC_MDFY_FLT_RULE32: + cmd = IPA_IOC_MDFY_FLT_RULE; + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32: + cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD; + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32: + cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL; + break; + case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32: + cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED; + break; + case IPA_IOC_MDFY_RT_RULE32: + cmd = IPA_IOC_MDFY_RT_RULE; + break; + case IPA_IOC_COMMIT_HDR: + case IPA_IOC_RESET_HDR: + case IPA_IOC_COMMIT_RT: + case IPA_IOC_RESET_RT: + case IPA_IOC_COMMIT_FLT: + case IPA_IOC_RESET_FLT: + case IPA_IOC_DUMP: + case IPA_IOC_PUT_RT_TBL: + case IPA_IOC_PUT_HDR: + case IPA_IOC_SET_FLT: + case IPA_IOC_QUERY_EP_MAPPING: + break; + default: + return -ENOIOCTLCMD; + } + return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); +} +#endif + +static ssize_t ipa3_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos); + +static const struct file_operations ipa3_drv_fops = { + .owner = THIS_MODULE, + .open = ipa3_open, + .read = ipa3_read, + .write = ipa3_write, + .unlocked_ioctl = ipa3_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_ipa3_ioctl, +#endif +}; + +static int ipa3_get_clks(struct device *dev) +{ + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) { + IPADBG("not supported in this HW mode\n"); + ipa3_clk = NULL; + return 0; + } + + if (ipa3_res.use_bw_vote) { + IPADBG("Vote IPA clock by bw voting via bus scaling driver\n"); + ipa3_clk = NULL; + return 0; + } + + ipa3_clk = clk_get(dev, "core_clk"); + if (IS_ERR(ipa3_clk)) { + if (ipa3_clk != 
ERR_PTR(-EPROBE_DEFER)) + IPAERR("fail to get ipa clk\n"); + return PTR_ERR(ipa3_clk); + } + return 0; +} + +/** + * _ipa_enable_clks_v3_0() - Enable IPA clocks. + */ +void _ipa_enable_clks_v3_0(void) +{ + IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate); + if (ipa3_clk) { + IPADBG_LOW("enabling gcc_ipa_clk\n"); + clk_prepare(ipa3_clk); + clk_enable(ipa3_clk); + clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate); + } + + ipa3_uc_notify_clk_state(true); +} + +static unsigned int ipa3_get_bus_vote(void) +{ + unsigned int idx = 1; + + if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs2) { + idx = 1; + } else if (ipa3_ctx->curr_ipa_clk_rate == + ipa3_ctx->ctrl->ipa_clk_rate_svs) { + idx = 2; + } else if (ipa3_ctx->curr_ipa_clk_rate == + ipa3_ctx->ctrl->ipa_clk_rate_nominal) { + idx = 3; + } else if (ipa3_ctx->curr_ipa_clk_rate == + ipa3_ctx->ctrl->ipa_clk_rate_turbo) { + idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1; + } else { + WARN(1, "unexpected clock rate"); + } + IPADBG_LOW("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx); + + return idx; +} + +/** + * ipa3_enable_clks() - Turn on IPA clocks + * + * Return codes: + * None + */ +void ipa3_enable_clks(void) +{ + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) { + IPAERR("not supported in this mode\n"); + return; + } + + IPADBG("enabling IPA clocks and bus voting\n"); + + if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, + ipa3_get_bus_vote())) + WARN(1, "bus scaling failed"); + + ipa3_ctx->ctrl->ipa3_enable_clks(); +} + + +/** + * _ipa_disable_clks_v3_0() - Disable IPA clocks. + */ +void _ipa_disable_clks_v3_0(void) +{ + ipa3_uc_notify_clk_state(false); + if (ipa3_clk) { + IPADBG_LOW("disabling gcc_ipa_clk\n"); + clk_disable_unprepare(ipa3_clk); + } +} + +/** + * ipa3_disable_clks() - Turn off IPA clocks + * + * Return codes: + * None + */ +void ipa3_disable_clks(void) +{ + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) { + IPAERR("not supported in this mode\n"); + return; + } + + IPADBG("disabling IPA clocks and bus voting\n"); + + ipa3_ctx->ctrl->ipa3_disable_clks(); + + if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0)) + WARN(1, "bus scaling failed"); +} + +/** + * ipa3_start_tag_process() - Send TAG packet and wait for it to come back + * + * This function is called prior to clock gating when active client counter + * is 1. TAG process ensures that there are no packets inside IPA HW that + * were not submitted to the IPA client via the transport. During TAG process + * all aggregation frames are (force) closed. + * + * Return codes: + * None + */ +static void ipa3_start_tag_process(struct work_struct *work) +{ + int res; + + IPADBG("starting TAG process\n"); + /* close aggregation frames on all pipes */ + res = ipa3_tag_aggr_force_close(-1); + if (res) + IPAERR("ipa3_tag_aggr_force_close failed %d\n", res); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS"); + + IPADBG("TAG process done\n"); +} + +/** + * ipa3_active_clients_log_mod() - Log a modification in the active clients + * reference count + * + * This method logs any modification in the active clients reference count: + * It logs the modification in the circular history buffer + * It logs the modification in the hash table - looking for an entry, + * creating one if needed and deleting one if needed. 
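+ * Entries whose reference count drops back to zero are removed from the + * hash table.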
+ * + * @id: ipa3_active client logging info struct to hold the log information + * @inc: a boolean variable to indicate whether the modification is an increase + * or decrease + * @int_ctx: a boolean variable to indicate whether this call is being made from + * an interrupt context and therefore should allocate GFP_ATOMIC memory + * + * Method process: + * - Hash the unique identifier string + * - Find the hash in the table + * 1)If found, increase or decrease the reference count + * 2)If not found, allocate a new hash table entry struct and initialize it + * - Remove and deallocate unneeded data structure + * - Log the call in the circular history buffer (unless it is a simple call) + */ +void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id, + bool inc, bool int_ctx) +{ + char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]; + unsigned long long t; + unsigned long nanosec_rem; + struct ipa3_active_client_htable_entry *hentry; + struct ipa3_active_client_htable_entry *hfound; + u32 hkey; + char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN]; + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags); + int_ctx = true; + hfound = NULL; + memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN); + strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN); + hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN, + 0); + hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable, + hentry, list, hkey) { + if (!strcmp(hentry->id_string, id->id_string)) { + hentry->count = hentry->count + (inc ? 1 : -1); + hfound = hentry; + } + } + if (hfound == NULL) { + hentry = NULL; + hentry = kzalloc(sizeof( + struct ipa3_active_client_htable_entry), + int_ctx ? GFP_ATOMIC : GFP_KERNEL); + if (hentry == NULL) { + spin_unlock_irqrestore( + &ipa3_ctx->ipa3_active_clients_logging.lock, + flags); + return; + } + hentry->type = id->type; + strlcpy(hentry->id_string, id->id_string, + IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN); + INIT_HLIST_NODE(&hentry->list); + hentry->count = inc ? 1 : -1; + hash_add(ipa3_ctx->ipa3_active_clients_logging.htable, + &hentry->list, hkey); + } else if (hfound->count == 0) { + hash_del(&hfound->list); + kfree(hfound); + } + + if (id->type != SIMPLE) { + t = local_clock(); + nanosec_rem = do_div(t, 1000000000) / 1000; + snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN, + inc ? 
"[%5lu.%06lu] ^ %s, %s: %d" : + "[%5lu.%06lu] v %s, %s: %d", + (unsigned long)t, nanosec_rem, + id->id_string, id->file, id->line); + ipa3_active_clients_log_insert(temp_str); + } + spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock, + flags); +} + +void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id, + bool int_ctx) +{ + ipa3_active_clients_log_mod(id, false, int_ctx); +} + +void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id, + bool int_ctx) +{ + ipa3_active_clients_log_mod(id, true, int_ctx); +} + +/** + * ipa3_inc_client_enable_clks() - Increase active clients counter, and + * enable ipa clocks if necessary + * + * Return codes: + * None + */ +void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id) +{ + int ret; + + ipa3_active_clients_log_inc(id, false); + ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt); + if (ret) { + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + return; + } + + mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + + /* somebody might voted to clocks meanwhile */ + ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt); + if (ret) { + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + return; + } + + ipa3_enable_clks(); + atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt); + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + ipa3_suspend_apps_pipes(false); + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); +} + +/** + * ipa3_inc_client_enable_clks_no_block() - Only increment the number of active + * clients if no asynchronous actions should be done. Asynchronous actions are + * locking a mutex and waking up IPA HW. 
+ * + * Return codes: 0 for success + * -EPERM if an asynchronous action should have been done + */ +int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info + *id) +{ + int ret; + + ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt); + if (ret) { + ipa3_active_clients_log_inc(id, true); + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + return 0; + } + + return -EPERM; +} + +static void __ipa3_dec_client_disable_clks(void) +{ + int ret; + + if (!atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) { + IPAERR("trying to disable clocks with refcnt is 0\n"); + ipa_assert(); + return; + } + + ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1); + if (ret) + goto bail; + + /* seems like this is the only client holding the clocks */ + mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 && + ipa3_ctx->tag_process_before_gating) { + ipa3_ctx->tag_process_before_gating = false; + /* + * When TAG process ends, active clients will be + * decreased + */ + queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work); + goto unlock_mutex; + } + + /* a different context might increase the clock reference meanwhile */ + ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt); + if (ret > 0) + goto unlock_mutex; + ipa3_suspend_apps_pipes(true); + ipa3_disable_clks(); + +unlock_mutex: + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); +bail: + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); +} + +/** + * ipa3_dec_client_disable_clks() - Decrease active clients counter + * + * If there are no other active clients, this function also starts the + * TAG process. When the TAG process ends, ipa clocks will be gated. + * start_tag_process_again flag is set during this function to signal the TAG + * process to start again, as there was another client that may send data to ipa + * + * Return codes: + * None + */ +void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id) +{ + ipa3_active_clients_log_dec(id, false); + __ipa3_dec_client_disable_clks(); +} + +static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work) +{ + __ipa3_dec_client_disable_clks(); +} + +/** + * ipa3_dec_client_disable_clks_no_block() - Decrease active clients counter + * if possible without blocking. If this is the last client then the decrease + * will happen from work queue context.
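+ * The queued work item ends up in __ipa3_dec_client_disable_clks(), which + * may start the TAG process and gate the clocks.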
+ * + * Return codes: + * None + */ +void ipa3_dec_client_disable_clks_no_block( + struct ipa_active_client_logging_info *id) +{ + int ret; + + ipa3_active_clients_log_dec(id, true); + ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1); + if (ret) { + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + return; + } + + /* seems like this is the only client holding the clocks */ + queue_work(ipa3_ctx->power_mgmt_wq, + &ipa_dec_clients_disable_clks_on_wq_work); +} + +/** + * ipa3_inc_acquire_wakelock() - Increase active clients counter, and + * acquire wakelock if necessary + * + * Return codes: + * None + */ +void ipa3_inc_acquire_wakelock(void) +{ + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags); + ipa3_ctx->wakelock_ref_cnt.cnt++; + if (ipa3_ctx->wakelock_ref_cnt.cnt == 1) + __pm_stay_awake(&ipa3_ctx->w_lock); + IPADBG_LOW("active wakelock ref cnt = %d\n", + ipa3_ctx->wakelock_ref_cnt.cnt); + spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags); +} + +/** + * ipa3_dec_release_wakelock() - Decrease active clients counter + * + * In case if the ref count is 0, release the wakelock. + * + * Return codes: + * None + */ +void ipa3_dec_release_wakelock(void) +{ + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags); + ipa3_ctx->wakelock_ref_cnt.cnt--; + IPADBG_LOW("active wakelock ref cnt = %d\n", + ipa3_ctx->wakelock_ref_cnt.cnt); + if (ipa3_ctx->wakelock_ref_cnt.cnt == 0) + __pm_relax(&ipa3_ctx->w_lock); + spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags); +} + +int ipa3_set_clock_plan_from_pm(int idx) +{ + u32 clk_rate; + + IPADBG_LOW("idx = %d\n", idx); + + if (!ipa3_ctx->enable_clock_scaling) { + ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx; + return 0; + } + + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) { + IPAERR("not supported in this mode\n"); + return 0; + } + + if (idx <= 0 || idx >= ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases) { + IPAERR("bad voltage\n"); + return -EINVAL; + } + + if (idx == 1) + clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2; + else if (idx == 2) + clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs; + else if (idx == 3) + clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal; + else if (idx == 4) + clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo; + else { + IPAERR("bad voltage\n"); + WARN_ON(1); + return -EFAULT; + } + + if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) { + IPADBG_LOW("Same voltage\n"); + return 0; + } + + mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + ipa3_ctx->curr_ipa_clk_rate = clk_rate; + ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx; + IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate); + if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) { + if (ipa3_clk) + clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate); + if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, + ipa3_get_bus_vote())) + WARN_ON(1); + } else { + IPADBG_LOW("clocks are gated, not setting rate\n"); + } + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); + IPADBG_LOW("Done\n"); + + return 0; +} + +int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps) +{ + enum ipa_voltage_level needed_voltage; + u32 clk_rate; + + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) { + IPAERR("not supported in this mode\n"); + return 0; + } + + IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u", + floor_voltage, bandwidth_mbps); + + if (floor_voltage < 
IPA_VOLTAGE_UNSPECIFIED || + floor_voltage >= IPA_VOLTAGE_MAX) { + IPAERR("bad voltage\n"); + return -EINVAL; + } + + if (ipa3_ctx->enable_clock_scaling) { + IPADBG_LOW("Clock scaling is enabled\n"); + if (bandwidth_mbps >= + ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo) + needed_voltage = IPA_VOLTAGE_TURBO; + else if (bandwidth_mbps >= + ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal) + needed_voltage = IPA_VOLTAGE_NOMINAL; + else if (bandwidth_mbps >= + ipa3_ctx->ctrl->clock_scaling_bw_threshold_svs) + needed_voltage = IPA_VOLTAGE_SVS; + else + needed_voltage = IPA_VOLTAGE_SVS2; + } else { + IPADBG_LOW("Clock scaling is disabled\n"); + needed_voltage = IPA_VOLTAGE_NOMINAL; + } + + needed_voltage = max(needed_voltage, floor_voltage); + switch (needed_voltage) { + case IPA_VOLTAGE_SVS2: + clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2; + break; + case IPA_VOLTAGE_SVS: + clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs; + break; + case IPA_VOLTAGE_NOMINAL: + clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal; + break; + case IPA_VOLTAGE_TURBO: + clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo; + break; + default: + IPAERR("bad voltage\n"); + WARN_ON(1); + return -EFAULT; + } + + if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) { + IPADBG_LOW("Same voltage\n"); + return 0; + } + + /* Hold the mutex to avoid race conditions with ipa3_enable_clocks() */ + mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + ipa3_ctx->curr_ipa_clk_rate = clk_rate; + IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate); + if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) { + if (ipa3_clk) + clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate); + if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, + ipa3_get_bus_vote())) + WARN_ON(1); + } else { + IPADBG_LOW("clocks are gated, not setting rate\n"); + } + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); + IPADBG_LOW("Done\n"); + + return 0; +} + +static void ipa3_process_irq_schedule_rel(void) +{ + queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq, + &ipa3_transport_release_resource_work, + msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC)); +} + +/** + * ipa3_suspend_handler() - Handles the suspend interrupt: + * wakes up the suspended peripheral by requesting its consumer + * @interrupt: Interrupt type + * @private_data: The client's private data + * @interrupt_data: Interrupt specific information data + */ +void ipa3_suspend_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + enum ipa_rm_resource_name resource; + u32 suspend_data = + ((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints; + u32 bmsk = 1; + u32 i = 0; + int res; + struct ipa_ep_cfg_holb holb_cfg; + struct mutex *pm_mutex_ptr = &ipa3_ctx->transport_pm.transport_pm_mutex; + u32 pipe_bitmask = 0; + + IPADBG("interrupt=%d, interrupt_data=%u\n", + interrupt, suspend_data); + memset(&holb_cfg, 0, sizeof(holb_cfg)); + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++, bmsk = bmsk << 1) { + if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) { + if (ipa3_ctx->use_ipa_pm) { + pipe_bitmask |= bmsk; + continue; + } + if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) { + /* + * pipe will be unsuspended as part of + * enabling IPA clocks + */ + mutex_lock(pm_mutex_ptr); + if (!atomic_read( + &ipa3_ctx->transport_pm.dec_clients) + ) { + IPA_ACTIVE_CLIENTS_INC_EP( + ipa3_ctx->ep[i].client); + IPADBG_LOW("Pipes un-suspended.\n"); + IPADBG_LOW("Enter poll mode.\n"); + atomic_set( + &ipa3_ctx->transport_pm.dec_clients, + 1); + /* + * 
acquire wake lock as long as suspend + * vote is held + */ + ipa3_inc_acquire_wakelock(); + ipa3_process_irq_schedule_rel(); + } + mutex_unlock(pm_mutex_ptr); + } else { + resource = ipa3_get_rm_resource_from_ep(i); + res = + ipa_rm_request_resource_with_timer(resource); + if (res == -EPERM && + IPA_CLIENT_IS_CONS( + ipa3_ctx->ep[i].client)) { + holb_cfg.en = 1; + res = ipa3_cfg_ep_holb_by_client( + ipa3_ctx->ep[i].client, &holb_cfg); + WARN(res, "holb en failed\n"); + } + } + } + } + if (ipa3_ctx->use_ipa_pm) { + res = ipa_pm_handle_suspend(pipe_bitmask); + if (res) { + IPAERR("ipa_pm_handle_suspend failed %d\n", res); + return; + } + } +} + +/** + * ipa3_restore_suspend_handler() - restores the original suspend IRQ handler + * as it was registered in the IPA init sequence. + * Return codes: + * 0: success + * -EPERM: failed to remove current handler or failed to add original handler + */ +int ipa3_restore_suspend_handler(void) +{ + int result = 0; + + result = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ); + if (result) { + IPAERR("remove handler for suspend interrupt failed\n"); + return -EPERM; + } + + result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, + ipa3_suspend_handler, false, NULL); + if (result) { + IPAERR("register handler for suspend interrupt failed\n"); + result = -EPERM; + } + + IPADBG("suspend handler successfully restored\n"); + + return result; +} + +static int ipa3_apps_cons_release_resource(void) +{ + return 0; +} + +static int ipa3_apps_cons_request_resource(void) +{ + return 0; +} + +static void ipa3_transport_release_resource(struct work_struct *work) +{ + mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex); + /* check whether still need to decrease client usage */ + if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) { + if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) { + IPADBG("EOT pending Re-scheduling\n"); + ipa3_process_irq_schedule_rel(); + } else { + atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0); + ipa3_dec_release_wakelock(); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE"); + } + } + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0); + mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex); +} + +int ipa3_create_apps_resource(void) +{ + struct ipa_rm_create_params apps_cons_create_params; + struct ipa_rm_perf_profile profile; + int result = 0; + + memset(&apps_cons_create_params, 0, + sizeof(apps_cons_create_params)); + apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS; + apps_cons_create_params.request_resource = + ipa3_apps_cons_request_resource; + apps_cons_create_params.release_resource = + ipa3_apps_cons_release_resource; + result = ipa_rm_create_resource(&apps_cons_create_params); + if (result) { + IPAERR("ipa_rm_create_resource failed\n"); + return result; + } + + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile); + + return result; +} + +/** + * ipa3_init_interrupts() - Register to IPA IRQs + * + * Return codes: 0 in success, negative in failure + * + */ +int ipa3_init_interrupts(void) +{ + int result; + + /*register IPA IRQ handler*/ + result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0, + &ipa3_ctx->master_pdev->dev); + if (result) { + IPAERR("ipa interrupts initialization failed\n"); + return -ENODEV; + } + + /*add handler for suspend interrupt*/ + result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, + ipa3_suspend_handler, false, NULL); + if (result) { + IPAERR("register handler for suspend interrupt failed\n"); + result = 
-ENODEV; + goto fail_add_interrupt_handler; + } + + return 0; + +fail_add_interrupt_handler: + ipa3_interrupts_destroy(ipa3_res.ipa_irq, &ipa3_ctx->master_pdev->dev); + return result; +} + +/** + * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables + * The idr strcuture per filtering table is intended for rule id generation + * per filtering rule. + */ +static void ipa3_destroy_flt_tbl_idrs(void) +{ + int i; + struct ipa3_flt_tbl *flt_tbl; + + idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v4]); + idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v6]); + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + + flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4]; + flt_tbl->rule_ids = NULL; + flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6]; + flt_tbl->rule_ids = NULL; + } +} + +static void ipa3_freeze_clock_vote_and_notify_modem(void) +{ + int res; + struct ipa_active_client_logging_info log_info; + + if (ipa3_ctx->smp2p_info.res_sent) + return; + + if (IS_ERR(ipa3_ctx->smp2p_info.smem_state)) { + IPAERR("fail to get smp2p clk resp bit %d\n", + PTR_ERR(ipa3_ctx->smp2p_info.smem_state)); + return; + } + + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE"); + res = ipa3_inc_client_enable_clks_no_block(&log_info); + if (res) + ipa3_ctx->smp2p_info.ipa_clk_on = false; + else + ipa3_ctx->smp2p_info.ipa_clk_on = true; + + qcom_smem_state_update_bits(ipa3_ctx->smp2p_info.smem_state, + IPA_SMP2P_SMEM_STATE_MASK, + ((ipa3_ctx->smp2p_info.ipa_clk_on << + IPA_SMP2P_OUT_CLK_VOTE_IDX) | + (1 << IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX))); + + ipa3_ctx->smp2p_info.res_sent = true; + IPADBG("IPA clocks are %s\n", + ipa3_ctx->smp2p_info.ipa_clk_on ? "ON" : "OFF"); +} + +void ipa3_reset_freeze_vote(void) +{ + if (!ipa3_ctx->smp2p_info.res_sent) + return; + + if (ipa3_ctx->smp2p_info.ipa_clk_on) + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE"); + + qcom_smem_state_update_bits(ipa3_ctx->smp2p_info.smem_state, + IPA_SMP2P_SMEM_STATE_MASK, + ((0 << + IPA_SMP2P_OUT_CLK_VOTE_IDX) | + (0 << IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX))); + + ipa3_ctx->smp2p_info.res_sent = false; + ipa3_ctx->smp2p_info.ipa_clk_on = false; +} + +static int ipa3_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + int res; + + ipa3_freeze_clock_vote_and_notify_modem(); + + IPADBG("Calling uC panic handler\n"); + res = ipa3_uc_panic_notifier(this, event, ptr); + if (res) + IPAERR("uC panic handler failed %d\n", res); + + if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) != 0) + ipahal_print_all_regs(false); + + return NOTIFY_DONE; +} + +static struct notifier_block ipa3_panic_blk = { + .notifier_call = ipa3_panic_notifier, + /* IPA panic handler needs to run before modem shuts down */ + .priority = INT_MAX, +}; + +static void ipa3_register_panic_hdlr(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, + &ipa3_panic_blk); +} + +static void ipa3_trigger_ipa_ready_cbs(void) +{ + struct ipa3_ready_cb_info *info; + + mutex_lock(&ipa3_ctx->lock); + + /* Call all the CBs */ + list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link) + if (info->ready_cb) + info->ready_cb(info->user_data); + + mutex_unlock(&ipa3_ctx->lock); +} + +static void ipa3_uc_is_loaded(void) +{ + IPADBG("\n"); + complete_all(&ipa3_ctx->uc_loaded_completion_obj); +} + +static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type) +{ + enum gsi_ver gsi_ver; + + switch (ipa_hw_type) { + case IPA_HW_v3_0: + case IPA_HW_v3_1: + gsi_ver = GSI_VER_1_0; + break; + case IPA_HW_v3_5: + gsi_ver = 
GSI_VER_1_2;
+		break;
+	case IPA_HW_v3_5_1:
+		gsi_ver = GSI_VER_1_3;
+		break;
+	case IPA_HW_v4_0:
+	case IPA_HW_v4_1:
+		gsi_ver = GSI_VER_2_0;
+		break;
+	case IPA_HW_v4_2:
+		gsi_ver = GSI_VER_2_2;
+		break;
+	case IPA_HW_v4_5:
+		gsi_ver = GSI_VER_2_5;
+		break;
+	default:
+		IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
+		WARN_ON(1);
+		gsi_ver = GSI_VER_ERR;
+	}
+
+	IPADBG("GSI version %d\n", gsi_ver);
+
+	return gsi_ver;
+}
+
+static int ipa3_gsi_pre_fw_load_init(void)
+{
+	int result;
+
+	result = gsi_configure_regs(
+		ipa3_res.ipa_mem_base,
+		ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
+
+	if (result) {
+		IPAERR("Failed to configure GSI registers\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ipa3_alloc_gsi_channel(void)
+{
+	const struct ipa_gsi_ep_config *gsi_ep_cfg;
+	enum ipa_client_type type;
+	int code = 0;
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_cfg = ipa3_get_gsi_ep_info(type);
+		IPADBG("for ep %d client is %d\n", i, type);
+		if (!gsi_ep_cfg)
+			continue;
+
+		ret = gsi_alloc_channel_ee(gsi_ep_cfg->ipa_gsi_chan_num,
+			gsi_ep_cfg->ee, &code);
+		if (ret == GSI_STATUS_SUCCESS) {
+			IPADBG("alloc gsi ch %d ee %d with code %d\n",
+				gsi_ep_cfg->ipa_gsi_chan_num,
+				gsi_ep_cfg->ee,
+				code);
+		} else {
+			IPAERR("failed to alloc ch %d ee %d code %d\n",
+				gsi_ep_cfg->ipa_gsi_chan_num,
+				gsi_ep_cfg->ee,
+				code);
+			return ret;
+		}
+	}
+	return ret;
+}
+/**
+ * ipa3_post_init() - Initialize the IPA Driver (Part II).
+ * This part contains all initialization which requires interaction with
+ * IPA HW (via GSI).
+ *
+ * @resource_p: contains platform-specific values parsed from the DTS file
+ * @ipa_dev: the basic device structure representing the IPA driver
+ *
+ * Function initialization process:
+ * - Initialize endpoints bitmaps
+ * - Initialize resource groups min and max values
+ * - Initialize filtering lists heads and idr
+ * - Initialize interrupts
+ * - Register GSI
+ * - Setup APPS pipes
+ * - Initialize tethering bridge
+ * - Initialize IPA debugfs
+ * - Initialize IPA uC interface
+ * - Initialize WDI interface
+ * - Initialize USB interface
+ * - Register for panic handler
+ * - Trigger IPA ready callbacks (to all subscribers)
+ * - Trigger IPA completion object (to all who wait on it)
+ */
+static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
+			  struct device *ipa_dev)
+{
+	int result;
+	struct gsi_per_props gsi_props;
+	struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
+	struct ipa3_flt_tbl *flt_tbl;
+	int i;
+	struct idr *idr;
+
+	if (ipa3_ctx == NULL) {
+		IPADBG("IPA driver hasn't been initialized yet\n");
+		return -ENXIO;
+	}
+
+	/* Prevent subsequent calls from trying to load the FW again.
*/ + if (ipa3_ctx->ipa_initialization_complete) + return 0; + + IPADBG("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + /* move proxy vote for modem on ipa3_post_init */ + if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0) + ipa3_proxy_clk_vote(); + + if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio, + ipa3_ctx->pdev)) { + IPAERR("fail to init ipahal\n"); + result = -EFAULT; + goto fail_ipahal; + } + + result = ipa3_init_hw(); + if (result) { + IPAERR(":error initializing HW\n"); + result = -ENODEV; + goto fail_init_hw; + } + IPADBG("IPA HW initialization sequence completed"); + + ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes(); + IPADBG("IPA Pipes num %u\n", ipa3_ctx->ipa_num_pipes); + if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) { + IPAERR("IPA has more pipes then supported has %d, max %d\n", + ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES); + result = -ENODEV; + goto fail_init_hw; + } + + ipa3_ctx->ctrl->ipa_sram_read_settings(); + IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n", + ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes); + + IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n", + ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl, + ipa3_ctx->ip4_rt_tbl_nhash_lcl); + + IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n", + ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl); + + IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n", + ipa3_ctx->ip4_flt_tbl_hash_lcl, + ipa3_ctx->ip4_flt_tbl_nhash_lcl); + + IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n", + ipa3_ctx->ip6_flt_tbl_hash_lcl, + ipa3_ctx->ip6_flt_tbl_nhash_lcl); + + if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) { + IPAERR("SW expect more core memory, needed %d, avail %d\n", + ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz); + result = -ENOMEM; + goto fail_init_hw; + } + + result = ipa3_allocate_dma_task_for_gsi(); + if (result) { + IPAERR("failed to allocate dma task\n"); + goto fail_dma_task; + } + + if (ipa3_nat_ipv6ct_init_devices()) { + IPAERR("unable to init NAT and IPv6CT devices\n"); + result = -ENODEV; + goto fail_nat_ipv6ct_init_dev; + } + + result = ipa3_alloc_pkt_init(); + if (result) { + IPAERR("Failed to alloc pkt_init payload\n"); + result = -ENODEV; + goto fail_allok_pkt_init; + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) + ipa3_enable_dcd(); + + /* + * indication whether working in MHI config or non MHI config is given + * in ipa3_write which is launched before ipa3_post_init. i.e. 
from + * this point it is safe to use ipa3_ep_mapping array and the correct + * entry will be returned from ipa3_get_hw_type_index() + */ + ipa_init_ep_flt_bitmap(); + IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n", + ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num); + + /* Assign resource limitation to each group */ + ipa3_set_resorce_groups_min_max_limits(); + + idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v4]); + idr_init(idr); + idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v6]); + idr_init(idr); + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + + flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4]; + INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list); + flt_tbl->in_sys[IPA_RULE_HASHABLE] = + !ipa3_ctx->ip4_flt_tbl_hash_lcl; + flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] = + !ipa3_ctx->ip4_flt_tbl_nhash_lcl; + flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v4]; + + flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6]; + INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list); + flt_tbl->in_sys[IPA_RULE_HASHABLE] = + !ipa3_ctx->ip6_flt_tbl_hash_lcl; + flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] = + !ipa3_ctx->ip6_flt_tbl_nhash_lcl; + flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v6]; + } + + if (!ipa3_ctx->apply_rg10_wa) { + result = ipa3_init_interrupts(); + if (result) { + IPAERR("ipa initialization of interrupts failed\n"); + result = -ENODEV; + goto fail_register_device; + } + } else { + IPADBG("Initialization of ipa interrupts skipped\n"); + } + + /* + * Disable prefetch for USB or MHI at IPAv3.5/IPA.3.5.1 + * This is to allow MBIM to work. + */ + if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5 + && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) && + (!ipa3_ctx->ipa_config_is_mhi)) + ipa3_disable_prefetch(IPA_CLIENT_USB_CONS); + + if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5 + && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) && + (ipa3_ctx->ipa_config_is_mhi)) + ipa3_disable_prefetch(IPA_CLIENT_MHI_CONS); + + memset(&gsi_props, 0, sizeof(gsi_props)); + gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type); + gsi_props.ee = resource_p->ee; + gsi_props.intr = GSI_INTR_IRQ; + gsi_props.phys_addr = resource_p->transport_mem_base; + gsi_props.size = resource_p->transport_mem_size; + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + gsi_props.irq = resource_p->emulator_irq; + gsi_props.emulator_intcntrlr_client_isr = ipa3_get_isr(); + gsi_props.emulator_intcntrlr_addr = + resource_p->emulator_intcntrlr_mem_base; + gsi_props.emulator_intcntrlr_size = + resource_p->emulator_intcntrlr_mem_size; + } else { + gsi_props.irq = resource_p->transport_irq; + } + gsi_props.notify_cb = ipa_gsi_notify_cb; + gsi_props.req_clk_cb = NULL; + gsi_props.rel_clk_cb = NULL; + + if (ipa3_ctx->ipa_config_is_mhi) { + gsi_props.mhi_er_id_limits_valid = true; + gsi_props.mhi_er_id_limits[0] = resource_p->mhi_evid_limits[0]; + gsi_props.mhi_er_id_limits[1] = resource_p->mhi_evid_limits[1]; + } + + result = gsi_register_device(&gsi_props, + &ipa3_ctx->gsi_dev_hdl); + if (result != GSI_STATUS_SUCCESS) { + IPAERR(":gsi register error - %d\n", result); + result = -ENODEV; + goto fail_register_device; + } + IPADBG("IPA gsi is registered\n"); + /* GSI 2.2 requires to allocate all EE GSI channel + * during device bootup. 
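+	 * ipa3_alloc_gsi_channel() above walks every pipe and issues
+	 * gsi_alloc_channel_ee() for each pipe that has a GSI endpoint
+	 * configuration.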
+ */ + if (ipa3_get_gsi_ver(resource_p->ipa_hw_type) == GSI_VER_2_2) { + result = ipa3_alloc_gsi_channel(); + if (result) { + IPAERR("Failed to alloc the GSI channels\n"); + result = -ENODEV; + goto fail_alloc_gsi_channel; + } + } + + /* setup the AP-IPA pipes */ + if (ipa3_setup_apps_pipes()) { + IPAERR(":failed to setup IPA-Apps pipes\n"); + result = -ENODEV; + goto fail_setup_apps_pipes; + } + IPADBG("IPA GPI pipes were connected\n"); + + if (ipa3_ctx->use_ipa_teth_bridge) { + /* Initialize the tethering bridge driver */ + result = ipa3_teth_bridge_driver_init(); + if (result) { + IPAERR(":teth_bridge init failed (%d)\n", -result); + result = -ENODEV; + goto fail_teth_bridge_driver_init; + } + IPADBG("teth_bridge initialized"); + } + + result = ipa3_uc_interface_init(); + if (result) + IPAERR(":ipa Uc interface init failed (%d)\n", -result); + else + IPADBG(":ipa Uc interface init ok\n"); + + uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded; + ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs); + + result = ipa3_wdi_init(); + if (result) + IPAERR(":wdi init failed (%d)\n", -result); + else + IPADBG(":wdi init ok\n"); + + result = ipa3_ntn_init(); + if (result) + IPAERR(":ntn init failed (%d)\n", -result); + else + IPADBG(":ntn init ok\n"); + + result = ipa_hw_stats_init(); + if (result) + IPAERR("fail to init stats %d\n", result); + else + IPADBG(":stats init ok\n"); + + ipa3_register_panic_hdlr(); + + ipa3_debugfs_init(); + + mutex_lock(&ipa3_ctx->lock); + ipa3_ctx->ipa_initialization_complete = true; + mutex_unlock(&ipa3_ctx->lock); + + ipa3_trigger_ipa_ready_cbs(); + complete_all(&ipa3_ctx->init_completion_obj); + pr_info("IPA driver initialization was successful.\n"); + + return 0; + +fail_teth_bridge_driver_init: + ipa3_teardown_apps_pipes(); +fail_alloc_gsi_channel: +fail_setup_apps_pipes: + gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false); +fail_register_device: + ipa3_destroy_flt_tbl_idrs(); +fail_allok_pkt_init: + ipa3_nat_ipv6ct_destroy_devices(); +fail_nat_ipv6ct_init_dev: + ipa3_free_dma_task_for_gsi(); +fail_dma_task: +fail_init_hw: + ipahal_destroy(); +fail_ipahal: + ipa3_proxy_clk_unvote(); + + return result; +} + +static int ipa3_manual_load_ipa_fws(void) +{ + int result; + const struct firmware *fw; + const char *path = IPA_FWS_PATH; + + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + switch (ipa3_get_emulation_type()) { + case IPA_HW_v3_5_1: + path = IPA_FWS_PATH_3_5_1; + break; + case IPA_HW_v4_0: + path = IPA_FWS_PATH_4_0; + break; + case IPA_HW_v4_5: + path = IPA_FWS_PATH_4_5; + break; + default: + break; + } + } + + IPADBG("Manual FW loading (%s) process initiated\n", path); + + result = request_firmware(&fw, path, ipa3_ctx->cdev.dev); + if (result < 0) { + IPAERR("request_firmware failed, error %d\n", result); + return result; + } + + IPADBG("FWs are available for loading\n"); + + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + result = emulator_load_fws(fw, + ipa3_res.transport_mem_base, + ipa3_res.transport_mem_size, + ipa3_get_gsi_ver(ipa3_res.ipa_hw_type)); + } else { + result = ipa3_load_fws(fw, ipa3_res.transport_mem_base, + ipa3_get_gsi_ver(ipa3_res.ipa_hw_type)); + } + + if (result) { + IPAERR("Manual IPA FWs loading has failed\n"); + release_firmware(fw); + return result; + } + + result = gsi_enable_fw(ipa3_res.transport_mem_base, + ipa3_res.transport_mem_size, + ipa3_get_gsi_ver(ipa3_res.ipa_hw_type)); + if (result) { + IPAERR("Failed to enable GSI FW\n"); + release_firmware(fw); + return result; + } + + release_firmware(fw); 
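+
+	/*
+	 * At this point the FW images have been copied into the transport
+	 * (GSI) memory region and started via gsi_enable_fw(), so the buffer
+	 * returned by request_firmware() was released above.
+	 */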
+ + IPADBG("Manual FW loading process is complete\n"); + + return 0; +} + +static int ipa3_pil_load_ipa_fws(void) +{ + void *subsystem_get_retval = NULL; + + IPADBG("PIL FW loading process initiated\n"); + + subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME); + if (IS_ERR_OR_NULL(subsystem_get_retval)) { + IPAERR("Unable to trigger PIL process for FW loading\n"); + return -EINVAL; + } + + IPADBG("PIL FW loading process is complete\n"); + return 0; +} + +static void ipa3_load_ipa_fw(struct work_struct *work) +{ + int result; + + IPADBG("Entry\n"); + + result = ipa3_attach_to_smmu(); + if (result) { + IPAERR("IPA attach to smmu failed %d\n", result); + return; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION && + (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5))) + result = ipa3_pil_load_ipa_fws(); + else + result = ipa3_manual_load_ipa_fws(); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + if (result) { + IPAERR("IPA FW loading process has failed\n"); + return; + } + pr_info("IPA FW loaded successfully\n"); + + result = ipa3_post_init(&ipa3_res, ipa3_ctx->cdev.dev); + if (result) + IPAERR("IPA post init failed %d\n", result); +} + +static ssize_t ipa3_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + + char dbg_buff[32] = { 0 }; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + + if (missing) { + IPAERR("Unable to copy data from user\n"); + return -EFAULT; + } + + if (count > 0) + dbg_buff[count] = '\0'; + + IPADBG("user input string %s\n", dbg_buff); + + /* Prevent consequent calls from trying to load the FW again. */ + if (ipa3_is_ready()) + return count; + + /* Check MHI configuration on MDM devices */ + if (!ipa3_is_msm_device()) { + + if (strnstr(dbg_buff, "vlan", strlen(dbg_buff))) { + if (strnstr(dbg_buff, "eth", strlen(dbg_buff))) + ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_EMAC] = + true; + if (strnstr(dbg_buff, "rndis", strlen(dbg_buff))) + ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_RNDIS] = + true; + if (strnstr(dbg_buff, "ecm", strlen(dbg_buff))) + ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_ECM] = + true; + + /* + * when vlan mode is passed to our dev we expect + * another write + */ + return count; + } + + /* trim ending newline character if any */ + if (count && (dbg_buff[count - 1] == '\n')) + dbg_buff[count - 1] = '\0'; + + /* + * This logic enforeces MHI mode based on userspace input. + * Note that MHI mode could be already determined due + * to previous logic. + */ + if (!strcasecmp(dbg_buff, "MHI")) { + ipa3_ctx->ipa_config_is_mhi = true; + } else if (strcmp(dbg_buff, "1")) { + IPAERR("got invalid string %s not loading FW\n", + dbg_buff); + return count; + } + pr_info("IPA is loading with %sMHI configuration\n", + ipa3_ctx->ipa_config_is_mhi ? "" : "non "); + } + + queue_work(ipa3_ctx->transport_power_mgmt_wq, + &ipa3_fw_loading_work); + + IPADBG("Scheduled a work to load IPA FW\n"); + return count; +} + +/** + * ipa3_tz_unlock_reg - Unlocks memory regions so that they become accessible + * from AP. + * @reg_info - Pointer to array of memory regions to unlock + * @num_regs - Number of elements in the array + * + * Converts the input array of regions to a struct that TZ understands and + * issues an SCM call. + * Also flushes the memory cache to DDR in order to make sure that TZ sees the + * correct data structure. 
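+ *
+ * Example (editor's sketch; the address and size below are made-up
+ * placeholders rather than real IPA regions):
+ *
+ *	struct ipa_tz_unlock_reg_info reg = {
+ *		.reg_addr = 0x01e40000,
+ *		.size = 0x1000,
+ *	};
+ *
+ *	if (ipa3_tz_unlock_reg(&reg, 1))
+ *		IPAERR("failed to unlock region\n");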
+ * + * Returns: 0 on success, negative on failure + */ +int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs) +{ + int i, size, ret; + struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec; + struct tz_smmu_ipa_protect_region_s cmd_buf; + struct scm_desc desc = {0}; + + if (reg_info == NULL || num_regs == 0) { + IPAERR("Bad parameters\n"); + return -EFAULT; + } + + size = num_regs * sizeof(struct tz_smmu_ipa_protect_region_iovec_s); + ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL); + if (ipa_tz_unlock_vec == NULL) + return -ENOMEM; + + for (i = 0; i < num_regs; i++) { + ipa_tz_unlock_vec[i].input_addr = reg_info[i].reg_addr ^ + (reg_info[i].reg_addr & 0xFFF); + ipa_tz_unlock_vec[i].output_addr = reg_info[i].reg_addr ^ + (reg_info[i].reg_addr & 0xFFF); + ipa_tz_unlock_vec[i].size = reg_info[i].size; + ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE; + } + + /* pass physical address of command buffer */ + cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec); + cmd_buf.size_bytes = size; + + desc.args[0] = virt_to_phys((void *)ipa_tz_unlock_vec); + desc.args[1] = size; + desc.arginfo = SCM_ARGS(2); + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, + TZ_MEM_PROTECT_REGION_ID), &desc); + + if (ret) { + IPAERR("scm call SCM_SVC_MP failed: %d\n", ret); + kfree(ipa_tz_unlock_vec); + return -EFAULT; + } + kfree(ipa_tz_unlock_vec); + return 0; +} + +static int ipa3_alloc_pkt_init(void) +{ + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_ip_packet_init cmd = {0}; + int i; + + cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT, + &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct IMM cmd\n"); + return -ENOMEM; + } + ipa3_ctx->pkt_init_imm_opcode = cmd_pyld->opcode; + + mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes; + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, + &mem.phys_base, GFP_KERNEL); + if (!mem.base) { + IPAERR("failed to alloc DMA buff of size %d\n", mem.size); + ipahal_destroy_imm_cmd(cmd_pyld); + return -ENOMEM; + } + ipahal_destroy_imm_cmd(cmd_pyld); + + memset(mem.base, 0, mem.size); + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + cmd.destination_pipe_index = i; + cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT, + &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct IMM cmd\n"); + dma_free_coherent(ipa3_ctx->pdev, + mem.size, + mem.base, + mem.phys_base); + return -ENOMEM; + } + memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data, + cmd_pyld->len); + ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len; + ipahal_destroy_imm_cmd(cmd_pyld); + } + + return 0; +} + +/** + * ipa3_pre_init() - Initialize the IPA Driver. + * This part contains all initialization which doesn't require IPA HW, such + * as structure allocations and initializations, register writes, etc. 
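+ * ipa3_post_init() above completes the HW-dependent part of the sequence
+ * once the GSI firmware has been loaded (see ipa3_load_ipa_fw()).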
+ * + * @resource_p: contain platform specific values from DST file + * @pdev: The platform device structure representing the IPA driver + * + * Function initialization process: + * Allocate memory for the driver context data struct + * Initializing the ipa3_ctx with : + * 1)parsed values from the dts file + * 2)parameters passed to the module initialization + * 3)read HW values(such as core memory size) + * Map IPA core registers to CPU memory + * Restart IPA core(HW reset) + * Initialize the look-aside caches(kmem_cache/slab) for filter, + * routing and IPA-tree + * Create memory pool with 4 objects for DMA operations(each object + * is 512Bytes long), this object will be use for tx(A5->IPA) + * Initialize lists head(routing, hdr, system pipes) + * Initialize mutexes (for ipa_ctx and NAT memory mutexes) + * Initialize spinlocks (for list related to A5<->IPA pipes) + * Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq" + * Initialize Red-Black-Tree(s) for handles of header,routing rule, + * routing table ,filtering rule + * Initialize the filter block by committing IPV4 and IPV6 default rules + * Create empty routing table in system memory(no committing) + * Create a char-device for IPA + * Initialize IPA RM (resource manager) + * Configure GSI registers (in GSI case) + */ +static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, + struct platform_device *ipa_pdev) +{ + int result = 0; + int i; + struct ipa3_rt_tbl_set *rset; + struct ipa_active_client_logging_info log_info; + struct cdev *cdev; + + IPADBG("IPA Driver initialization started\n"); + + ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL); + if (!ipa3_ctx) { + result = -ENOMEM; + goto fail_mem_ctx; + } + + ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0); + if (ipa3_ctx->logbuf == NULL) + IPADBG("failed to create IPC log, continue...\n"); + + /* ipa3_ctx->pdev and ipa3_ctx->uc_pdev will be set in the smmu probes*/ + ipa3_ctx->master_pdev = ipa_pdev; + for (i = 0; i < IPA_SMMU_CB_MAX; i++) + ipa3_ctx->s1_bypass_arr[i] = true; + + ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base; + ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size; + ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type; + ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode; + ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge; + ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt; + ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2; + ipa3_ctx->ipa_wdi2_over_gsi = resource_p->ipa_wdi2_over_gsi; + ipa3_ctx->ipa_fltrt_not_hashable = resource_p->ipa_fltrt_not_hashable; + ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask; + ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size; + ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size; + ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset; + ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control; + ipa3_ctx->ee = resource_p->ee; + ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa; + ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa; + ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm; + ipa3_ctx->wdi_over_pcie = resource_p->wdi_over_pcie; + ipa3_ctx->ipa3_active_clients_logging.log_rdy = false; + ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config; + ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0]; + ipa3_ctx->mhi_evid_limits[1] = resource_p->mhi_evid_limits[1]; + + WARN(ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL, + "Non NORMAL IPA HW mode, is this emulation platform 
?"); + + if (resource_p->ipa_tz_unlock_reg) { + ipa3_ctx->ipa_tz_unlock_reg_num = + resource_p->ipa_tz_unlock_reg_num; + ipa3_ctx->ipa_tz_unlock_reg = kcalloc( + ipa3_ctx->ipa_tz_unlock_reg_num, + sizeof(*ipa3_ctx->ipa_tz_unlock_reg), + GFP_KERNEL); + if (ipa3_ctx->ipa_tz_unlock_reg == NULL) { + result = -ENOMEM; + goto fail_tz_unlock_reg; + } + for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) { + ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr = + resource_p->ipa_tz_unlock_reg[i].reg_addr; + ipa3_ctx->ipa_tz_unlock_reg[i].size = + resource_p->ipa_tz_unlock_reg[i].size; + } + + /* unlock registers for uc */ + result = ipa3_tz_unlock_reg(ipa3_ctx->ipa_tz_unlock_reg, + ipa3_ctx->ipa_tz_unlock_reg_num); + if (result) + IPAERR("Failed to unlock memory region using TZ\n"); + } + + /* default aggregation parameters */ + ipa3_ctx->aggregation_type = IPA_MBIM_16; + ipa3_ctx->aggregation_byte_limit = 1; + ipa3_ctx->aggregation_time_limit = 0; + + ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL); + if (!ipa3_ctx->ctrl) { + result = -ENOMEM; + goto fail_mem_ctrl; + } + result = ipa3_controller_static_bind(ipa3_ctx->ctrl, + ipa3_ctx->ipa_hw_type); + if (result) { + IPAERR("fail to static bind IPA ctrl\n"); + result = -EFAULT; + goto fail_bind; + } + + result = ipa3_init_mem_partition(ipa3_ctx->ipa_hw_type); + if (result) { + IPAERR(":ipa3_init_mem_partition failed\n"); + result = -ENODEV; + goto fail_init_mem_partition; + } + + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL && + ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) { + ipa3_ctx->ctrl->msm_bus_data_ptr = + msm_bus_cl_get_pdata(ipa3_ctx->master_pdev); + if (ipa3_ctx->ctrl->msm_bus_data_ptr == NULL) { + IPAERR("failed to get bus scaling\n"); + goto fail_bus_reg; + } + IPADBG("Use bus scaling info from device tree #usecases=%d\n", + ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases); + + /* get BUS handle */ + ipa3_ctx->ipa_bus_hdl = + msm_bus_scale_register_client( + ipa3_ctx->ctrl->msm_bus_data_ptr); + if (!ipa3_ctx->ipa_bus_hdl) { + IPAERR("fail to register with bus mgr!\n"); + result = -ENODEV; + goto fail_bus_reg; + } + } + + /* get IPA clocks */ + result = ipa3_get_clks(&ipa3_ctx->master_pdev->dev); + if (result) + goto fail_clk; + + /* init active_clients_log after getting ipa-clk */ + result = ipa3_active_clients_log_init(); + if (result) + goto fail_init_active_client; + + /* Enable ipa3_ctx->enable_clock_scaling */ + ipa3_ctx->enable_clock_scaling = 1; + /* vote for svs2 on bootup */ + ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2; + + /* Enable ipa3_ctx->enable_napi_chain */ + ipa3_ctx->enable_napi_chain = 1; + + /* enable IPA clocks explicitly to allow the initialization */ + ipa3_enable_clks(); + + /* setup IPA register access */ + IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst); + ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst, + resource_p->ipa_mem_size); + if (!ipa3_ctx->mmio) { + IPAERR(":ipa-base ioremap err\n"); + result = -EFAULT; + goto fail_remap; + } + + IPADBG( + "base(0x%x)+offset(0x%x)=(0x%x) mapped to (%pK) with len (0x%x)\n", + resource_p->ipa_mem_base, + ipa3_ctx->ctrl->ipa_reg_base_ofst, + resource_p->ipa_mem_base + ipa3_ctx->ctrl->ipa_reg_base_ofst, + ipa3_ctx->mmio, + resource_p->ipa_mem_size); + + /* + * Since we now know where the transport's registers live, + * let's set up access to them. This is done since subseqent + * functions, that deal with the transport, require the + * access. 
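+	 * (gsi_map_base() below only maps the GSI register space; the full
+	 * GSI registration happens later, in ipa3_post_init(), via
+	 * gsi_register_device().)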
+ */ + if (gsi_map_base( + ipa3_res.transport_mem_base, + ipa3_res.transport_mem_size) != 0) { + IPAERR("Allocation of gsi base failed\n"); + result = -EFAULT; + goto fail_gsi_map; + } + + mutex_init(&ipa3_ctx->ipa3_active_clients.mutex); + + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE"); + ipa3_active_clients_log_inc(&log_info, false); + ipa3_ctx->q6_proxy_clk_vote_valid = true; + ipa3_ctx->q6_proxy_clk_vote_cnt = 1; + + /*Updating the proxy vote cnt 1 */ + atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1); + + /* Create workqueues for power management */ + ipa3_ctx->power_mgmt_wq = + create_singlethread_workqueue("ipa_power_mgmt"); + if (!ipa3_ctx->power_mgmt_wq) { + IPAERR("failed to create power mgmt wq\n"); + result = -ENOMEM; + goto fail_init_hw; + } + + ipa3_ctx->transport_power_mgmt_wq = + create_singlethread_workqueue("transport_power_mgmt"); + if (!ipa3_ctx->transport_power_mgmt_wq) { + IPAERR("failed to create transport power mgmt wq\n"); + result = -ENOMEM; + goto fail_create_transport_wq; + } + + mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex); + + /* init the lookaside cache */ + ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT", + sizeof(struct ipa3_flt_entry), 0, 0, NULL); + if (!ipa3_ctx->flt_rule_cache) { + IPAERR(":ipa flt cache create failed\n"); + result = -ENOMEM; + goto fail_flt_rule_cache; + } + ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT", + sizeof(struct ipa3_rt_entry), 0, 0, NULL); + if (!ipa3_ctx->rt_rule_cache) { + IPAERR(":ipa rt cache create failed\n"); + result = -ENOMEM; + goto fail_rt_rule_cache; + } + ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR", + sizeof(struct ipa3_hdr_entry), 0, 0, NULL); + if (!ipa3_ctx->hdr_cache) { + IPAERR(":ipa hdr cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_cache; + } + ipa3_ctx->hdr_offset_cache = + kmem_cache_create("IPA_HDR_OFFSET", + sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL); + if (!ipa3_ctx->hdr_offset_cache) { + IPAERR(":ipa hdr off cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_offset_cache; + } + ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX", + sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL); + if (!ipa3_ctx->hdr_proc_ctx_cache) { + IPAERR(":ipa hdr proc ctx cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_proc_ctx_cache; + } + ipa3_ctx->hdr_proc_ctx_offset_cache = + kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET", + sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL); + if (!ipa3_ctx->hdr_proc_ctx_offset_cache) { + IPAERR(":ipa hdr proc ctx off cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_proc_ctx_offset_cache; + } + ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL", + sizeof(struct ipa3_rt_tbl), 0, 0, NULL); + if (!ipa3_ctx->rt_tbl_cache) { + IPAERR(":ipa rt tbl cache create failed\n"); + result = -ENOMEM; + goto fail_rt_tbl_cache; + } + ipa3_ctx->tx_pkt_wrapper_cache = + kmem_cache_create("IPA_TX_PKT_WRAPPER", + sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL); + if (!ipa3_ctx->tx_pkt_wrapper_cache) { + IPAERR(":ipa tx pkt wrapper cache create failed\n"); + result = -ENOMEM; + goto fail_tx_pkt_wrapper_cache; + } + ipa3_ctx->rx_pkt_wrapper_cache = + kmem_cache_create("IPA_RX_PKT_WRAPPER", + sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL); + if (!ipa3_ctx->rx_pkt_wrapper_cache) { + IPAERR(":ipa rx pkt wrapper cache create failed\n"); + result = -ENOMEM; + goto fail_rx_pkt_wrapper_cache; + } + + /* init the various list heads */ + INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list); 
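+	/*
+	 * Each header-table bin keeps one list of offsets currently in use
+	 * and one list of freed offsets that can be recycled.
+	 */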
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) { + INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]); + INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]); + } + INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list); + for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { + INIT_LIST_HEAD( + &ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]); + INIT_LIST_HEAD( + &ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i]); + } + INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list); + idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids); + INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list); + idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids); + + rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4]; + INIT_LIST_HEAD(&rset->head_rt_tbl_list); + idr_init(&rset->rule_ids); + rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6]; + INIT_LIST_HEAD(&rset->head_rt_tbl_list); + idr_init(&rset->rule_ids); + + INIT_LIST_HEAD(&ipa3_ctx->intf_list); + INIT_LIST_HEAD(&ipa3_ctx->msg_list); + INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list); + init_waitqueue_head(&ipa3_ctx->msg_waitq); + mutex_init(&ipa3_ctx->msg_lock); + + /* store wlan client-connect-msg-list */ + INIT_LIST_HEAD(&ipa3_ctx->msg_wlan_client_list); + mutex_init(&ipa3_ctx->msg_wlan_client_lock); + + mutex_init(&ipa3_ctx->lock); + mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex); + mutex_init(&ipa3_ctx->ipa_cne_evt_lock); + + idr_init(&ipa3_ctx->ipa_idr); + spin_lock_init(&ipa3_ctx->idr_lock); + + /* wlan related member */ + memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb)); + spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock); + spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock); + INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list); + + ipa3_ctx->cdev.class = class_create(THIS_MODULE, DRV_NAME); + + result = alloc_chrdev_region(&ipa3_ctx->cdev.dev_num, 0, 1, DRV_NAME); + if (result) { + IPAERR("alloc_chrdev_region err\n"); + result = -ENODEV; + goto fail_alloc_chrdev_region; + } + + ipa3_ctx->cdev.dev = device_create(ipa3_ctx->cdev.class, NULL, + ipa3_ctx->cdev.dev_num, ipa3_ctx, DRV_NAME); + if (IS_ERR(ipa3_ctx->cdev.dev)) { + IPAERR(":device_create err.\n"); + result = -ENODEV; + goto fail_device_create; + } + + /* Create a wakeup source. */ + wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS"); + spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock); + + /* Initialize Power Management framework */ + if (ipa3_ctx->use_ipa_pm) { + result = ipa_pm_init(&ipa3_res.pm_init); + if (result) { + IPAERR("IPA PM initialization failed (%d)\n", -result); + result = -ENODEV; + goto fail_ipa_rm_init; + } + IPADBG("IPA resource manager initialized"); + } else { + result = ipa_rm_initialize(); + if (result) { + IPAERR("RM initialization failed (%d)\n", -result); + result = -ENODEV; + goto fail_ipa_rm_init; + } + IPADBG("IPA resource manager initialized"); + + result = ipa3_create_apps_resource(); + if (result) { + IPAERR("Failed to create APPS_CONS resource\n"); + result = -ENODEV; + goto fail_create_apps_resource; + } + } + + INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list); + + init_completion(&ipa3_ctx->init_completion_obj); + init_completion(&ipa3_ctx->uc_loaded_completion_obj); + + result = ipa3_dma_setup(); + if (result) { + IPAERR("Failed to setup IPA DMA\n"); + result = -ENODEV; + goto fail_ipa_dma_setup; + } + + /* + * We can't register the GSI driver yet, as it expects + * the GSI FW to be up and running before the registration. + * + * For IPA3.0 and the emulation system, the GSI configuration + * is done by the GSI driver. 
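+	 * (that is what the ipa3_gsi_pre_fw_load_init() call below is for).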
+ * + * For IPA3.1 (and on), the GSI configuration is done by TZ. + */ + if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0 || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + result = ipa3_gsi_pre_fw_load_init(); + if (result) { + IPAERR("gsi pre FW loading config failed\n"); + result = -ENODEV; + goto fail_gsi_pre_fw_load_init; + } + } + + cdev = &ipa3_ctx->cdev.cdev; + cdev_init(cdev, &ipa3_drv_fops); + cdev->owner = THIS_MODULE; + cdev->ops = &ipa3_drv_fops; /* from LDD3 */ + + result = cdev_add(cdev, ipa3_ctx->cdev.dev_num, 1); + if (result) { + IPAERR(":cdev_add err=%d\n", -result); + result = -ENODEV; + goto fail_cdev_add; + } + IPADBG("ipa cdev added successful. major:%d minor:%d\n", + MAJOR(ipa3_ctx->cdev.dev_num), + MINOR(ipa3_ctx->cdev.dev_num)); + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_1) { + result = ipa_odl_init(); + if (result) { + IPADBG("Error: ODL init fialed\n"); + result = -ENODEV; + goto fail_cdev_add; + } + } + + /* + * for IPA 4.0 offline charge is not needed and we need to prevent + * power collapse until IPA uC is loaded. + */ + + /* proxy vote for modem is added in ipa3_post_init() phase */ + if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0) + ipa3_proxy_clk_unvote(); + return 0; +fail_cdev_add: +fail_gsi_pre_fw_load_init: + ipa3_dma_shutdown(); +fail_ipa_dma_setup: + if (ipa3_ctx->use_ipa_pm) + ipa_pm_destroy(); + else + ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS); +fail_create_apps_resource: + if (!ipa3_ctx->use_ipa_pm) + ipa_rm_exit(); +fail_ipa_rm_init: + device_destroy(ipa3_ctx->cdev.class, ipa3_ctx->cdev.dev_num); +fail_device_create: + unregister_chrdev_region(ipa3_ctx->cdev.dev_num, 1); +fail_alloc_chrdev_region: + idr_destroy(&ipa3_ctx->ipa_idr); + rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6]; + idr_destroy(&rset->rule_ids); + rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4]; + idr_destroy(&rset->rule_ids); + idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids); + idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids); + kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache); +fail_rx_pkt_wrapper_cache: + kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache); +fail_tx_pkt_wrapper_cache: + kmem_cache_destroy(ipa3_ctx->rt_tbl_cache); +fail_rt_tbl_cache: + kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache); +fail_hdr_proc_ctx_offset_cache: + kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache); +fail_hdr_proc_ctx_cache: + kmem_cache_destroy(ipa3_ctx->hdr_offset_cache); +fail_hdr_offset_cache: + kmem_cache_destroy(ipa3_ctx->hdr_cache); +fail_hdr_cache: + kmem_cache_destroy(ipa3_ctx->rt_rule_cache); +fail_rt_rule_cache: + kmem_cache_destroy(ipa3_ctx->flt_rule_cache); +fail_flt_rule_cache: + destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq); +fail_create_transport_wq: + destroy_workqueue(ipa3_ctx->power_mgmt_wq); +fail_init_hw: + gsi_unmap_base(); +fail_gsi_map: + iounmap(ipa3_ctx->mmio); +fail_remap: + ipa3_disable_clks(); + ipa3_active_clients_log_destroy(); +fail_init_active_client: + if (ipa3_clk) + clk_put(ipa3_clk); + ipa3_clk = NULL; +fail_clk: + if (ipa3_ctx->ipa_bus_hdl) + msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl); +fail_bus_reg: + if (ipa3_ctx->ctrl->msm_bus_data_ptr) + msm_bus_cl_clear_pdata(ipa3_ctx->ctrl->msm_bus_data_ptr); +fail_init_mem_partition: +fail_bind: + kfree(ipa3_ctx->ctrl); +fail_mem_ctrl: + kfree(ipa3_ctx->ipa_tz_unlock_reg); +fail_tz_unlock_reg: + if (ipa3_ctx->logbuf) + ipc_log_context_destroy(ipa3_ctx->logbuf); + kfree(ipa3_ctx); + ipa3_ctx = NULL; +fail_mem_ctx: + return result; +} + +static int get_ipa_dts_pm_info(struct 
platform_device *pdev, + struct ipa3_plat_drv_res *ipa_drv_res) +{ + int result; + int i, j; + + ipa_drv_res->use_ipa_pm = of_property_read_bool(pdev->dev.of_node, + "qcom,use-ipa-pm"); + IPADBG("use_ipa_pm=%d\n", ipa_drv_res->use_ipa_pm); + if (!ipa_drv_res->use_ipa_pm) + return 0; + + result = of_property_read_u32(pdev->dev.of_node, + "qcom,msm-bus,num-cases", + &ipa_drv_res->pm_init.threshold_size); + /* No vote is ignored */ + ipa_drv_res->pm_init.threshold_size -= 2; + if (result || ipa_drv_res->pm_init.threshold_size > + IPA_PM_THRESHOLD_MAX) { + IPAERR("invalid property qcom,msm-bus,num-cases %d\n", + ipa_drv_res->pm_init.threshold_size); + return -EFAULT; + } + + result = of_property_read_u32_array(pdev->dev.of_node, + "qcom,throughput-threshold", + ipa_drv_res->pm_init.default_threshold, + ipa_drv_res->pm_init.threshold_size); + if (result) { + IPAERR("failed to read qcom,throughput-thresholds\n"); + return -EFAULT; + } + + result = of_property_count_strings(pdev->dev.of_node, + "qcom,scaling-exceptions"); + if (result < 0) { + IPADBG("no exception list for ipa pm\n"); + result = 0; + } + + if (result % (ipa_drv_res->pm_init.threshold_size + 1)) { + IPAERR("failed to read qcom,scaling-exceptions\n"); + return -EFAULT; + } + + ipa_drv_res->pm_init.exception_size = result / + (ipa_drv_res->pm_init.threshold_size + 1); + if (ipa_drv_res->pm_init.exception_size >= + IPA_PM_EXCEPTION_MAX) { + IPAERR("exception list larger then max %d\n", + ipa_drv_res->pm_init.exception_size); + return -EFAULT; + } + + for (i = 0; i < ipa_drv_res->pm_init.exception_size; i++) { + struct ipa_pm_exception *ex = ipa_drv_res->pm_init.exceptions; + + result = of_property_read_string_index(pdev->dev.of_node, + "qcom,scaling-exceptions", + i * ipa_drv_res->pm_init.threshold_size, + &ex[i].usecase); + if (result) { + IPAERR("failed to read qcom,scaling-exceptions"); + return -EFAULT; + } + + for (j = 0; j < ipa_drv_res->pm_init.threshold_size; j++) { + const char *str; + + result = of_property_read_string_index( + pdev->dev.of_node, + "qcom,scaling-exceptions", + i * ipa_drv_res->pm_init.threshold_size + j + 1, + &str); + if (result) { + IPAERR("failed to read qcom,scaling-exceptions" + ); + return -EFAULT; + } + + if (kstrtou32(str, 0, &ex[i].threshold[j])) { + IPAERR("error str=%s\n", str); + return -EFAULT; + } + } + } + + return 0; +} + +static int get_ipa_dts_configuration(struct platform_device *pdev, + struct ipa3_plat_drv_res *ipa_drv_res) +{ + int i, result, pos; + struct resource *resource; + u32 *ipa_tz_unlock_reg; + int elem_num; + u32 mhi_evid_limits[2]; + + /* initialize ipa3_res */ + ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST; + ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE; + ipa_drv_res->ipa_hw_type = 0; + ipa_drv_res->ipa3_hw_mode = 0; + ipa_drv_res->modem_cfg_emb_pipe_flt = false; + ipa_drv_res->ipa_wdi2 = false; + ipa_drv_res->ipa_wdi2_over_gsi = false; + ipa_drv_res->ipa_mhi_dynamic_config = false; + ipa_drv_res->use_64_bit_dma_mask = false; + ipa_drv_res->use_bw_vote = false; + ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; + ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; + ipa_drv_res->apply_rg10_wa = false; + ipa_drv_res->gsi_ch20_wa = false; + ipa_drv_res->ipa_tz_unlock_reg_num = 0; + ipa_drv_res->ipa_tz_unlock_reg = NULL; + ipa_drv_res->mhi_evid_limits[0] = IPA_MHI_GSI_EVENT_RING_ID_START; + ipa_drv_res->mhi_evid_limits[1] = IPA_MHI_GSI_EVENT_RING_ID_END; + ipa_drv_res->ipa_fltrt_not_hashable = false; + + /* Get IPA HW Version */ + result = 
of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver", + &ipa_drv_res->ipa_hw_type); + if ((result) || (ipa_drv_res->ipa_hw_type == 0)) { + IPAERR(":get resource failed for ipa-hw-ver\n"); + return -ENODEV; + } + IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type); + + if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) { + IPAERR(":IPA version below 3.0 not supported\n"); + return -ENODEV; + } + + if (ipa_drv_res->ipa_hw_type >= IPA_HW_MAX) { + IPAERR(":IPA version is greater than the MAX\n"); + return -ENODEV; + } + + /* Get IPA HW mode */ + result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode", + &ipa_drv_res->ipa3_hw_mode); + if (result) + IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n"); + else + IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d", + ipa_drv_res->ipa3_hw_mode); + + /* Get IPA WAN / LAN RX pool size */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,wan-rx-ring-size", + &ipa_drv_res->wan_rx_ring_size); + if (result) + IPADBG("using default for wan-rx-ring-size = %u\n", + ipa_drv_res->wan_rx_ring_size); + else + IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u", + ipa_drv_res->wan_rx_ring_size); + + result = of_property_read_u32(pdev->dev.of_node, + "qcom,lan-rx-ring-size", + &ipa_drv_res->lan_rx_ring_size); + if (result) + IPADBG("using default for lan-rx-ring-size = %u\n", + ipa_drv_res->lan_rx_ring_size); + else + IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u", + ipa_drv_res->lan_rx_ring_size); + + ipa_drv_res->use_ipa_teth_bridge = + of_property_read_bool(pdev->dev.of_node, + "qcom,use-ipa-tethering-bridge"); + IPADBG(": using ipa teth bridge = %s", + ipa_drv_res->use_ipa_teth_bridge + ? "True" : "False"); + + ipa_drv_res->ipa_mhi_dynamic_config = + of_property_read_bool(pdev->dev.of_node, + "qcom,use-ipa-in-mhi-mode"); + IPADBG(": ipa_mhi_dynamic_config (%s)\n", + ipa_drv_res->ipa_mhi_dynamic_config + ? "True" : "False"); + + ipa_drv_res->modem_cfg_emb_pipe_flt = + of_property_read_bool(pdev->dev.of_node, + "qcom,modem-cfg-emb-pipe-flt"); + IPADBG(": modem configure embedded pipe filtering = %s\n", + ipa_drv_res->modem_cfg_emb_pipe_flt + ? "True" : "False"); + ipa_drv_res->ipa_wdi2_over_gsi = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-wdi2_over_gsi"); + IPADBG(": WDI-2.0 over gsi= %s\n", + ipa_drv_res->ipa_wdi2_over_gsi + ? "True" : "False"); + + ipa_drv_res->ipa_wdi2 = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-wdi2"); + IPADBG(": WDI-2.0 = %s\n", + ipa_drv_res->ipa_wdi2 + ? "True" : "False"); + + ipa_drv_res->ipa_fltrt_not_hashable = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-fltrt-not-hashable"); + IPADBG(": IPA filter/route rule hashable = %s\n", + ipa_drv_res->ipa_fltrt_not_hashable + ? "True" : "False"); + + ipa_drv_res->use_64_bit_dma_mask = + of_property_read_bool(pdev->dev.of_node, + "qcom,use-64-bit-dma-mask"); + IPADBG(": use_64_bit_dma_mask = %s\n", + ipa_drv_res->use_64_bit_dma_mask + ? "True" : "False"); + + ipa_drv_res->use_bw_vote = + of_property_read_bool(pdev->dev.of_node, + "qcom,bandwidth-vote-for-ipa"); + IPADBG(": use_bw_vote = %s\n", + ipa_drv_res->use_bw_vote + ? "True" : "False"); + + ipa_drv_res->skip_uc_pipe_reset = + of_property_read_bool(pdev->dev.of_node, + "qcom,skip-uc-pipe-reset"); + IPADBG(": skip uC pipe reset = %s\n", + ipa_drv_res->skip_uc_pipe_reset + ? 
"True" : "False"); + + ipa_drv_res->tethered_flow_control = + of_property_read_bool(pdev->dev.of_node, + "qcom,tethered-flow-control"); + IPADBG(": Use apps based flow control = %s\n", + ipa_drv_res->tethered_flow_control + ? "True" : "False"); + + /* Get IPA wrapper address */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ipa-base"); + if (!resource) { + IPAERR(":get resource failed for ipa-base!\n"); + return -ENODEV; + } + ipa_drv_res->ipa_mem_base = resource->start; + ipa_drv_res->ipa_mem_size = resource_size(resource); + IPADBG(": ipa-base = 0x%x, size = 0x%x\n", + ipa_drv_res->ipa_mem_base, + ipa_drv_res->ipa_mem_size); + + smmu_info.ipa_base = ipa_drv_res->ipa_mem_base; + smmu_info.ipa_size = ipa_drv_res->ipa_mem_size; + + /* Get IPA GSI address */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "gsi-base"); + if (!resource) { + IPAERR(":get resource failed for gsi-base\n"); + return -ENODEV; + } + ipa_drv_res->transport_mem_base = resource->start; + ipa_drv_res->transport_mem_size = resource_size(resource); + IPADBG(": gsi-base = 0x%x, size = 0x%x\n", + ipa_drv_res->transport_mem_base, + ipa_drv_res->transport_mem_size); + + /* Get IPA GSI IRQ number */ + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "gsi-irq"); + if (!resource) { + IPAERR(":get resource failed for gsi-irq\n"); + return -ENODEV; + } + ipa_drv_res->transport_irq = resource->start; + IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq); + + /* Get IPA pipe mem start ofst */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ipa-pipe-mem"); + if (!resource) { + IPADBG(":not using pipe memory - resource nonexisting\n"); + } else { + ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start; + ipa_drv_res->ipa_pipe_mem_size = resource_size(resource); + IPADBG(":using pipe memory - at 0x%x of size 0x%x\n", + ipa_drv_res->ipa_pipe_mem_start_ofst, + ipa_drv_res->ipa_pipe_mem_size); + } + + /* Get IPA IRQ number */ + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "ipa-irq"); + if (!resource) { + IPAERR(":get resource failed for ipa-irq\n"); + return -ENODEV; + } + ipa_drv_res->ipa_irq = resource->start; + IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq); + + result = of_property_read_u32(pdev->dev.of_node, "qcom,ee", + &ipa_drv_res->ee); + if (result) + ipa_drv_res->ee = 0; + IPADBG(":ee = %u\n", ipa_drv_res->ee); + + ipa_drv_res->apply_rg10_wa = + of_property_read_bool(pdev->dev.of_node, + "qcom,use-rg10-limitation-mitigation"); + IPADBG(": Use Register Group 10 limitation mitigation = %s\n", + ipa_drv_res->apply_rg10_wa + ? "True" : "False"); + + ipa_drv_res->gsi_ch20_wa = + of_property_read_bool(pdev->dev.of_node, + "qcom,do-not-use-ch-gsi-20"); + IPADBG(": GSI CH 20 WA is = %s\n", + ipa_drv_res->gsi_ch20_wa + ? 
"Needed" : "Not needed"); + + elem_num = of_property_count_elems_of_size(pdev->dev.of_node, + "qcom,mhi-event-ring-id-limits", sizeof(u32)); + + if (elem_num == 2) { + if (of_property_read_u32_array(pdev->dev.of_node, + "qcom,mhi-event-ring-id-limits", mhi_evid_limits, 2)) { + IPAERR("failed to read mhi event ring id limits\n"); + return -EFAULT; + } + if (mhi_evid_limits[0] > mhi_evid_limits[1]) { + IPAERR("mhi event ring id low limit > high limit\n"); + return -EFAULT; + } + ipa_drv_res->mhi_evid_limits[0] = mhi_evid_limits[0]; + ipa_drv_res->mhi_evid_limits[1] = mhi_evid_limits[1]; + IPADBG(": mhi-event-ring-id-limits start=%u end=%u\n", + mhi_evid_limits[0], mhi_evid_limits[1]); + } else { + if (elem_num > 0) { + IPAERR("Invalid mhi event ring id limits number %d\n", + elem_num); + return -EINVAL; + } + IPADBG("use default mhi evt ring id limits start=%u end=%u\n", + ipa_drv_res->mhi_evid_limits[0], + ipa_drv_res->mhi_evid_limits[1]); + } + + elem_num = of_property_count_elems_of_size(pdev->dev.of_node, + "qcom,ipa-tz-unlock-reg", sizeof(u32)); + + if (elem_num > 0 && elem_num % 2 == 0) { + ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2; + + ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL); + if (ipa_tz_unlock_reg == NULL) + return -ENOMEM; + + ipa_drv_res->ipa_tz_unlock_reg = kcalloc( + ipa_drv_res->ipa_tz_unlock_reg_num, + sizeof(*ipa_drv_res->ipa_tz_unlock_reg), + GFP_KERNEL); + if (ipa_drv_res->ipa_tz_unlock_reg == NULL) { + kfree(ipa_tz_unlock_reg); + return -ENOMEM; + } + + if (of_property_read_u32_array(pdev->dev.of_node, + "qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg, + elem_num)) { + IPAERR("failed to read register addresses\n"); + kfree(ipa_tz_unlock_reg); + kfree(ipa_drv_res->ipa_tz_unlock_reg); + return -EFAULT; + } + + pos = 0; + for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) { + ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr = + ipa_tz_unlock_reg[pos++]; + ipa_drv_res->ipa_tz_unlock_reg[i].size = + ipa_tz_unlock_reg[pos++]; + IPADBG("tz unlock reg %d: addr 0x%pa size %llu\n", i, + &ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr, + ipa_drv_res->ipa_tz_unlock_reg[i].size); + } + kfree(ipa_tz_unlock_reg); + } + + /* get IPA PM related information */ + result = get_ipa_dts_pm_info(pdev, ipa_drv_res); + if (result) { + IPAERR("failed to get pm info from dts %d\n", result); + return result; + } + + ipa_drv_res->wdi_over_pcie = + of_property_read_bool(pdev->dev.of_node, + "qcom,wlan-ce-db-over-pcie"); + IPADBG("Is wdi_over_pcie ? (%s)\n", + ipa_drv_res->wdi_over_pcie ? 
"Yes":"No"); + + /* + * If we're on emulator, get its interrupt controller's mem + * start and size + */ + if (ipa_drv_res->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + resource = platform_get_resource_byname( + pdev, IORESOURCE_MEM, "intctrl-base"); + if (!resource) { + IPAERR(":Can't find intctrl-base resource\n"); + return -ENODEV; + } + ipa_drv_res->emulator_intcntrlr_mem_base = + resource->start; + ipa_drv_res->emulator_intcntrlr_mem_size = + resource_size(resource); + IPADBG(":using intctrl-base at 0x%x of size 0x%x\n", + ipa_drv_res->emulator_intcntrlr_mem_base, + ipa_drv_res->emulator_intcntrlr_mem_size); + } + + return 0; +} + +static int ipa_smmu_wlan_cb_probe(struct device *dev) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN); + int atomic_ctx = 1; + int fast = 1; + int bypass = 1; + int ret; + u32 add_map_size; + const u32 *add_map; + int i; + + IPADBG("sub pdev=%pK\n", dev); + + if (!smmu_info.present[IPA_SMMU_CB_WLAN]) { + IPAERR("WLAN SMMU is disabled\n"); + return 0; + } + + cb->dev = dev; + cb->iommu = iommu_domain_alloc(dev->bus); + if (!cb->iommu) { + IPAERR("could not alloc iommu domain\n"); + /* assume this failure is because iommu driver is not ready */ + return -EPROBE_DEFER; + } + cb->valid = true; + + if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass") || + ipa3_ctx->ipa_config_is_mhi) { + smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = true; + ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = true; + + if (iommu_domain_set_attr(cb->iommu, + DOMAIN_ATTR_S1_BYPASS, + &bypass)) { + IPAERR("couldn't set bypass\n"); + cb->valid = false; + return -EIO; + } + IPADBG("WLAN SMMU S1 BYPASS\n"); + } else { + smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = false; + ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = false; + + if (iommu_domain_set_attr(cb->iommu, + DOMAIN_ATTR_ATOMIC, + &atomic_ctx)) { + IPAERR("couldn't disable coherent HTW\n"); + cb->valid = false; + return -EIO; + } + IPADBG(" WLAN SMMU ATTR ATOMIC\n"); + + if (smmu_info.fast_map) { + if (iommu_domain_set_attr(cb->iommu, + DOMAIN_ATTR_FAST, + &fast)) { + IPAERR("couldn't set fast map\n"); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU fast map set\n"); + } + } + + pr_info("IPA smmu_info.s1_bypass_arr[WLAN]=%d smmu_info.fast_map=%d\n", + smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN], smmu_info.fast_map); + + ret = iommu_attach_device(cb->iommu, dev); + if (ret) { + IPAERR("could not attach device ret=%d\n", ret); + cb->valid = false; + return ret; + } + /* MAP ipa-uc ram */ + add_map = of_get_property(dev->of_node, + "qcom,additional-mapping", &add_map_size); + if (add_map) { + /* mapping size is an array of 3-tuple of u32 */ + if (add_map_size % (3 * sizeof(u32))) { + IPAERR("wrong additional mapping format\n"); + cb->valid = false; + return -EFAULT; + } + + /* iterate of each entry of the additional mapping array */ + for (i = 0; i < add_map_size / sizeof(u32); i += 3) { + u32 iova = be32_to_cpu(add_map[i]); + u32 pa = be32_to_cpu(add_map[i + 1]); + u32 size = be32_to_cpu(add_map[i + 2]); + unsigned long iova_p; + phys_addr_t pa_p; + u32 size_p; + + IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, + iova_p, pa_p, size_p); + IPADBG("mapping 0x%lx to 0x%pa size %d\n", + iova_p, &pa_p, size_p); + ipa3_iommu_map(cb->iommu, + iova_p, pa_p, size_p, + IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + } + } + return 0; +} + +static int ipa_smmu_uc_cb_probe(struct device *dev) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC); + int atomic_ctx = 1; + int bypass = 1; + int fast = 1; + int ret; + u32 
iova_ap_mapping[2]; + + IPADBG("UC CB PROBE sub pdev=%pK\n", dev); + + if (!smmu_info.present[IPA_SMMU_CB_UC]) { + IPAERR("UC SMMU is disabled\n"); + return 0; + } + + ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping", + iova_ap_mapping, 2); + if (ret) { + IPAERR("Fail to read UC start/size iova addresses\n"); + return ret; + } + cb->va_start = iova_ap_mapping[0]; + cb->va_size = iova_ap_mapping[1]; + cb->va_end = cb->va_start + cb->va_size; + IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size); + + if (smmu_info.use_64_bit_dma_mask) { + if (dma_set_mask(dev, DMA_BIT_MASK(64)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) { + IPAERR("DMA set 64bit mask failed\n"); + return -EOPNOTSUPP; + } + } else { + if (dma_set_mask(dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { + IPAERR("DMA set 32bit mask failed\n"); + return -EOPNOTSUPP; + } + } + IPADBG("UC CB PROBE=%pK create IOMMU mapping\n", dev); + + cb->dev = dev; + cb->mapping = __depr_arm_iommu_create_mapping(dev->bus, + cb->va_start, cb->va_size); + if (IS_ERR_OR_NULL(cb->mapping)) { + IPADBG("Fail to create mapping\n"); + /* assume this failure is because iommu driver is not ready */ + return -EPROBE_DEFER; + } + IPADBG("SMMU mapping created\n"); + cb->valid = true; + + IPADBG("UC CB PROBE sub pdev=%pK set attribute\n", dev); + + if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass") || + ipa3_ctx->ipa_config_is_mhi) { + smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = true; + ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = true; + + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_S1_BYPASS, + &bypass)) { + IPAERR("couldn't set bypass\n"); + __depr_arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("UC SMMU S1 BYPASS\n"); + } else { + smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = false; + ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = false; + + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_ATOMIC, + &atomic_ctx)) { + IPAERR("couldn't set domain as atomic\n"); + __depr_arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU atomic set\n"); + + if (smmu_info.fast_map) { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_FAST, + &fast)) { + IPAERR("couldn't set fast map\n"); + __depr_arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU fast map set\n"); + } + } + + pr_info("IPA smmu_info.s1_bypass_arr[UC]=%d smmu_info.fast_map=%d\n", + smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC], smmu_info.fast_map); + + IPADBG("UC CB PROBE sub pdev=%pK attaching IOMMU device\n", dev); + ret = __depr_arm_iommu_attach_device(cb->dev, cb->mapping); + if (ret) { + IPAERR("could not attach device ret=%d\n", ret); + __depr_arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return ret; + } + + cb->next_addr = cb->va_end; + ipa3_ctx->uc_pdev = dev; + + return 0; +} + +static int ipa_smmu_ap_cb_probe(struct device *dev) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP); + int result; + int atomic_ctx = 1; + int fast = 1; + int bypass = 1; + u32 iova_ap_mapping[2]; + u32 add_map_size; + const u32 *add_map; + void *smem_addr; + size_t smem_size; + int ret; + int i; + unsigned long iova_p; + phys_addr_t pa_p; + u32 size_p; + phys_addr_t iova; + phys_addr_t pa; + + IPADBG("AP CB probe: sub pdev=%pK\n", dev); + + if (!smmu_info.present[IPA_SMMU_CB_AP]) { + IPAERR("AP SMMU is disabled"); + return 0; + } + + result = 
of_property_read_u32_array(dev->of_node, "qcom,iova-mapping", + iova_ap_mapping, 2); + if (result) { + IPAERR("Fail to read AP start/size iova addresses\n"); + return result; + } + cb->va_start = iova_ap_mapping[0]; + cb->va_size = iova_ap_mapping[1]; + cb->va_end = cb->va_start + cb->va_size; + IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size); + + if (smmu_info.use_64_bit_dma_mask) { + if (dma_set_mask(dev, DMA_BIT_MASK(64)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) { + IPAERR("DMA set 64bit mask failed\n"); + return -EOPNOTSUPP; + } + } else { + if (dma_set_mask(dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { + IPAERR("DMA set 32bit mask failed\n"); + return -EOPNOTSUPP; + } + } + + cb->dev = dev; + cb->mapping = __depr_arm_iommu_create_mapping(dev->bus, + cb->va_start, cb->va_size); + if (IS_ERR_OR_NULL(cb->mapping)) { + IPADBG("Fail to create mapping\n"); + /* assume this failure is because iommu driver is not ready */ + return -EPROBE_DEFER; + } + IPADBG("SMMU mapping created\n"); + cb->valid = true; + + if (of_property_read_bool(dev->of_node, + "qcom,smmu-s1-bypass") || ipa3_ctx->ipa_config_is_mhi) { + smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = true; + ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] = true; + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_S1_BYPASS, + &bypass)) { + IPAERR("couldn't set bypass\n"); + __depr_arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("AP/USB SMMU S1 BYPASS\n"); + } else { + smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = false; + ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] = false; + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_ATOMIC, + &atomic_ctx)) { + IPAERR("couldn't set domain as atomic\n"); + __depr_arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("AP/USB SMMU atomic set\n"); + + if (smmu_info.fast_map) { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_FAST, + &fast)) { + IPAERR("couldn't set fast map\n"); + __depr_arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU fast map set\n"); + } + } + + pr_info("IPA smmu_info.s1_bypass_arr[AP]=%d smmu_info.fast_map=%d\n", + smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP], smmu_info.fast_map); + + result = __depr_arm_iommu_attach_device(cb->dev, cb->mapping); + if (result) { + IPAERR("couldn't attach to IOMMU ret=%d\n", result); + cb->valid = false; + return result; + } + + add_map = of_get_property(dev->of_node, + "qcom,additional-mapping", &add_map_size); + if (add_map) { + /* mapping size is an array of 3-tuple of u32 */ + if (add_map_size % (3 * sizeof(u32))) { + IPAERR("wrong additional mapping format\n"); + cb->valid = false; + return -EFAULT; + } + + /* iterate of each entry of the additional mapping array */ + for (i = 0; i < add_map_size / sizeof(u32); i += 3) { + u32 iova = be32_to_cpu(add_map[i]); + u32 pa = be32_to_cpu(add_map[i + 1]); + u32 size = be32_to_cpu(add_map[i + 2]); + unsigned long iova_p; + phys_addr_t pa_p; + u32 size_p; + + IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, + iova_p, pa_p, size_p); + IPADBG("mapping 0x%lx to 0x%pa size %d\n", + iova_p, &pa_p, size_p); + ipa3_iommu_map(cb->mapping->domain, + iova_p, pa_p, size_p, + IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + } + } + + /* map SMEM memory for IPA table accesses */ + ret = qcom_smem_alloc(SMEM_MODEM, + SMEM_IPA_FILTER_TABLE, + IPA_SMEM_SIZE); + + if (ret < 0 && ret != -EEXIST) { + IPAERR("unable to allocate smem MODEM entry\n"); 
+ cb->valid = false; + return -EFAULT; + } + smem_addr = qcom_smem_get(SMEM_MODEM, + SMEM_IPA_FILTER_TABLE, + &smem_size); + if (IS_ERR(smem_addr)) { + IPAERR("unable to acquire smem MODEM entry\n"); + cb->valid = false; + return -EFAULT; + } + + iova = qcom_smem_virt_to_phys(smem_addr); + pa = iova; + + IPA_SMMU_ROUND_TO_PAGE(iova, pa, IPA_SMEM_SIZE, + iova_p, pa_p, size_p); + IPADBG("mapping 0x%lx to 0x%pa size %d\n", + iova_p, &pa_p, size_p); + ipa3_iommu_map(cb->mapping->domain, + iova_p, pa_p, size_p, + IOMMU_READ | IOMMU_WRITE); + + smmu_info.present[IPA_SMMU_CB_AP] = true; + ipa3_ctx->pdev = dev; + + return 0; +} + +static int ipa_smmu_cb_probe(struct device *dev, enum ipa_smmu_cb_type cb_type) +{ + switch (cb_type) { + case IPA_SMMU_CB_AP: + return ipa_smmu_ap_cb_probe(dev); + case IPA_SMMU_CB_WLAN: + return ipa_smmu_wlan_cb_probe(dev); + case IPA_SMMU_CB_UC: + return ipa_smmu_uc_cb_probe(dev); + case IPA_SMMU_CB_MAX: + IPAERR("Invalid cb_type\n"); + } + return 0; +} + +static int ipa3_attach_to_smmu(void) +{ + struct ipa_smmu_cb_ctx *cb; + int i, result; + + ipa3_ctx->pdev = &ipa3_ctx->master_pdev->dev; + ipa3_ctx->uc_pdev = &ipa3_ctx->master_pdev->dev; + + if (smmu_info.arm_smmu) { + IPADBG("smmu is enabled\n"); + for (i = 0; i < IPA_SMMU_CB_MAX; i++) { + cb = ipa3_get_smmu_ctx(i); + result = ipa_smmu_cb_probe(cb->dev, i); + if (result) + IPAERR("probe failed for cb %d\n", i); + } + } else { + IPADBG("smmu is disabled\n"); + } + return 0; +} + +static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt) +{ + ipa3_freeze_clock_vote_and_notify_modem(); + + return IRQ_HANDLED; +} + +static int ipa3_smp2p_probe(struct device *dev) +{ + struct device_node *node = dev->of_node; + int res; + int irq = 0; + + if (ipa3_ctx == NULL) { + IPAERR("ipa3_ctx was not initialized\n"); + return -EPROBE_DEFER; + } + IPADBG("node->name=%s\n", node->name); + if (strcmp("qcom,smp2p_map_ipa_1_out", node->name) == 0) { + if (of_find_property(node, "qcom,smem-states", NULL)) { + ipa3_ctx->smp2p_info.smem_state = + qcom_smem_state_get(dev, "ipa-smp2p-out", + &ipa3_ctx->smp2p_info.smem_bit); + if (IS_ERR(ipa3_ctx->smp2p_info.smem_state)) { + IPAERR("fail to get smp2p clk resp bit %ld\n", + PTR_ERR(ipa3_ctx->smp2p_info.smem_state)); + return PTR_ERR(ipa3_ctx->smp2p_info.smem_state); + } + IPADBG("smem_bit=%d\n", ipa3_ctx->smp2p_info.smem_bit); + } + } else if (strcmp("qcom,smp2p_map_ipa_1_in", node->name) == 0) { + res = irq = of_irq_get_byname(node, "ipa-smp2p-in"); + if (res < 0) { + IPADBG("of_irq_get_byname returned %d\n", irq); + return res; + } + + ipa3_ctx->smp2p_info.in_base_id = irq; + IPADBG("smp2p irq#=%d\n", irq); + res = devm_request_threaded_irq(dev, irq, NULL, + (irq_handler_t)ipa3_smp2p_modem_clk_query_isr, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "ipa_smp2p_clk_vote", dev); + if (res) { + IPAERR("fail to register smp2p irq=%d\n", irq); + return -ENODEV; + } + } + return 0; +} + +int ipa3_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + int result; + struct device *dev = &pdev_p->dev; + struct ipa_smmu_cb_ctx *cb; + + IPADBG("IPA driver probing started\n"); + IPADBG("dev->of_node->name = %s\n", dev->of_node->name); + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb")) { + cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP); + cb->dev = dev; + smmu_info.present[IPA_SMMU_CB_AP] = true; + + return 0; + } + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb")) { + cb = 
ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN); + cb->dev = dev; + smmu_info.present[IPA_SMMU_CB_WLAN] = true; + + return 0; + } + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb")) { + cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC); + cb->dev = dev; + smmu_info.present[IPA_SMMU_CB_UC] = true; + + return 0; + } + + if (of_device_is_compatible(dev->of_node, + "qcom,smp2p-map-ipa-1-out")) + return ipa3_smp2p_probe(dev); + if (of_device_is_compatible(dev->of_node, + "qcom,smp2p-map-ipa-1-in")) + return ipa3_smp2p_probe(dev); + + result = get_ipa_dts_configuration(pdev_p, &ipa3_res); + if (result) { + IPAERR("IPA dts parsing failed\n"); + return result; + } + + result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl); + if (result) { + IPAERR("IPA API binding failed\n"); + return result; + } + + if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) { + if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,smmu-fast-map")) + smmu_info.fast_map = true; + if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,use-64-bit-dma-mask")) + smmu_info.use_64_bit_dma_mask = true; + smmu_info.arm_smmu = true; + } else if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,msm-smmu")) { + IPAERR("Legacy IOMMU not supported\n"); + result = -EOPNOTSUPP; + } else { + if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,use-64-bit-dma-mask")) { + if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) || + dma_set_coherent_mask(&pdev_p->dev, + DMA_BIT_MASK(64))) { + IPAERR("DMA set 64bit mask failed\n"); + return -EOPNOTSUPP; + } + } else { + if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(&pdev_p->dev, + DMA_BIT_MASK(32))) { + IPAERR("DMA set 32bit mask failed\n"); + return -EOPNOTSUPP; + } + } + } + + /* Proceed to real initialization */ + result = ipa3_pre_init(&ipa3_res, pdev_p); + if (result) { + IPAERR("ipa3_init failed\n"); + return result; + } + + result = of_platform_populate(pdev_p->dev.of_node, + pdrv_match, NULL, &pdev_p->dev); + if (result) { + IPAERR("failed to populate platform\n"); + return result; + } + + return result; +} + +/** + * ipa3_ap_suspend() - suspend callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP suspend + * operation is invoked, usually by pressing a suspend button. + * + * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP. + * This will postpone the suspend operation until IPA is no longer used by AP. + */ +int ipa3_ap_suspend(struct device *dev) +{ + int i; + + IPADBG("Enter...\n"); + + /* In case there is a tx/rx handler in polling mode fail to suspend */ + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (ipa3_ctx->ep[i].sys && + atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) { + IPAERR("EP %d is in polling state, do not suspend\n", + i); + return -EAGAIN; + } + } + + if (ipa3_ctx->use_ipa_pm) { + ipa_pm_deactivate_all_deferred(); + } else { + /* + * Release transport IPA resource without waiting + * for inactivity timer + */ + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0); + ipa3_transport_release_resource(NULL); + } + IPADBG("Exit\n"); + + return 0; +} + +/** + * ipa3_ap_resume() - resume callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP resume + * operation is invoked. + * + * Always returns 0 since resume should always succeed. 
+ */ +int ipa3_ap_resume(struct device *dev) +{ + return 0; +} + +struct ipa3_context *ipa3_get_ctx(void) +{ + return ipa3_ctx; +} + +static void ipa_gsi_notify_cb(struct gsi_per_notify *notify) +{ + /* + * These values are reported by hardware. Any error indicates + * hardware unexpected state. + */ + switch (notify->evt_id) { + case GSI_PER_EVT_GLOB_ERROR: + IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n"); + IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc); + break; + case GSI_PER_EVT_GLOB_GP1: + IPAERR("Got GSI_PER_EVT_GLOB_GP1\n"); + ipa_assert(); + break; + case GSI_PER_EVT_GLOB_GP2: + IPAERR("Got GSI_PER_EVT_GLOB_GP2\n"); + ipa_assert(); + break; + case GSI_PER_EVT_GLOB_GP3: + IPAERR("Got GSI_PER_EVT_GLOB_GP3\n"); + ipa_assert(); + break; + case GSI_PER_EVT_GENERAL_BREAK_POINT: + IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n"); + break; + case GSI_PER_EVT_GENERAL_BUS_ERROR: + IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n"); + ipa_assert(); + break; + case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW: + IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n"); + ipa_assert(); + break; + case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW: + IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n"); + ipa_assert(); + break; + default: + IPAERR("Received unexpected evt: %d\n", + notify->evt_id); + ipa_assert(); + } +} + +int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data) +{ + struct ipa3_ready_cb_info *cb_info = NULL; + + /* check ipa3_ctx existed or not */ + if (!ipa3_ctx) { + IPADBG("IPA driver haven't initialized\n"); + return -ENXIO; + } + mutex_lock(&ipa3_ctx->lock); + if (ipa3_ctx->ipa_initialization_complete) { + mutex_unlock(&ipa3_ctx->lock); + IPADBG("IPA driver finished initialization already\n"); + return -EEXIST; + } + + cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL); + if (!cb_info) { + mutex_unlock(&ipa3_ctx->lock); + return -ENOMEM; + } + + cb_info->ready_cb = ipa_ready_cb; + cb_info->user_data = user_data; + + list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list); + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +int ipa3_iommu_map(struct iommu_domain *domain, + unsigned long iova, phys_addr_t paddr, size_t size, int prot) +{ + struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP); + struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC); + + IPADBG("domain =0x%pK iova 0x%lx\n", domain, iova); + IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size); + + /* make sure no overlapping */ + if (domain == ipa3_get_smmu_domain()) { + if (iova >= ap_cb->va_start && iova < ap_cb->va_end) { + IPAERR("iommu AP overlap addr 0x%lx\n", iova); + ipa_assert(); + return -EFAULT; + } + } else if (domain == ipa3_get_wlan_smmu_domain()) { + /* wlan is one time map */ + } else if (domain == ipa3_get_uc_smmu_domain()) { + if (iova >= uc_cb->va_start && iova < uc_cb->va_end) { + IPAERR("iommu uC overlap addr 0x%lx\n", iova); + ipa_assert(); + return -EFAULT; + } + } else { + IPAERR("Unexpected domain 0x%pK\n", domain); + ipa_assert(); + return -EFAULT; + } + + return iommu_map(domain, iova, paddr, size, prot); +} + +/** + * ipa3_get_smmu_params()- Return the ipa3 smmu related params. 
+ */ +int ipa3_get_smmu_params(struct ipa_smmu_in_params *in, + struct ipa_smmu_out_params *out) +{ + bool is_smmu_enable = false; + + if (out == NULL || in == NULL) { + IPAERR("bad parms for Client SMMU out params\n"); + return -EINVAL; + } + + if (!ipa3_ctx) { + IPAERR("IPA not yet initialized\n"); + return -EINVAL; + } + + switch (in->smmu_client) { + case IPA_SMMU_WLAN_CLIENT: + is_smmu_enable = !(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] | + ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]); + break; + default: + is_smmu_enable = false; + IPAERR("Trying to get illegal clients SMMU status"); + return -EINVAL; + } + + out->smmu_enable = is_smmu_enable; + + return 0; +} + +#define MAX_LEN 96 + +void ipa_pc_qmp_enable(void) +{ + char buf[MAX_LEN] = "{class: bcm, res: ipa_pc, val: 1}"; + struct qmp_pkt pkt; + int ret = 0; + + /* prepare the mailbox struct */ + ipa3_ctx->mbox_client.dev = &ipa3_ctx->master_pdev->dev; + ipa3_ctx->mbox_client.tx_block = true; + ipa3_ctx->mbox_client.tx_tout = MBOX_TOUT_MS; + ipa3_ctx->mbox_client.knows_txdone = false; + + ipa3_ctx->mbox = mbox_request_channel(&ipa3_ctx->mbox_client, 0); + if (IS_ERR(ipa3_ctx->mbox)) { + ret = PTR_ERR(ipa3_ctx->mbox); + if (ret != -EPROBE_DEFER) + IPAERR("mailbox channel request failed, ret=%d\n", ret); + goto cleanup; + } + + /* prepare the QMP packet to send */ + pkt.size = MAX_LEN; + pkt.data = buf; + + /* send the QMP packet to AOP */ + ret = mbox_send_message(ipa3_ctx->mbox, &pkt); + if (ret < 0) { + IPAERR("qmp message send failed, ret=%d\n", ret); + goto cleanup; + } + +cleanup: + /* release the channel (if one was obtained) before clearing the handle */ + if (!IS_ERR_OR_NULL(ipa3_ctx->mbox)) + mbox_free_channel(ipa3_ctx->mbox); + ipa3_ctx->mbox = NULL; +} + +/************************************************************** + * PCIe Version + *************************************************************/ + +int ipa3_pci_drv_probe( + struct pci_dev *pci_dev, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + int result; + struct ipa3_plat_drv_res *ipa_drv_res; + u32 bar0_offset; + u32 mem_start; + u32 mem_end; + uint32_t bits; + uint32_t ipa_start, gsi_start, intctrl_start; + struct device *dev; + static struct platform_device platform_dev; + + if (!pci_dev || !api_ctrl || !pdrv_match) { + IPAERR( + "Bad arg: pci_dev (%pK) and/or api_ctrl (%pK) and/or pdrv_match (%pK)\n", + pci_dev, api_ctrl, pdrv_match); + return -EOPNOTSUPP; + } + + dev = &(pci_dev->dev); + + IPADBG("IPA PCI driver probing started\n"); + + /* + * Follow PCI driver flow here. + * pci_enable_device: Enables device and assigns resources + * pci_request_region: Makes BAR0 address region usable + */ + result = pci_enable_device(pci_dev); + if (result < 0) { + IPAERR("pci_enable_device() failed\n"); + return -EOPNOTSUPP; + } + + result = pci_request_region(pci_dev, 0, "IPA Memory"); + if (result < 0) { + IPAERR("pci_request_region() failed\n"); + pci_disable_device(pci_dev); + return -EOPNOTSUPP; + } + + /* + * When in the PCI/emulation environment, &platform_dev is + * passed to get_ipa_dts_configuration(), but is unused, since + * all usages of it in the function are replaced by CPP + * relative to definitions in ipa_emulation_stubs.h. Passing + * &platform_dev makes code validity tools happy. 
+ */ + if (get_ipa_dts_configuration(&platform_dev, &ipa3_res) != 0) { + IPAERR("get_ipa_dts_configuration() failed\n"); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return -EOPNOTSUPP; + } + + ipa_drv_res = &ipa3_res; + + result = + of_property_read_u32(NULL, "emulator-bar0-offset", + &bar0_offset); + if (result) { + IPAERR(":get resource failed for emulator-bar0-offset!\n"); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return -ENODEV; + } + IPADBG(":using emulator-bar0-offset 0x%08X\n", bar0_offset); + + ipa_start = ipa_drv_res->ipa_mem_base; + gsi_start = ipa_drv_res->transport_mem_base; + intctrl_start = ipa_drv_res->emulator_intcntrlr_mem_base; + + /* + * Which IRQ will we be interrupted on? + */ + ipa_drv_res->emulator_irq = pci_dev->irq; + IPADBG( + "EMULATION PCI_INTERRUPT_PIN(%u)\n", + ipa_drv_res->emulator_irq); + + /* + * Set the ipa_mem_base to the PCI base address of BAR0 + */ + mem_start = pci_resource_start(pci_dev, 0); + mem_end = pci_resource_end(pci_dev, 0); + + IPADBG("PCI START = 0x%x\n", mem_start); + IPADBG("PCI END = 0x%x\n", mem_end); + + ipa_drv_res->ipa_mem_base = mem_start + bar0_offset; + + smmu_info.ipa_base = ipa_drv_res->ipa_mem_base; + smmu_info.ipa_size = ipa_drv_res->ipa_mem_size; + + ipa_drv_res->transport_mem_base = + ipa_drv_res->ipa_mem_base + (gsi_start - ipa_start); + + ipa_drv_res->emulator_intcntrlr_mem_base = + ipa_drv_res->ipa_mem_base + (intctrl_start - ipa_start); + + IPADBG("ipa_mem_base = 0x%x\n", + ipa_drv_res->ipa_mem_base); + IPADBG("ipa_mem_size = 0x%x\n", + ipa_drv_res->ipa_mem_size); + + IPADBG("transport_mem_base = 0x%x\n", + ipa_drv_res->transport_mem_base); + IPADBG("transport_mem_size = 0x%x\n", + ipa_drv_res->transport_mem_size); + + IPADBG("emulator_intcntrlr_mem_base = 0x%x\n", + ipa_drv_res->emulator_intcntrlr_mem_base); + IPADBG("emulator_intcntrlr_mem_size = 0x%x\n", + ipa_drv_res->emulator_intcntrlr_mem_size); + + result = ipa3_bind_api_controller(ipa_drv_res->ipa_hw_type, api_ctrl); + if (result != 0) { + IPAERR("ipa3_bind_api_controller() failed\n"); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return result; + } + + bits = (ipa_drv_res->use_64_bit_dma_mask) ? 64 : 32; + + if (dma_set_mask(dev, DMA_BIT_MASK(bits)) != 0) { + IPAERR("dma_set_mask(%pK, %u) failed\n", dev, bits); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return -EOPNOTSUPP; + } + + if (dma_set_coherent_mask(dev, DMA_BIT_MASK(bits)) != 0) { + IPAERR("dma_set_coherent_mask(%pK, %u) failed\n", dev, bits); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return -EOPNOTSUPP; + } + + pci_set_master(pci_dev); + + memset(&platform_dev, 0, sizeof(platform_dev)); + platform_dev.dev = *dev; + + /* Proceed to real initialization */ + result = ipa3_pre_init(&ipa3_res, &platform_dev); + if (result) { + IPAERR("ipa3_init failed\n"); + pci_clear_master(pci_dev); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return result; + } + + return result; +} + +/* + * The following returns transport register memory location and + * size... 
+ */ +int ipa3_get_transport_info( + phys_addr_t *phys_addr_ptr, + unsigned long *size_ptr) +{ + if (!phys_addr_ptr || !size_ptr) { + IPAERR("Bad arg: phys_addr_ptr(%pK) and/or size_ptr(%pK)\n", + phys_addr_ptr, size_ptr); + return -EINVAL; + } + + *phys_addr_ptr = ipa3_res.transport_mem_base; + *size_ptr = ipa3_res.transport_mem_size; + + return 0; +} +EXPORT_SYMBOL(ipa3_get_transport_info); + +static uint emulation_type = IPA_HW_v4_0; + +/* + * The following returns emulation type... + */ +uint ipa3_get_emulation_type(void) +{ + return emulation_type; +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA HW device driver"); + +/* + * Module parameter. Invoke as follows: + * insmod ipat.ko emulation_type=[13|14|17|...|N] + * Examples: + * insmod ipat.ko emulation_type=13 # for IPA 3.5.1 + * insmod ipat.ko emulation_type=14 # for IPA 4.0 + * insmod ipat.ko emulation_type=17 # for IPA 4.5 + * + * NOTE: The emulation_type values need to come from: enum ipa_hw_type + * + */ + +module_param(emulation_type, uint, 0000); +MODULE_PARM_DESC( + emulation_type, + "emulation_type=N N can be 13 for IPA 3.5.1, 14 for IPA 4.0, 17 for IPA 4.5"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c new file mode 100644 index 000000000000..1552bd515e3b --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -0,0 +1,1878 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#include <asm/barrier.h> +#include <linux/delay.h> +#include <linux/device.h> +#include "ipa_i.h" +#include "linux/msm_gsi.h" + +/* + * These values were determined empirically and show good E2E bi- + * directional throughputs + */ +#define IPA_HOLB_TMR_EN 0x1 +#define IPA_HOLB_TMR_DIS 0x0 +#define IPA_POLL_AGGR_STATE_RETRIES_NUM 3 +#define IPA_POLL_AGGR_STATE_SLEEP_MSEC 1 + +#define IPA_PKT_FLUSH_TO_US 100 + +#define IPA_POLL_FOR_EMPTINESS_NUM 50 +#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20 +#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5 +#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200 + +/* xfer_rsc_idx should be 7 bits */ +#define IPA_XFER_RSC_IDX_MAX 127 + +static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep, + bool *is_empty); + +int ipa3_enable_data_path(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl]; + struct ipa_ep_cfg_holb holb_cfg; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + int res = 0; + struct ipahal_reg_endp_init_rsrc_grp rsrc_grp; + + /* Assign the resource group for pipe */ + memset(&rsrc_grp, 0, sizeof(rsrc_grp)); + rsrc_grp.rsrc_grp = ipa_get_ep_group(ep->client); + if (rsrc_grp.rsrc_grp == -1) { + IPAERR("invalid group for client %d\n", ep->client); + WARN_ON(1); + return -EFAULT; + } + + IPADBG("Setting group %d for pipe %d\n", + rsrc_grp.rsrc_grp, clnt_hdl); + ipahal_write_reg_n_fields(IPA_ENDP_INIT_RSRC_GRP_n, clnt_hdl, + &rsrc_grp); + + IPADBG("Enabling data path\n"); + if (IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + /* + * Set HOLB on USB DPL CONS to avoid IPA stall + * if DPL client is not pulling the data + * on other end from IPA hw. 
+ */ + if (ep->client == IPA_CLIENT_USB_DPL_CONS) + holb_cfg.en = IPA_HOLB_TMR_EN; + else + holb_cfg.en = IPA_HOLB_TMR_DIS; + holb_cfg.tmr_val = 0; + res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + + /* Enable the pipe */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + if (IPA_CLIENT_IS_CONS(ep->client) && + (ep->keep_ipa_awake || + ipa3_ctx->resume_on_connect[ep->client] || + !ipa3_should_pipe_be_suspended(ep->client))) { + memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = false; + res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + } + + return res; +} + +int ipa3_disable_data_path(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl]; + struct ipa_ep_cfg_holb holb_cfg; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + struct ipa_ep_cfg_aggr ep_aggr; + int res = 0; + + IPADBG("Disabling data path\n"); + if (IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_EN; + holb_cfg.tmr_val = 0; + res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + + /* + * for IPA 4.0 and above aggregation frame is closed together with + * channel STOP + */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + /* Suspend the pipe */ + if (IPA_CLIENT_IS_CONS(ep->client)) { + /* + * for RG10 workaround uC needs to be loaded before + * pipe can be suspended in this case. + */ + if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) { + IPADBG("uC is not loaded yet, waiting...\n"); + res = wait_for_completion_timeout( + &ipa3_ctx->uc_loaded_completion_obj, + 60 * HZ); + if (res == 0) + IPADBG("timeout waiting for uC load\n"); + } + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = true; + res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + + udelay(IPA_PKT_FLUSH_TO_US); + ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, + &ep_aggr); + if (ep_aggr.aggr_en) { + res = ipa3_tag_aggr_force_close(clnt_hdl); + if (res) { + IPAERR("tag process timeout client:%d err:%d\n", + clnt_hdl, res); + ipa_assert(); + } + } + } + + return res; +} + +static void ipa_chan_err_cb(struct gsi_chan_err_notify *notify) +{ + /* + * These are the errors that hardware has returned, + * which indicates hardware unexpected state. 
+ */ + if (notify) { + switch (notify->evt_id) { + case GSI_CHAN_INVALID_TRE_ERR: + IPAERR("Received GSI_CHAN_INVALID_TRE_ERR\n"); + break; + case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR: + IPAERR("Received GSI_CHAN_NON_ALLOC_EVT_ACCESS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_BUFFERS_ERR: + IPAERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_RESOURCES_ERR: + IPAERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR: + IPAERR("Received GSI_CHAN_UNSUPP_INTER_EE_OP_ERR\n"); + break; + case GSI_CHAN_HWO_1_ERR: + IPAERR("Received GSI_CHAN_HWO_1_ERR\n"); + break; + default: + IPAERR("Unexpected err evt: %d\n", notify->evt_id); + } + ipa_assert(); + } +} + +static void ipa_xfer_cb(struct gsi_chan_xfer_notify *notify) +{ +} + +static int ipa3_reconfigure_channel_to_gpi(struct ipa3_ep_context *ep, + struct gsi_chan_props *orig_chan_props, + struct ipa_mem_buffer *chan_dma) +{ + struct gsi_chan_props chan_props; + enum gsi_status gsi_res; + dma_addr_t chan_dma_addr; + int result; + + /* Set up channel properties */ + memset(&chan_props, 0, sizeof(struct gsi_chan_props)); + chan_props.prot = GSI_CHAN_PROT_GPI; + chan_props.dir = GSI_CHAN_DIR_FROM_GSI; + chan_props.ch_id = orig_chan_props->ch_id; + chan_props.evt_ring_hdl = orig_chan_props->evt_ring_hdl; + chan_props.re_size = GSI_CHAN_RE_SIZE_16B; + chan_props.ring_len = 2 * GSI_CHAN_RE_SIZE_16B; + chan_props.ring_base_vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, chan_props.ring_len, + &chan_dma_addr, GFP_ATOMIC); + chan_props.ring_base_addr = chan_dma_addr; + chan_dma->base = chan_props.ring_base_vaddr; + chan_dma->phys_base = chan_props.ring_base_addr; + chan_dma->size = chan_props.ring_len; + chan_props.use_db_eng = GSI_CHAN_DIRECT_MODE; + chan_props.max_prefetch = GSI_ONE_PREFETCH_SEG; + chan_props.low_weight = 1; + chan_props.chan_user_data = NULL; + chan_props.err_cb = ipa_chan_err_cb; + chan_props.xfer_cb = ipa_xfer_cb; + + gsi_res = gsi_set_channel_cfg(ep->gsi_chan_hdl, &chan_props, NULL); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error setting channel properties\n"); + result = -EFAULT; + goto set_chan_cfg_fail; + } + + return 0; + +set_chan_cfg_fail: + dma_free_coherent(ipa3_ctx->pdev, chan_dma->size, + chan_dma->base, chan_dma->phys_base); + return result; + +} + +static int ipa3_restore_channel_properties(struct ipa3_ep_context *ep, + struct gsi_chan_props *chan_props, + union gsi_channel_scratch *chan_scratch) +{ + enum gsi_status gsi_res; + + gsi_res = gsi_set_channel_cfg(ep->gsi_chan_hdl, chan_props, + chan_scratch); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error restoring channel properties\n"); + return -EFAULT; + } + + return 0; +} + +static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl, + struct ipa3_ep_context *ep) +{ + int result = -EFAULT; + enum gsi_status gsi_res; + struct gsi_chan_props orig_chan_props; + union gsi_channel_scratch orig_chan_scratch; + struct ipa_mem_buffer chan_dma; + void *buff; + dma_addr_t dma_addr; + struct gsi_xfer_elem xfer_elem; + int i; + int aggr_active_bitmap = 0; + bool pipe_suspended = false; + struct ipa_ep_cfg_ctrl ctrl; + + IPADBG("Applying reset channel with open aggregation frame WA\n"); + ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl)); + + /* Reset channel */ + gsi_res = gsi_reset_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error resetting channel: %d\n", gsi_res); + return -EFAULT; + } + + /* Reconfigure channel to dummy GPI channel */ + 
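/* Save the current channel config/scratch so it can be restored afterwards */ +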
memset(&orig_chan_props, 0, sizeof(struct gsi_chan_props)); + memset(&orig_chan_scratch, 0, sizeof(union gsi_channel_scratch)); + gsi_res = gsi_get_channel_cfg(ep->gsi_chan_hdl, &orig_chan_props, + &orig_chan_scratch); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error getting channel properties: %d\n", gsi_res); + return -EFAULT; + } + memset(&chan_dma, 0, sizeof(struct ipa_mem_buffer)); + result = ipa3_reconfigure_channel_to_gpi(ep, &orig_chan_props, + &chan_dma); + if (result) + return -EFAULT; + + memset(&ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + + ipahal_read_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, &ctrl); + if (ctrl.ipa_ep_suspend) { + IPADBG("pipe is suspended, remove suspend\n"); + pipe_suspended = true; + ctrl.ipa_ep_suspend = false; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, + clnt_hdl, &ctrl); + } + + /* Start channel and put 1 Byte descriptor on it */ + gsi_res = gsi_start_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error starting channel: %d\n", gsi_res); + goto start_chan_fail; + } + + memset(&xfer_elem, 0, sizeof(struct gsi_xfer_elem)); + buff = dma_alloc_coherent(ipa3_ctx->pdev, 1, &dma_addr, + GFP_ATOMIC); + xfer_elem.addr = dma_addr; + xfer_elem.len = 1; + xfer_elem.flags = GSI_XFER_FLAG_EOT; + xfer_elem.type = GSI_XFER_ELEM_DATA; + + gsi_res = gsi_queue_xfer(ep->gsi_chan_hdl, 1, &xfer_elem, + true); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error queueing xfer: %d\n", gsi_res); + result = -EFAULT; + goto queue_xfer_fail; + } + + /* Wait for aggregation frame to be closed and stop channel*/ + for (i = 0; i < IPA_POLL_AGGR_STATE_RETRIES_NUM; i++) { + aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + if (!(aggr_active_bitmap & (1 << clnt_hdl))) + break; + msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC); + } + + if (aggr_active_bitmap & (1 << clnt_hdl)) { + IPAERR("Failed closing aggr frame for client: %d\n", + clnt_hdl); + /* Unexpected hardware state */ + ipa_assert(); + } + + dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr); + + result = ipa3_stop_gsi_channel(clnt_hdl); + if (result) { + IPAERR("Error stopping channel: %d\n", result); + goto start_chan_fail; + } + + /* Reset channel */ + gsi_res = gsi_reset_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error resetting channel: %d\n", gsi_res); + result = -EFAULT; + goto start_chan_fail; + } + + /* + * Need to sleep for 1ms as required by H/W verified + * sequence for resetting GSI channel + */ + msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC); + + if (pipe_suspended) { + IPADBG("suspend the pipe again\n"); + ctrl.ipa_ep_suspend = true; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, + clnt_hdl, &ctrl); + } + + /* Restore channels properties */ + result = ipa3_restore_channel_properties(ep, &orig_chan_props, + &orig_chan_scratch); + if (result) + goto restore_props_fail; + dma_free_coherent(ipa3_ctx->pdev, chan_dma.size, + chan_dma.base, chan_dma.phys_base); + + return 0; + +queue_xfer_fail: + ipa3_stop_gsi_channel(clnt_hdl); + dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr); +start_chan_fail: + if (pipe_suspended) { + IPADBG("suspend the pipe again\n"); + ctrl.ipa_ep_suspend = true; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, + clnt_hdl, &ctrl); + } + ipa3_restore_channel_properties(ep, &orig_chan_props, + &orig_chan_scratch); +restore_props_fail: + dma_free_coherent(ipa3_ctx->pdev, chan_dma.size, + chan_dma.base, chan_dma.phys_base); + return result; +} + +int ipa3_reset_gsi_channel(u32 clnt_hdl) +{ + struct ipa3_ep_context 
*ep; + int result = -EFAULT; + enum gsi_status gsi_res; + int aggr_active_bitmap = 0; + bool undo_aggr_value = false; + struct ipahal_reg_clkon_cfg fields; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + /* + * IPAv4.0 HW has a limitation where WSEQ in MBIM NTH header is not + * reset to 0 when MBIM pipe is reset. Workaround is to disable + * HW clock gating for AGGR block using IPA_CLKON_CFG reg. undo flag to + * disable the bit after reset is finished + */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + if (ep->cfg.aggr.aggr == IPA_MBIM_16 && + ep->cfg.aggr.aggr_en != IPA_BYPASS_AGGR) { + ipahal_read_reg_fields(IPA_CLKON_CFG, &fields); + if (fields.open_aggr_wrapper) { + undo_aggr_value = true; + fields.open_aggr_wrapper = false; + ipahal_write_reg_fields(IPA_CLKON_CFG, &fields); + } + } + } + + /* + * for IPA 4.0 and above aggregation frame is closed together with + * channel STOP. Below workaround not required for IPA 4.0 and above + * versions. + */ + + /* + * Check for open aggregation frame on Consumer EP - + * reset with open aggregation frame WA + */ + if (IPA_CLIENT_IS_CONS(ep->client) && + ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + if (aggr_active_bitmap & (1 << clnt_hdl)) { + result = ipa3_reset_with_open_aggr_frame_wa(clnt_hdl, + ep); + if (result) + goto reset_chan_fail; + goto finish_reset; + } + } + + /* + * Reset channel + * If the reset called after stop, need to wait 1ms + */ + msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC); + gsi_res = gsi_reset_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error resetting channel: %d\n", gsi_res); + result = -EFAULT; + goto reset_chan_fail; + } + +finish_reset: + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + /* undo the aggr value if flag was set above*/ + if (undo_aggr_value) { + fields.open_aggr_wrapper = false; + ipahal_write_reg_fields(IPA_CLKON_CFG, &fields); + } + + IPADBG("exit\n"); + return 0; + +reset_chan_fail: + /* undo the aggr value if flag was set above*/ + if (undo_aggr_value) { + fields.open_aggr_wrapper = false; + ipahal_write_reg_fields(IPA_CLKON_CFG, &fields); + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +int ipa3_reset_gsi_event_ring(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + int result = -EFAULT; + enum gsi_status gsi_res; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + /* Reset event ring */ + gsi_res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error resetting event: %d\n", gsi_res); + result = -EFAULT; + goto reset_evt_fail; + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("exit\n"); + return 0; + +reset_evt_fail: + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +static bool ipa3_is_legal_params(struct ipa_request_gsi_channel_params *params) +{ + if (params->client 
>= IPA_CLIENT_MAX) + return false; + else + return true; +} + +int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map, + enum ipa_smmu_cb_type cb_type) +{ + struct iommu_domain *smmu_domain; + int res; + + if (cb_type >= IPA_SMMU_CB_MAX) { + IPAERR("invalid cb_type\n"); + return -EINVAL; + } + + if (ipa3_ctx->s1_bypass_arr[cb_type]) { + IPADBG("CB# %d is set to s1 bypass\n", cb_type); + return 0; + } + + smmu_domain = ipa3_get_smmu_domain_by_type(cb_type); + if (!smmu_domain) { + IPAERR("invalid smmu domain\n"); + return -EINVAL; + } + + if (map) { + res = ipa3_iommu_map(smmu_domain, phys_addr, phys_addr, + PAGE_SIZE, IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + } else { + res = iommu_unmap(smmu_domain, phys_addr, PAGE_SIZE); + res = (res != PAGE_SIZE); + } + if (res) { + IPAERR("Fail to %s reg 0x%pa\n", map ? "map" : "unmap", + &phys_addr); + return -EINVAL; + } + + IPADBG("Peer reg 0x%pa %s\n", &phys_addr, map ? "map" : "unmap"); + + return 0; +} + +int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt, + enum ipa_smmu_cb_type cb_type) +{ + struct iommu_domain *smmu_domain; + int res; + phys_addr_t phys; + unsigned long va; + struct scatterlist *sg; + int count = 0; + size_t len; + int i; + struct page *page; + + if (cb_type >= IPA_SMMU_CB_MAX) { + IPAERR("invalid cb_type\n"); + return -EINVAL; + } + + if (ipa3_ctx->s1_bypass_arr[cb_type]) { + IPADBG("CB# %d is set to s1 bypass\n", cb_type); + return 0; + } + + smmu_domain = ipa3_get_smmu_domain_by_type(cb_type); + if (!smmu_domain) { + IPAERR("invalid smmu domain\n"); + return -EINVAL; + } + + /* + * USB GSI driver would update sgt irrespective of USB S1 + * is enable or bypass. + * If USB S1 is enabled using IOMMU, iova != pa. + * If USB S1 is bypass, iova == pa. + */ + if (map) { + if (sgt != NULL) { + va = rounddown(iova, PAGE_SIZE); + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + page = sg_page(sg); + phys = page_to_phys(page); + len = PAGE_ALIGN(sg->offset + sg->length); + res = ipa3_iommu_map(smmu_domain, va, phys, + len, IOMMU_READ | IOMMU_WRITE); + if (res) { + IPAERR("Fail to map pa=%pa\n", &phys); + return -EINVAL; + } + va += len; + count++; + } + } else { + res = ipa3_iommu_map(smmu_domain, + rounddown(iova, PAGE_SIZE), + rounddown(iova, PAGE_SIZE), + roundup(size + iova - + rounddown(iova, PAGE_SIZE), + PAGE_SIZE), + IOMMU_READ | IOMMU_WRITE); + if (res) { + IPAERR("Fail to map 0x%llx\n", iova); + return -EINVAL; + } + } + } else { + res = iommu_unmap(smmu_domain, + rounddown(iova, PAGE_SIZE), + roundup(size + iova - rounddown(iova, PAGE_SIZE), + PAGE_SIZE)); + if (res != roundup(size + iova - rounddown(iova, PAGE_SIZE), + PAGE_SIZE)) { + IPAERR("Fail to unmap 0x%llx\n", iova); + return -EINVAL; + } + } + IPADBG("Peer buff %s 0x%llx\n", map ? 
"map" : "unmap", iova); + return 0; +} + +void ipa3_register_lock_unlock_callback(int (*client_cb)(bool is_lock), + u32 ipa_ep_idx) +{ + struct ipa3_ep_context *ep; + + IPADBG("entry\n"); + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (!ep->valid) { + IPAERR("Invalid EP\n"); + return; + } + + if (client_cb == NULL) { + IPAERR("Bad Param"); + return; + } + + ep->client_lock_unlock = client_cb; + IPADBG("exit\n"); +} + +void ipa3_deregister_lock_unlock_callback(u32 ipa_ep_idx) +{ + struct ipa3_ep_context *ep; + + IPADBG("entry\n"); + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (!ep->valid) { + IPAERR("Invalid EP\n"); + return; + } + + if (ep->client_lock_unlock == NULL) { + IPAERR("client_lock_unlock is already NULL"); + return; + } + + ep->client_lock_unlock = NULL; + IPADBG("exit\n"); +} + +static void client_lock_unlock_cb(u32 ipa_ep_idx, bool is_lock) +{ + struct ipa3_ep_context *ep; + + IPADBG("entry\n"); + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (!ep->valid) { + IPAERR("Invalid EP\n"); + return; + } + + if (ep->client_lock_unlock) + ep->client_lock_unlock(is_lock); + + IPADBG("exit\n"); +} + +int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params, + struct ipa_req_chan_out_params *out_params) +{ + int ipa_ep_idx; + int result = -EFAULT; + struct ipa3_ep_context *ep; + struct ipahal_reg_ep_cfg_status ep_status; + unsigned long gsi_dev_hdl; + enum gsi_status gsi_res; + const struct ipa_gsi_ep_config *gsi_ep_cfg_ptr; + + IPADBG("entry\n"); + if (params == NULL || out_params == NULL || + !ipa3_is_legal_params(params)) { + IPAERR("bad parameters\n"); + return -EINVAL; + } + + ipa_ep_idx = ipa3_get_ep_mapping(params->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to alloc EP.\n"); + goto fail; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (ep->valid) { + IPAERR("EP already allocated.\n"); + goto fail; + } + + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + ep->skip_ep_cfg = params->skip_ep_cfg; + ep->valid = 1; + ep->client = params->client; + ep->client_notify = params->notify; + ep->priv = params->priv; + ep->keep_ipa_awake = params->keep_ipa_awake; + + + /* Config QMB for USB_CONS ep */ + if (!IPA_CLIENT_IS_PROD(ep->client)) { + IPADBG("Configuring QMB on USB CONS pipe\n"); + if (ipa_ep_idx >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[ipa_ep_idx].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + result = ipa3_cfg_ep_cfg(ipa_ep_idx, ¶ms->ipa_ep_cfg.cfg); + if (result) { + IPAERR("fail to configure QMB.\n"); + return result; + } + } + + if (!ep->skip_ep_cfg) { + if (ipa3_cfg_ep(ipa_ep_idx, ¶ms->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto ipa_cfg_ep_fail; + } + /* Setting EP status 0 */ + memset(&ep_status, 0, sizeof(ep_status)); + if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) { + IPAERR("fail to configure status of EP.\n"); + goto ipa_cfg_ep_fail; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("Skipping endpoint configuration.\n"); + } + + out_params->clnt_hdl = ipa_ep_idx; + + result = ipa3_enable_data_path(out_params->clnt_hdl); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + out_params->clnt_hdl); + goto ipa_cfg_ep_fail; + } + + gsi_dev_hdl = ipa3_ctx->gsi_dev_hdl; + gsi_res = gsi_alloc_evt_ring(¶ms->evt_ring_params, gsi_dev_hdl, + &ep->gsi_evt_ring_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error allocating event ring: %d\n", gsi_res); + result = -EFAULT; + goto ipa_cfg_ep_fail; + } + + gsi_res = 
gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl, + params->evt_scratch); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error writing event ring scratch: %d\n", gsi_res); + result = -EFAULT; + goto write_evt_scratch_fail; + } + + gsi_ep_cfg_ptr = ipa3_get_gsi_ep_info(ep->client); + if (gsi_ep_cfg_ptr == NULL) { + IPAERR("Error ipa3_get_gsi_ep_info ret NULL\n"); + result = -EFAULT; + goto write_evt_scratch_fail; + } + + params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl; + params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num; + params->chan_params.prefetch_mode = gsi_ep_cfg_ptr->prefetch_mode; + params->chan_params.empty_lvl_threshold = + gsi_ep_cfg_ptr->prefetch_threshold; + gsi_res = gsi_alloc_channel(¶ms->chan_params, gsi_dev_hdl, + &ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error allocating channel: %d, chan_id: %d\n", gsi_res, + params->chan_params.ch_id); + result = -EFAULT; + goto write_evt_scratch_fail; + } + + memcpy(&ep->chan_scratch, ¶ms->chan_scratch, + sizeof(union __packed gsi_channel_scratch)); + + /* + * Update scratch for MCS smart prefetch: + * Starting IPA4.5, smart prefetch implemented by H/W. + * At IPA 4.0/4.1/4.2, we do not use MCS smart prefetch + * so keep the fields zero. + */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + ep->chan_scratch.xdci.max_outstanding_tre = + params->chan_params.re_size * gsi_ep_cfg_ptr->ipa_if_tlv; + } + + gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl, + params->chan_scratch); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error writing channel scratch: %d\n", gsi_res); + result = -EFAULT; + goto write_chan_scratch_fail; + } + + gsi_res = gsi_query_channel_db_addr(ep->gsi_chan_hdl, + &out_params->db_reg_phs_addr_lsb, + &out_params->db_reg_phs_addr_msb); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error querying channel DB registers addresses: %d\n", + gsi_res); + result = -EFAULT; + goto write_chan_scratch_fail; + } + + ep->gsi_mem_info.evt_ring_len = params->evt_ring_params.ring_len; + ep->gsi_mem_info.evt_ring_base_addr = + params->evt_ring_params.ring_base_addr; + ep->gsi_mem_info.evt_ring_base_vaddr = + params->evt_ring_params.ring_base_vaddr; + ep->gsi_mem_info.chan_ring_len = params->chan_params.ring_len; + ep->gsi_mem_info.chan_ring_base_addr = + params->chan_params.ring_base_addr; + ep->gsi_mem_info.chan_ring_base_vaddr = + params->chan_params.ring_base_vaddr; + + ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(params->client)) + ipa3_install_dflt_flt_rules(ipa_ep_idx); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + IPADBG("client %d (ep: %d) connected\n", params->client, ipa_ep_idx); + IPADBG("exit\n"); + + return 0; + +write_chan_scratch_fail: + gsi_dealloc_channel(ep->gsi_chan_hdl); +write_evt_scratch_fail: + gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); +ipa_cfg_ep_fail: + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +fail: + return result; +} + +int ipa3_set_usb_max_packet_size( + enum ipa_usb_max_usb_packet_size usb_max_packet_size) +{ + struct gsi_device_scratch dev_scratch; + enum gsi_status gsi_res; + + IPADBG("entry\n"); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&dev_scratch, 0, sizeof(struct gsi_device_scratch)); + dev_scratch.mhi_base_chan_idx_valid = false; + dev_scratch.max_usb_pkt_size_valid = true; + dev_scratch.max_usb_pkt_size = usb_max_packet_size; + + gsi_res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl, + &dev_scratch); + if (gsi_res != 
GSI_STATUS_SUCCESS) { + IPAERR("Error writing device scratch: %d\n", gsi_res); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + IPADBG("exit\n"); + return 0; +} + +/* This function called as part of usb pipe resume */ +int ipa3_xdci_connect(u32 clnt_hdl) +{ + int result; + struct ipa3_ep_context *ep; + + IPADBG("entry\n"); + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + result = ipa3_start_gsi_channel(clnt_hdl); + if (result) { + IPAERR("failed to start gsi channel clnt_hdl=%u\n", clnt_hdl); + goto exit; + } + + result = ipa3_enable_data_path(clnt_hdl); + if (result) { + IPAERR("enable data path failed res=%d clnt_hdl=%d.\n", result, + clnt_hdl); + goto stop_ch; + } + + IPADBG("exit\n"); + goto exit; + +stop_ch: + (void)ipa3_stop_gsi_channel(clnt_hdl); +exit: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + + +/* This function called as part of usb pipe connect */ +int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid) +{ + struct ipa3_ep_context *ep; + int result = -EFAULT; + enum gsi_status gsi_res; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || + xferrscidx > IPA_XFER_RSC_IDX_MAX) { + IPAERR("Bad parameters.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + if (xferrscidx_valid) { + ep->chan_scratch.xdci.xferrscidx = xferrscidx; + gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl, + ep->chan_scratch); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error writing channel scratch: %d\n", gsi_res); + goto write_chan_scratch_fail; + } + } + + if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ep->ep_delay_set = true; + + result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) failed result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) success\n", clnt_hdl); + } else { + ep->ep_delay_set = false; + } + + gsi_res = gsi_start_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error starting channel: %d\n", gsi_res); + goto write_chan_scratch_fail; + } + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("exit\n"); + return 0; + +write_chan_scratch_fail: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info, + unsigned long chan_hdl) +{ + enum gsi_status gsi_res; + + memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info)); + gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error querying channel info: %d\n", gsi_res); + return -EFAULT; + } + if (!gsi_chan_info->evt_valid) { + IPAERR("Event info invalid\n"); + return -EFAULT; + } + + return 0; +} + +static bool ipa3_is_xdci_channel_with_given_info_empty( + struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info) +{ + bool is_empty = false; + + if (!IPA_CLIENT_IS_CONS(ep->client)) { + /* For UL channel: chan.RP == chan.WP */ + is_empty = (chan_info->rp == chan_info->wp); + } else { + /* For DL channel: */ + if (chan_info->wp != + 
(ep->gsi_mem_info.chan_ring_base_addr + + ep->gsi_mem_info.chan_ring_len - + GSI_CHAN_RE_SIZE_16B)) { + /* if chan.WP != LINK TRB: chan.WP == evt.RP */ + is_empty = (chan_info->wp == chan_info->evt_rp); + } else { + /* + * if chan.WP == LINK TRB: chan.base_xfer_ring_addr + * == evt.RP + */ + is_empty = (ep->gsi_mem_info.chan_ring_base_addr == + chan_info->evt_rp); + } + } + + return is_empty; +} + +static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep, + bool *is_empty) +{ + struct gsi_chan_info chan_info; + int res; + + if (!ep || !is_empty || !ep->valid) { + IPAERR("Input Error\n"); + return -EFAULT; + } + + res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl); + if (res) { + IPAERR("Failed to get GSI channel info\n"); + return -EFAULT; + } + + *is_empty = ipa3_is_xdci_channel_with_given_info_empty(ep, &chan_info); + + return 0; +} + +int ipa3_enable_force_clear(u32 request_id, bool throttle_source, + u32 source_pipe_bitmask) +{ + struct ipa_enable_force_clear_datapath_req_msg_v01 req; + int result; + + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + req.source_pipe_bitmask = source_pipe_bitmask; + if (throttle_source) { + req.throttle_source_valid = 1; + req.throttle_source = 1; + } + result = ipa3_qmi_enable_force_clear_datapath_send(&req); + if (result) { + IPAERR("ipa3_qmi_enable_force_clear_datapath_send failed %d\n", + result); + return result; + } + + return 0; +} + +int ipa3_disable_force_clear(u32 request_id) +{ + struct ipa_disable_force_clear_datapath_req_msg_v01 req; + int result; + + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + result = ipa3_qmi_disable_force_clear_datapath_send(&req); + if (result) { + IPAERR("ipa3_qmi_disable_force_clear_datapath_send failed %d\n", + result); + return result; + } + + return 0; +} + +/* Clocks should be voted before invoking this function */ +static int ipa3_xdci_stop_gsi_channel(u32 clnt_hdl, bool *stop_in_proc) +{ + int res; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || + !stop_in_proc) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + res = ipa3_stop_gsi_channel(clnt_hdl); + if (res != 0 && res != -GSI_STATUS_AGAIN && + res != -GSI_STATUS_TIMED_OUT) { + IPAERR("xDCI stop channel failed res=%d\n", res); + return -EFAULT; + } + + if (res) + *stop_in_proc = true; + else + *stop_in_proc = false; + + IPADBG("xDCI channel is %s (result=%d)\n", + res ? 
"STOP_IN_PROC/TimeOut" : "STOP", res); + + IPADBG("exit\n"); + return 0; +} + +/* Clocks should be voted before invoking this function */ +static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl, + bool *stop_in_proc) +{ + unsigned long jiffies_start; + unsigned long jiffies_timeout = + msecs_to_jiffies(IPA_CHANNEL_STOP_IN_PROC_TO_MSEC); + int res; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || + !stop_in_proc) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + jiffies_start = jiffies; + while (1) { + res = ipa3_xdci_stop_gsi_channel(clnt_hdl, + stop_in_proc); + if (res) { + IPAERR("failed to stop xDCI channel hdl=%d\n", + clnt_hdl); + return res; + } + + if (!*stop_in_proc) { + IPADBG("xDCI channel STOP hdl=%d\n", clnt_hdl); + return res; + } + + /* + * Give chance to the previous stop request to be accomplished + * before the retry + */ + udelay(IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC); + + if (time_after(jiffies, jiffies_start + jiffies_timeout)) { + IPADBG("timeout waiting for xDCI channel emptiness\n"); + return res; + } + } +} + +/* Clocks should be voted for before invoking this function */ +static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id, + u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl, + bool remove_delay) +{ + int result; + bool is_empty = false; + int i; + bool stop_in_proc; + struct ipa3_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + + IPADBG("entry\n"); + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + /* first try to stop the channel */ + result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl, + &stop_in_proc); + if (result) { + IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + goto exit; + } + if (!stop_in_proc) + goto exit; + + if (remove_delay && ep->ep_delay_set) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = false; + result = ipa3_cfg_ep_ctrl(clnt_hdl, + &ep_cfg_ctrl); + if (result) { + IPAERR + ("client (ep: %d) failed to remove delay result=%d\n", + clnt_hdl, result); + } else { + IPADBG("client (ep: %d) delay removed\n", + clnt_hdl); + ep->ep_delay_set = false; + } + } + + /* if stop_in_proc, lets wait for emptiness */ + for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) { + result = ipa3_is_xdci_channel_empty(ep, &is_empty); + if (result) + goto exit; + if (is_empty) + break; + udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC); + } + /* In case of empty, lets try to stop the channel again */ + if (is_empty) { + result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl, + &stop_in_proc); + if (result) { + IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + goto exit; + } + if (!stop_in_proc) + goto exit; + } + /* if still stop_in_proc or not empty, activate force clear */ + if (should_force_clear) { + result = ipa3_enable_force_clear(qmi_req_id, false, + source_pipe_bitmask); + if (result) { + struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 }; + + /* + * assuming here modem SSR\shutdown, AP can remove + * the delay in this case + */ + IPAERR( + "failed to force clear %d, remove delay from SCND reg\n" + , result); + ep_ctrl_scnd.endp_delay = false; + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl, + &ep_ctrl_scnd); + } + } + /* with force clear, wait for emptiness */ + for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) { + result = 
ipa3_is_xdci_channel_empty(ep, &is_empty); + if (result) + goto disable_force_clear_and_exit; + if (is_empty) + break; + + udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC); + } + /* try to stop for the last time */ + result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl, + &stop_in_proc); + if (result) { + IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + goto disable_force_clear_and_exit; + } + result = stop_in_proc ? -EFAULT : 0; + +disable_force_clear_and_exit: + if (should_force_clear) + ipa3_disable_force_clear(qmi_req_id); +exit: + if (remove_delay && ep->ep_delay_set) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = false; + result = ipa3_cfg_ep_ctrl(clnt_hdl, + &ep_cfg_ctrl); + if (result) { + IPAERR + ("client (ep: %d) failed to remove delay result=%d\n", + clnt_hdl, result); + } else { + IPADBG("client (ep: %d) delay removed\n", + clnt_hdl); + ep->ep_delay_set = false; + } + } + IPADBG("exit\n"); + return result; +} + +/* + * Set/reset ep_delay for CLIENT PROD pipe + * Clocks should be voted for before calling this API + * Locks should be taken before calling this API + */ + +int ipa3_set_reset_client_prod_pipe_delay(bool set_reset, + enum ipa_client_type client) +{ + int result = 0; + int pipe_idx; + struct ipa3_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_ctrl; + + memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_ctrl.ipa_ep_delay = set_reset; + + if (IPA_CLIENT_IS_CONS(client)) { + IPAERR("client (%d) not PROD\n", client); + return -EINVAL; + } + + pipe_idx = ipa3_get_ep_mapping(client); + + if (pipe_idx == IPA_EP_NOT_ALLOCATED) { + IPAERR("client (%d) not valid\n", client); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[pipe_idx]; + + /* Setting delay on USB_PROD with skip_ep_cfg */ + client_lock_unlock_cb(pipe_idx, true); + if (ep->valid && ep->skip_ep_cfg) { + ep->ep_delay_set = ep_ctrl.ipa_ep_delay; + result = ipa3_cfg_ep_ctrl(pipe_idx, &ep_ctrl); + if (result) + IPAERR("client (ep: %d) failed result=%d\n", + pipe_idx, result); + else + IPADBG("client (ep: %d) success\n", pipe_idx); + } + client_lock_unlock_cb(pipe_idx, false); + return result; +} + +int ipa3_set_reset_client_cons_pipe_sus_holb(bool set_reset, + enum ipa_client_type client) +{ + int pipe_idx; + struct ipa3_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_suspend; + struct ipa_ep_cfg_holb ep_holb; + + memset(&ep_suspend, 0, sizeof(ep_suspend)); + memset(&ep_holb, 0, sizeof(ep_holb)); + + ep_suspend.ipa_ep_suspend = set_reset; + ep_holb.tmr_val = 0; + ep_holb.en = set_reset; + + if (IPA_CLIENT_IS_PROD(client)) { + IPAERR("client (%d) not CONS\n", client); + return -EINVAL; + } + + pipe_idx = ipa3_get_ep_mapping(client); + + if (pipe_idx == IPA_EP_NOT_ALLOCATED) { + IPAERR("client (%d) not valid\n", client); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[pipe_idx]; + /* Setting sus/holb on MHI_CONS with skip_ep_cfg */ + client_lock_unlock_cb(pipe_idx, true); + if (ep->valid && ep->skip_ep_cfg) { + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_CTRL_n, + pipe_idx, &ep_suspend); + /* + * ipa3_cfg_ep_holb is not used here because we are + * setting HOLB on Q6 pipes, and from the APPS perspective + * they are not valid; therefore, the above function + * will fail.
+ */ + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, + pipe_idx, &ep_holb); + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_HOL_BLOCK_EN_n, + pipe_idx, &ep_holb); + } + client_lock_unlock_cb(pipe_idx, false); + return 0; +} + +void ipa3_xdci_ep_delay_rm(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + int result; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->ep_delay_set) { + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = false; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP + (ipa3_get_client_mapping(clnt_hdl)); + + result = ipa3_cfg_ep_ctrl(clnt_hdl, + &ep_cfg_ctrl); + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP + (ipa3_get_client_mapping(clnt_hdl)); + + if (result) { + IPAERR + ("client (ep: %d) failed to remove delay result=%d\n", + clnt_hdl, result); + } else { + IPADBG("client (ep: %d) delay removed\n", + clnt_hdl); + ep->ep_delay_set = false; + } + } +} + +int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id) +{ + struct ipa3_ep_context *ep; + int result; + u32 source_pipe_bitmask = 0; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipa3_disable_data_path(clnt_hdl); + + if (!IPA_CLIENT_IS_CONS(ep->client)) { + IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + source_pipe_bitmask = 1 << + ipa3_get_ep_mapping(ep->client); + result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id, + source_pipe_bitmask, should_force_clear, clnt_hdl, + true); + if (result) { + IPAERR("Fail to stop UL channel with data drain\n"); + WARN_ON(1); + goto stop_chan_fail; + } + } else { + IPADBG("Stopping CONS channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + result = ipa3_stop_gsi_channel(clnt_hdl); + if (result) { + IPAERR("Error stopping channel (CONS client): %d\n", + result); + goto stop_chan_fail; + } + } + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("exit\n"); + return 0; + +stop_chan_fail: + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +int ipa3_release_gsi_channel(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + int result = -EFAULT; + enum gsi_status gsi_res; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error deallocating channel: %d\n", gsi_res); + goto dealloc_chan_fail; + } + + gsi_res = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error deallocating event: %d\n", gsi_res); + goto dealloc_chan_fail; + } + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) + ipa3_delete_dflt_flt_rules(clnt_hdl); + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context)); + + IPADBG("exit\n"); + return 
0; + +dealloc_chan_fail: + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + bool should_force_clear, u32 qmi_req_id, bool is_dpl) +{ + struct ipa3_ep_context *ul_ep = NULL; + struct ipa3_ep_context *dl_ep; + int result = -EFAULT; + u32 source_pipe_bitmask = 0; + bool dl_data_pending = true; + bool ul_data_pending = true; + int i; + bool is_empty = false; + struct gsi_chan_info ul_gsi_chan_info, dl_gsi_chan_info; + int aggr_active_bitmap = 0; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + + /* In case of DPL, dl is the DPL channel/client */ + + IPADBG("entry\n"); + if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[dl_clnt_hdl].valid == 0 || + (!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + dl_ep = &ipa3_ctx->ep[dl_clnt_hdl]; + if (!is_dpl) + ul_ep = &ipa3_ctx->ep[ul_clnt_hdl]; + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl)); + + result = ipa3_get_gsi_chan_info(&dl_gsi_chan_info, + dl_ep->gsi_chan_hdl); + if (result) + goto disable_clk_and_exit; + + if (!is_dpl) { + result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info, + ul_ep->gsi_chan_hdl); + if (result) + goto disable_clk_and_exit; + } + + for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) { + if (!dl_data_pending && !ul_data_pending) + break; + result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty); + if (result) + goto disable_clk_and_exit; + if (!is_empty) { + dl_data_pending = true; + break; + } + dl_data_pending = false; + if (!is_dpl) { + result = ipa3_is_xdci_channel_empty(ul_ep, &is_empty); + if (result) + goto disable_clk_and_exit; + ul_data_pending = !is_empty; + } else { + ul_data_pending = false; + } + + udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC); + } + + if (!dl_data_pending) { + aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + if (aggr_active_bitmap & (1 << dl_clnt_hdl)) { + IPADBG("DL/DPL data pending due to open aggr. 
frame\n"); + dl_data_pending = true; + } + } + if (dl_data_pending) { + IPAERR("DL/DPL data pending, can't suspend\n"); + result = -EFAULT; + goto disable_clk_and_exit; + } + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + /* Suspend the DL/DPL EP */ + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = true; + ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl); + } + + /* + * Check if DL/DPL channel is empty again, data could enter the channel + * before its IPA EP was suspended + */ + result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty); + if (result) + goto unsuspend_dl_and_exit; + if (!is_empty) { + IPAERR("DL/DPL data pending, can't suspend\n"); + result = -EFAULT; + goto unsuspend_dl_and_exit; + } + + /* Stop DL channel */ + result = ipa3_stop_gsi_channel(dl_clnt_hdl); + if (result) { + IPAERR("Error stopping DL/DPL channel: %d\n", result); + result = -EFAULT; + goto unsuspend_dl_and_exit; + } + + /* STOP UL channel */ + if (!is_dpl) { + source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client); + result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id, + source_pipe_bitmask, should_force_clear, ul_clnt_hdl, + false); + if (result) { + IPAERR("Error stopping UL channel: result = %d\n", + result); + goto start_dl_and_exit; + } + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl)); + + IPADBG("exit\n"); + return 0; + +start_dl_and_exit: + gsi_start_channel(dl_ep->gsi_chan_hdl); +unsuspend_dl_and_exit: + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + /* Unsuspend the DL EP */ + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = false; + ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl); + } +disable_clk_and_exit: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl)); + return result; +} + +int ipa3_start_gsi_channel(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + int result = -EFAULT; + enum gsi_status gsi_res; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameters.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + gsi_res = gsi_start_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error starting channel: %d\n", gsi_res); + goto start_chan_fail; + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("exit\n"); + return 0; + +start_chan_fail: + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl) +{ + struct ipa3_ep_context *ul_ep = NULL; + struct ipa3_ep_context *dl_ep = NULL; + enum gsi_status gsi_res; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + + /* In case of DPL, dl is the DPL channel/client */ + + IPADBG("entry\n"); + if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[dl_clnt_hdl].valid == 0 || + (!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + dl_ep = &ipa3_ctx->ep[dl_clnt_hdl]; + if (!is_dpl) + ul_ep = &ipa3_ctx->ep[ul_clnt_hdl]; + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl)); + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + /* Unsuspend the DL/DPL EP */ + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = false; + 
ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl); + } + + /* Start DL channel */ + gsi_res = gsi_start_channel(dl_ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) + IPAERR("Error starting DL channel: %d\n", gsi_res); + + /* Start UL channel */ + if (!is_dpl) { + gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) + IPAERR("Error starting UL channel: %d\n", gsi_res); + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl)); + + IPADBG("exit\n"); + return 0; +} +/** + * ipa3_clear_endpoint_delay() - Remove ep delay set on the IPA pipe before + * client disconnect. + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to remove + * ep delay on an IPA consumer pipe before disconnect in non-GPI mode. This + * API expects the caller to take responsibility for freeing any needed + * headers, routing and filtering tables and rules as needed. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_clear_endpoint_delay(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_ctrl = {0}; + struct ipa_enable_force_clear_datapath_req_msg_v01 req = {0}; + int res; + + if (unlikely(!ipa3_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ipa3_ctx->tethered_flow_control) { + IPADBG("APPS flow control is not enabled\n"); + /* Send a message to the modem to disable flow control honoring. */ + req.request_id = clnt_hdl; + req.source_pipe_bitmask = 1 << clnt_hdl; + res = ipa3_qmi_enable_force_clear_datapath_send(&req); + if (res) { + IPADBG("enable_force_clear_datapath failed %d\n", + res); + } + ep->qmi_request_sent = true; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + /* Set disconnect in progress flag so further flow control events are + * not honored. + */ + spin_lock(&ipa3_ctx->disconnect_lock); + ep->disconnect_in_progress = true; + spin_unlock(&ipa3_ctx->disconnect_lock); + + /* If flow is disabled at this point, restore the ep state. */ + ep_ctrl.ipa_ep_delay = false; + ep_ctrl.ipa_ep_suspend = false; + ipa3_cfg_ep_ctrl(clnt_hdl, &ep_ctrl); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl); + + return 0; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c new file mode 100644 index 000000000000..d59df68ed67c --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -0,0 +1,2372 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */ + +#ifdef CONFIG_DEBUG_FS + +#include +#include +#include +#include "ipa_i.h" +#include "../ipa_rm_i.h" +#include "ipahal/ipahal_nat.h" +#include "ipa_odl.h" + +#define IPA_MAX_ENTRY_STRING_LEN 500 +#define IPA_MAX_MSG_LEN 4096 +#define IPA_DBG_MAX_RULE_IN_TBL 128 +#define IPA_DBG_ACTIVE_CLIENT_BUF_SIZE ((IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN \ + * IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) + IPA_MAX_MSG_LEN) + +#define IPA_DUMP_STATUS_FIELD(f) \ + pr_err(#f "=0x%x\n", status->f) + +#define IPA_READ_ONLY_MODE 0444 +#define IPA_READ_WRITE_MODE 0664 +#define IPA_WRITE_ONLY_MODE 0220 + +struct ipa3_debugfs_file { + const char *name; + umode_t mode; + void *data; + const struct file_operations fops; +}; + + +const char *ipa3_event_name[] = { + __stringify(WLAN_CLIENT_CONNECT), + __stringify(WLAN_CLIENT_DISCONNECT), + __stringify(WLAN_CLIENT_POWER_SAVE_MODE), + __stringify(WLAN_CLIENT_NORMAL_MODE), + __stringify(SW_ROUTING_ENABLE), + __stringify(SW_ROUTING_DISABLE), + __stringify(WLAN_AP_CONNECT), + __stringify(WLAN_AP_DISCONNECT), + __stringify(WLAN_STA_CONNECT), + __stringify(WLAN_STA_DISCONNECT), + __stringify(WLAN_CLIENT_CONNECT_EX), + __stringify(WLAN_SWITCH_TO_SCC), + __stringify(WLAN_SWITCH_TO_MCC), + __stringify(WLAN_WDI_ENABLE), + __stringify(WLAN_WDI_DISABLE), + __stringify(WAN_UPSTREAM_ROUTE_ADD), + __stringify(WAN_UPSTREAM_ROUTE_DEL), + __stringify(WAN_EMBMS_CONNECT), + __stringify(WAN_XLAT_CONNECT), + __stringify(ECM_CONNECT), + __stringify(ECM_DISCONNECT), + __stringify(IPA_TETHERING_STATS_UPDATE_STATS), + __stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS), + __stringify(IPA_QUOTA_REACH), + __stringify(IPA_SSR_BEFORE_SHUTDOWN), + __stringify(IPA_SSR_AFTER_POWERUP), + __stringify(ADD_VLAN_IFACE), + __stringify(DEL_VLAN_IFACE), + __stringify(ADD_L2TP_VLAN_MAPPING), + __stringify(DEL_L2TP_VLAN_MAPPING), + __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT), + __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT), + __stringify(ADD_BRIDGE_VLAN_MAPPING), + __stringify(DEL_BRIDGE_VLAN_MAPPING), + __stringify(WLAN_FWR_SSR_BEFORE_SHUTDOWN), + __stringify(IPA_GSB_CONNECT), + __stringify(IPA_GSB_DISCONNECT), +}; + +const char *ipa3_hdr_l2_type_name[] = { + __stringify(IPA_HDR_L2_NONE), + __stringify(IPA_HDR_L2_ETHERNET_II), + __stringify(IPA_HDR_L2_802_3), +}; + +const char *ipa3_hdr_proc_type_name[] = { + __stringify(IPA_HDR_PROC_NONE), + __stringify(IPA_HDR_PROC_ETHII_TO_ETHII), + __stringify(IPA_HDR_PROC_ETHII_TO_802_3), + __stringify(IPA_HDR_PROC_802_3_TO_ETHII), + __stringify(IPA_HDR_PROC_802_3_TO_802_3), + __stringify(IPA_HDR_PROC_L2TP_HEADER_ADD), + __stringify(IPA_HDR_PROC_L2TP_HEADER_REMOVE), +}; + +static struct dentry *dent; +static char dbg_buff[IPA_MAX_MSG_LEN]; +static char *active_clients_buf; + +static s8 ep_reg_idx; +static void *ipa_ipc_low_buff; + + +static ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + struct ipahal_reg_shared_mem_size smem_sz; + + memset(&smem_sz, 0, sizeof(smem_sz)); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz); + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA_VERSION=0x%x\n" + "IPA_COMP_HW_VERSION=0x%x\n" + "IPA_ROUTE=0x%x\n" + "IPA_SHARED_MEM_RESTRICTED=0x%x\n" + "IPA_SHARED_MEM_SIZE=0x%x\n" + "IPA_QTIME_TIMESTAMP_CFG=0x%x\n" + "IPA_TIMERS_PULSE_GRAN_CFG=0x%x\n" + "IPA_TIMERS_XO_CLK_DIV_CFG=0x%x\n", + ipahal_read_reg(IPA_VERSION), + ipahal_read_reg(IPA_COMP_HW_VERSION), + ipahal_read_reg(IPA_ROUTE), + smem_sz.shared_mem_baddr, + 
smem_sz.shared_mem_sz, + ipahal_read_reg(IPA_QTIME_TIMESTAMP_CFG), + ipahal_read_reg(IPA_TIMERS_PULSE_GRAN_CFG), + ipahal_read_reg(IPA_TIMERS_XO_CLK_DIV_CFG)); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa3_write_ep_holb(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct ipa_ep_cfg_holb holb; + u32 en; + u32 tmr_val; + u32 ep_idx; + unsigned long missing; + char *sptr, *token; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + + sptr = dbg_buff; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &ep_idx)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &en)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &tmr_val)) + return -EINVAL; + + holb.en = en; + holb.tmr_val = tmr_val; + + ipa3_cfg_ep_holb(ep_idx, &holb); + + return count; +} + +static ssize_t ipa3_write_ep_reg(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + if (option >= ipa3_ctx->ipa_num_pipes) { + IPAERR("bad pipe specified %u\n", option); + return count; + } + + ep_reg_idx = option; + + return count; +} + +/** + * _ipa_read_ep_reg_v3_0() - Reads and prints endpoint configuration registers + * + * Returns the number of characters printed + */ +int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe) +{ + return scnprintf( + dbg_buff, IPA_MAX_MSG_LEN, + "IPA_ENDP_INIT_NAT_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n" + "IPA_ENDP_INIT_MODE_%u=0x%x\n" + "IPA_ENDP_INIT_AGGR_%u=0x%x\n" + "IPA_ENDP_INIT_ROUTE_%u=0x%x\n" + "IPA_ENDP_INIT_CTRL_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n" + "IPA_ENDP_INIT_DEAGGR_%u=0x%x\n" + "IPA_ENDP_INIT_CFG_%u=0x%x\n", + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_ROUTE_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe)); +} + +/** + * _ipa_read_ep_reg_v4_0() - Reads and prints endpoint configuration registers + * + * Returns the number of characters printed + * Removed IPA_ENDP_INIT_ROUTE_n from v3 + */ +int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe) +{ + return scnprintf( + dbg_buff, IPA_MAX_MSG_LEN, + "IPA_ENDP_INIT_NAT_%u=0x%x\n" + "IPA_ENDP_INIT_CONN_TRACK_n%u=0x%x\n" + "IPA_ENDP_INIT_HDR_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n" + "IPA_ENDP_INIT_MODE_%u=0x%x\n" + "IPA_ENDP_INIT_AGGR_%u=0x%x\n" + "IPA_ENDP_INIT_CTRL_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n" + 
"IPA_ENDP_INIT_DEAGGR_%u=0x%x\n" + "IPA_ENDP_INIT_CFG_%u=0x%x\n", + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CONN_TRACK_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe)); +} + +static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int i; + int start_idx; + int end_idx; + int size = 0; + int ret; + loff_t pos; + + /* negative ep_reg_idx means all registers */ + if (ep_reg_idx < 0) { + start_idx = 0; + end_idx = ipa3_ctx->ipa_num_pipes; + } else { + start_idx = ep_reg_idx; + end_idx = start_idx + 1; + } + pos = *ppos; + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + for (i = start_idx; i < end_idx; i++) { + + nbytes = ipa3_ctx->ctrl->ipa3_read_ep_reg(dbg_buff, + IPA_MAX_MSG_LEN, i); + + *ppos = pos; + ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff, + nbytes); + if (ret < 0) { + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return ret; + } + + size += ret; + ubuf += nbytes; + count -= nbytes; + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + *ppos = pos + size; + return size; +} + +static ssize_t ipa3_write_keep_awake(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + if (option == 1) + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + else if (option == 0) + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + else + return -EFAULT; + + return count; +} + +static ssize_t ipa3_read_keep_awake(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + + mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA APPS power state is ON\n"); + else + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA APPS power state is OFF\n"); + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa3_read_hdr(struct file *file, char __user *ubuf, size_t count, + loff_t *ppos) +{ + int nbytes = 0; + int i = 0; + struct ipa3_hdr_entry *entry; + + mutex_lock(&ipa3_ctx->lock); + + if (ipa3_ctx->hdr_tbl_lcl) + pr_err("Table resides on local memory\n"); + else + pr_err("Table resides on system (ddr) memory\n"); + + list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list, + link) { + if (entry->cookie != IPA_HDR_COOKIE) + continue; + nbytes = scnprintf( + dbg_buff, + IPA_MAX_MSG_LEN, + "name:%s len=%d ref=%d partial=%d type=%s ", + entry->name, + entry->hdr_len, + entry->ref_cnt, + entry->is_partial, + ipa3_hdr_l2_type_name[entry->type]); + + if (entry->is_hdr_proc_ctx) { + nbytes += scnprintf( + dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "phys_base=0x%pa ", + &entry->phys_base); + } else { + nbytes += scnprintf( + dbg_buff + nbytes, + IPA_MAX_MSG_LEN - 
nbytes, + "ofst=%u ", + entry->offset_entry->offset >> 2); + } + for (i = 0; i < entry->hdr_len; i++) { + scnprintf(dbg_buff + nbytes + i * 2, + IPA_MAX_MSG_LEN - nbytes - i * 2, + "%02x", entry->hdr[i]); + } + scnprintf(dbg_buff + nbytes + entry->hdr_len * 2, + IPA_MAX_MSG_LEN - nbytes - entry->hdr_len * 2, + "\n"); + pr_err("%s", dbg_buff); + } + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib, + enum ipa_ip_type ip) +{ + uint32_t addr[4]; + uint32_t mask[4]; + int i; + + if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK) + pr_err("is_pure_ack "); + + if (attrib->attrib_mask & IPA_FLT_TOS) + pr_err("tos:%d ", attrib->u.v4.tos); + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + pr_err("tos_value:%d ", attrib->tos_value); + pr_err("tos_mask:%d ", attrib->tos_mask); + } + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) + pr_err("protocol:%d ", attrib->u.v4.protocol); + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ip == IPA_IP_v4) { + addr[0] = htonl(attrib->u.v4.src_addr); + mask[0] = htonl(attrib->u.v4.src_addr_mask); + pr_err( + "src_addr:%pI4 src_addr_mask:%pI4 ", + addr + 0, mask + 0); + } else if (ip == IPA_IP_v6) { + for (i = 0; i < 4; i++) { + addr[i] = htonl(attrib->u.v6.src_addr[i]); + mask[i] = htonl(attrib->u.v6.src_addr_mask[i]); + } + pr_err( + "src_addr:%pI6 src_addr_mask:%pI6 ", + addr + 0, mask + 0); + } + } + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ip == IPA_IP_v4) { + addr[0] = htonl(attrib->u.v4.dst_addr); + mask[0] = htonl(attrib->u.v4.dst_addr_mask); + pr_err( + "dst_addr:%pI4 dst_addr_mask:%pI4 ", + addr + 0, mask + 0); + } else if (ip == IPA_IP_v6) { + for (i = 0; i < 4; i++) { + addr[i] = htonl(attrib->u.v6.dst_addr[i]); + mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]); + } + pr_err( + "dst_addr:%pI6 dst_addr_mask:%pI6 ", + addr + 0, mask + 0); + } + } + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + pr_err("src_port_range:%u %u ", + attrib->src_port_lo, + attrib->src_port_hi); + } + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + pr_err("dst_port_range:%u %u ", + attrib->dst_port_lo, + attrib->dst_port_hi); + } + if (attrib->attrib_mask & IPA_FLT_TYPE) + pr_err("type:%d ", attrib->type); + + if (attrib->attrib_mask & IPA_FLT_CODE) + pr_err("code:%d ", attrib->code); + + if (attrib->attrib_mask & IPA_FLT_SPI) + pr_err("spi:%x ", attrib->spi); + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) + pr_err("src_port:%u ", attrib->src_port); + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) + pr_err("dst_port:%u ", attrib->dst_port); + + if (attrib->attrib_mask & IPA_FLT_TC) + pr_err("tc:%d ", attrib->u.v6.tc); + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) + pr_err("flow_label:%x ", attrib->u.v6.flow_label); + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) + pr_err("next_hdr:%d ", attrib->u.v6.next_hdr); + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + pr_err( + "metadata:%x metadata_mask:%x ", + attrib->meta_data, attrib->meta_data_mask); + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) + pr_err("frg "); + + if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) || + (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) { + pr_err("src_mac_addr:%pM ", attrib->src_mac_addr); + } + + if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) || + (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) || + (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP)) { + pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr); + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) + 
pr_err("ether_type:%x ", attrib->ether_type); + + if (attrib->attrib_mask & IPA_FLT_TCP_SYN) + pr_err("tcp syn "); + + if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) + pr_err("tcp syn l2tp "); + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) + pr_err("l2tp inner ip type: %d ", attrib->type); + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) { + addr[0] = htonl(attrib->u.v4.dst_addr); + mask[0] = htonl(attrib->u.v4.dst_addr_mask); + pr_err("dst_addr:%pI4 dst_addr_mask:%pI4 ", addr, mask); + } + + pr_err("\n"); + return 0; +} + +static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib) +{ + uint8_t addr[16]; + uint8_t mask[16]; + int i; + int j; + + if (attrib->tos_eq_present) { + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) + pr_err("pure_ack "); + else + pr_err("tos:%d ", attrib->tos_eq); + } + + if (attrib->protocol_eq_present) + pr_err("protocol:%d ", attrib->protocol_eq); + + if (attrib->tc_eq_present) + pr_err("tc:%d ", attrib->tc_eq); + + if (attrib->num_offset_meq_128 > IPA_IPFLTR_NUM_MEQ_128_EQNS) { + IPAERR_RL("num_offset_meq_128 Max %d passed value %d\n", + IPA_IPFLTR_NUM_MEQ_128_EQNS, attrib->num_offset_meq_128); + return -EPERM; + } + + for (i = 0; i < attrib->num_offset_meq_128; i++) { + for (j = 0; j < 16; j++) { + addr[j] = attrib->offset_meq_128[i].value[j]; + mask[j] = attrib->offset_meq_128[i].mask[j]; + } + pr_err( + "(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ", + attrib->offset_meq_128[i].offset, + mask, addr); + } + + if (attrib->num_offset_meq_32 > IPA_IPFLTR_NUM_MEQ_32_EQNS) { + IPAERR_RL("num_offset_meq_32 Max %d passed value %d\n", + IPA_IPFLTR_NUM_MEQ_32_EQNS, attrib->num_offset_meq_32); + return -EPERM; + } + + for (i = 0; i < attrib->num_offset_meq_32; i++) + pr_err( + "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ", + attrib->offset_meq_32[i].offset, + attrib->offset_meq_32[i].mask, + attrib->offset_meq_32[i].value); + + if (attrib->num_ihl_offset_meq_32 > IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS) { + IPAERR_RL("num_ihl_offset_meq_32 Max %d passed value %d\n", + IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS, attrib->num_ihl_offset_meq_32); + return -EPERM; + } + + for (i = 0; i < attrib->num_ihl_offset_meq_32; i++) + pr_err( + "(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ", + attrib->ihl_offset_meq_32[i].offset, + attrib->ihl_offset_meq_32[i].mask, + attrib->ihl_offset_meq_32[i].value); + + if (attrib->metadata_meq32_present) + pr_err( + "(metadata: ofst:%u mask:0x%x val:0x%x) ", + attrib->metadata_meq32.offset, + attrib->metadata_meq32.mask, + attrib->metadata_meq32.value); + + if (attrib->num_ihl_offset_range_16 > + IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS) { + IPAERR_RL("num_ihl_offset_range_16 Max %d passed value %d\n", + IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS, + attrib->num_ihl_offset_range_16); + return -EPERM; + } + + for (i = 0; i < attrib->num_ihl_offset_range_16; i++) + pr_err( + "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ", + attrib->ihl_offset_range_16[i].offset, + attrib->ihl_offset_range_16[i].range_low, + attrib->ihl_offset_range_16[i].range_high); + + if (attrib->ihl_offset_eq_32_present) + pr_err( + "(ihl_ofst_eq32:%d val:0x%x) ", + attrib->ihl_offset_eq_32.offset, + attrib->ihl_offset_eq_32.value); + + if (attrib->ihl_offset_eq_16_present) + pr_err( + "(ihl_ofst_eq16:%d val:0x%x) ", + attrib->ihl_offset_eq_16.offset, + attrib->ihl_offset_eq_16.value); + + if (attrib->fl_eq_present) + pr_err("flow_label:%d ", attrib->fl_eq); + + if (attrib->ipv4_frag_eq_present) + pr_err("frag "); + + pr_err("\n"); + return 0; +} + +static int ipa3_open_dbg(struct inode 
*inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count, + loff_t *ppos) +{ + int i = 0; + struct ipa3_rt_tbl *tbl; + struct ipa3_rt_entry *entry; + struct ipa3_rt_tbl_set *set; + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + u32 ofst; + u32 ofst_words; + + set = &ipa3_ctx->rt_tbl_set[ip]; + + mutex_lock(&ipa3_ctx->lock); + + if (ip == IPA_IP_v6) { + if (ipa3_ctx->ip6_rt_tbl_hash_lcl) + pr_err("Hashable table resides on local memory\n"); + else + pr_err("Hashable table resides on system (ddr) memory\n"); + if (ipa3_ctx->ip6_rt_tbl_nhash_lcl) + pr_err("Non-Hashable table resides on local memory\n"); + else + pr_err("Non-Hashable table resides on system (ddr) memory\n"); + } else if (ip == IPA_IP_v4) { + if (ipa3_ctx->ip4_rt_tbl_hash_lcl) + pr_err("Hashable table resides on local memory\n"); + else + pr_err("Hashable table resides on system (ddr) memory\n"); + if (ipa3_ctx->ip4_rt_tbl_nhash_lcl) + pr_err("Non-Hashable table resides on local memory\n"); + else + pr_err("Non-Hashable table resides on system (ddr) memory\n"); + } + + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + i = 0; + list_for_each_entry(entry, &tbl->head_rt_rule_list, link) { + if (entry->proc_ctx) { + ofst = entry->proc_ctx->offset_entry->offset; + ofst_words = + (ofst + + ipa3_ctx->hdr_proc_ctx_tbl.start_offset) + >> 5; + + pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ", + entry->tbl->idx, entry->tbl->name, + entry->tbl->ref_cnt); + pr_err("rule_idx:%d dst:%d ep:%d S:%u ", + i, entry->rule.dst, + ipa3_get_ep_mapping(entry->rule.dst), + !ipa3_ctx->hdr_proc_ctx_tbl_lcl); + pr_err("proc_ctx[32B]:%u attrib_mask:%08x ", + ofst_words, + entry->rule.attrib.attrib_mask); + pr_err("rule_id:%u max_prio:%u prio:%u ", + entry->rule_id, entry->rule.max_prio, + entry->prio); + pr_err("hashable:%u retain_hdr:%u ", + entry->rule.hashable, + entry->rule.retain_hdr); + } else { + if (entry->hdr) + ofst = entry->hdr->offset_entry->offset; + else + ofst = 0; + + pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ", + entry->tbl->idx, entry->tbl->name, + entry->tbl->ref_cnt); + pr_err("rule_idx:%d dst:%d ep:%d S:%u ", + i, entry->rule.dst, + ipa3_get_ep_mapping(entry->rule.dst), + !ipa3_ctx->hdr_tbl_lcl); + pr_err("hdr_ofst[words]:%u attrib_mask:%08x ", + ofst >> 2, + entry->rule.attrib.attrib_mask); + pr_err("rule_id:%u max_prio:%u prio:%u ", + entry->rule_id, entry->rule.max_prio, + entry->prio); + pr_err("hashable:%u retain_hdr:%u ", + entry->rule.hashable, + entry->rule.retain_hdr); + } + + ipa3_attrib_dump(&entry->rule.attrib, ip); + i++; + } + } + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + int tbls_num; + int rules_num; + int tbl; + int rl; + int res = 0; + struct ipahal_rt_rule_entry *rules = NULL; + + switch (ip) { + case IPA_IP_v4: + tbls_num = IPA_MEM_PART(v4_rt_num_index); + break; + case IPA_IP_v6: + tbls_num = IPA_MEM_PART(v6_rt_num_index); + break; + default: + IPAERR("ip type error %d\n", ip); + return -EINVAL; + } + + IPADBG("Tring to parse %d H/W routing tables - IP=%d\n", tbls_num, ip); + + rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL); + if (!rules) { + IPAERR("failed to allocate mem for tbl rules\n"); + return -ENOMEM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + mutex_lock(&ipa3_ctx->lock); + + for (tbl 
= 0 ; tbl < tbls_num ; tbl++) { + pr_err("=== Routing Table %d = Hashable Rules ===\n", tbl); + rules_num = IPA_DBG_MAX_RULE_IN_TBL; + res = ipa3_rt_read_tbl_from_hw(tbl, ip, true, rules, + &rules_num); + if (res) { + pr_err("ERROR - Check the logs\n"); + IPAERR("failed reading tbl from hw\n"); + goto bail; + } + if (!rules_num) + pr_err("-->No rules. Empty tbl or modem system table\n"); + + for (rl = 0 ; rl < rules_num ; rl++) { + pr_err("rule_idx:%d dst ep:%d L:%u ", + rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl); + + if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX) + pr_err("proc_ctx:%u attrib_mask:%08x ", + rules[rl].hdr_ofst, + rules[rl].eq_attrib.rule_eq_bitmap); + else + pr_err("hdr_ofst:%u attrib_mask:%08x ", + rules[rl].hdr_ofst, + rules[rl].eq_attrib.rule_eq_bitmap); + + pr_err("rule_id:%u prio:%u retain_hdr:%u ", + rules[rl].id, rules[rl].priority, + rules[rl].retain_hdr); + res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } + } + + pr_err("=== Routing Table %d = Non-Hashable Rules ===\n", tbl); + rules_num = IPA_DBG_MAX_RULE_IN_TBL; + res = ipa3_rt_read_tbl_from_hw(tbl, ip, false, rules, + &rules_num); + if (res) { + pr_err("ERROR - Check the logs\n"); + IPAERR("failed reading tbl from hw\n"); + goto bail; + } + if (!rules_num) + pr_err("-->No rules. Empty tbl or modem system table\n"); + + for (rl = 0 ; rl < rules_num ; rl++) { + pr_err("rule_idx:%d dst ep:%d L:%u ", + rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl); + + if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX) + pr_err("proc_ctx:%u attrib_mask:%08x ", + rules[rl].hdr_ofst, + rules[rl].eq_attrib.rule_eq_bitmap); + else + pr_err("hdr_ofst:%u attrib_mask:%08x ", + rules[rl].hdr_ofst, + rules[rl].eq_attrib.rule_eq_bitmap); + + pr_err("rule_id:%u prio:%u retain_hdr:%u\n", + rules[rl].id, rules[rl].priority, + rules[rl].retain_hdr); + res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } + } + pr_err("\n"); + } + +bail: + mutex_unlock(&ipa3_ctx->lock); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + kfree(rules); + return res; +} + +static ssize_t ipa3_read_proc_ctx(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipa3_hdr_proc_ctx_tbl *tbl; + struct ipa3_hdr_proc_ctx_entry *entry; + u32 ofst_words; + + tbl = &ipa3_ctx->hdr_proc_ctx_tbl; + + mutex_lock(&ipa3_ctx->lock); + + if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) + pr_info("Table resides on local memory\n"); + else + pr_info("Table resides on system(ddr) memory\n"); + + list_for_each_entry(entry, &tbl->head_proc_ctx_entry_list, link) { + ofst_words = (entry->offset_entry->offset + + ipa3_ctx->hdr_proc_ctx_tbl.start_offset) + >> 5; + if (entry->hdr->is_hdr_proc_ctx) { + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ", + entry->id, + ipa3_hdr_proc_type_name[entry->type], + ofst_words); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "hdr_phys_base:0x%pa\n", + &entry->hdr->phys_base); + } else { + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ", + entry->id, + ipa3_hdr_proc_type_name[entry->type], + ofst_words); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "hdr[words]:%u\n", + entry->hdr->offset_entry->offset >> 2); + } + } + mutex_unlock(&ipa3_ctx->lock); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, 
nbytes); +} + +static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count, + loff_t *ppos) +{ + int i; + int j; + struct ipa3_flt_tbl *tbl; + struct ipa3_flt_entry *entry; + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + struct ipa3_rt_tbl *rt_tbl; + u32 rt_tbl_idx; + u32 bitmap; + bool eq; + int res = 0; + + mutex_lock(&ipa3_ctx->lock); + + for (j = 0; j < ipa3_ctx->ipa_num_pipes; j++) { + if (!ipa_is_ep_support_flt(j)) + continue; + tbl = &ipa3_ctx->flt_tbl[j][ip]; + i = 0; + list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + if (entry->cookie != IPA_FLT_COOKIE) + continue; + if (entry->rule.eq_attrib_type) { + rt_tbl_idx = entry->rule.rt_tbl_idx; + bitmap = entry->rule.eq_attrib.rule_eq_bitmap; + eq = true; + } else { + rt_tbl = ipa3_id_find(entry->rule.rt_tbl_hdl); + if (rt_tbl == NULL || + rt_tbl->cookie != IPA_RT_TBL_COOKIE) + rt_tbl_idx = ~0; + else + rt_tbl_idx = rt_tbl->idx; + bitmap = entry->rule.attrib.attrib_mask; + eq = false; + } + pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ", + j, i, entry->rule.action, rt_tbl_idx); + pr_err("attrib_mask:%08x retain_hdr:%d eq:%d ", + bitmap, entry->rule.retain_hdr, eq); + pr_err("hashable:%u rule_id:%u max_prio:%u prio:%u ", + entry->rule.hashable, entry->rule_id, + entry->rule.max_prio, entry->prio); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + pr_err("pdn index %d, set metadata %d ", + entry->rule.pdn_idx, + entry->rule.set_metadata); + if (eq) { + res = ipa3_attrib_dump_eq( + &entry->rule.eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } + } else + ipa3_attrib_dump( + &entry->rule.attrib, ip); + i++; + } + } +bail: + mutex_unlock(&ipa3_ctx->lock); + + return res; +} + +static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int pipe; + int rl; + int rules_num; + struct ipahal_flt_rule_entry *rules; + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + u32 rt_tbl_idx; + u32 bitmap; + int res = 0; + + IPADBG("Tring to parse %d H/W filtering tables - IP=%d\n", + ipa3_ctx->ep_flt_num, ip); + + rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL); + if (!rules) + return -ENOMEM; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + mutex_lock(&ipa3_ctx->lock); + for (pipe = 0; pipe < ipa3_ctx->ipa_num_pipes; pipe++) { + if (!ipa_is_ep_support_flt(pipe)) + continue; + pr_err("=== Filtering Table ep:%d = Hashable Rules ===\n", + pipe); + rules_num = IPA_DBG_MAX_RULE_IN_TBL; + res = ipa3_flt_read_tbl_from_hw(pipe, ip, true, rules, + &rules_num); + if (res) { + pr_err("ERROR - Check the logs\n"); + IPAERR("failed reading tbl from hw\n"); + goto bail; + } + if (!rules_num) + pr_err("-->No rules. 
Empty tbl or modem sys table\n"); + + for (rl = 0; rl < rules_num; rl++) { + rt_tbl_idx = rules[rl].rule.rt_tbl_idx; + bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap; + pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ", + pipe, rl, rules[rl].rule.action, rt_tbl_idx); + pr_err("attrib_mask:%08x retain_hdr:%d ", + bitmap, rules[rl].rule.retain_hdr); + pr_err("rule_id:%u prio:%u ", + rules[rl].id, rules[rl].priority); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + pr_err("pdn: %u, set_metadata: %u ", + rules[rl].rule.pdn_idx, + rules[rl].rule.set_metadata); + res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } + } + + pr_err("=== Filtering Table ep:%d = Non-Hashable Rules ===\n", + pipe); + rules_num = IPA_DBG_MAX_RULE_IN_TBL; + res = ipa3_flt_read_tbl_from_hw(pipe, ip, false, rules, + &rules_num); + if (res) { + IPAERR("failed reading tbl from hw\n"); + goto bail; + } + if (!rules_num) + pr_err("-->No rules. Empty tbl or modem sys table\n"); + for (rl = 0; rl < rules_num; rl++) { + rt_tbl_idx = rules[rl].rule.rt_tbl_idx; + bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap; + pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ", + pipe, rl, rules[rl].rule.action, rt_tbl_idx); + pr_err("attrib_mask:%08x retain_hdr:%d ", + bitmap, rules[rl].rule.retain_hdr); + pr_err("rule_id:%u prio:%u ", + rules[rl].id, rules[rl].priority); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + pr_err("pdn: %u, set_metadata: %u ", + rules[rl].rule.pdn_idx, + rules[rl].rule.set_metadata); + res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } + } + pr_err("\n"); + } + +bail: + mutex_unlock(&ipa3_ctx->lock); + kfree(rules); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int i; + int cnt = 0; + uint connect = 0; + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) + connect |= (ipa3_ctx->ep[i].valid << i); + + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "sw_tx=%u\n" + "hw_tx=%u\n" + "tx_non_linear=%u\n" + "tx_compl=%u\n" + "wan_rx=%u\n" + "stat_compl=%u\n" + "lan_aggr_close=%u\n" + "wan_aggr_close=%u\n" + "act_clnt=%u\n" + "con_clnt_bmap=0x%x\n" + "wan_rx_empty=%u\n" + "wan_repl_rx_empty=%u\n" + "lan_rx_empty=%u\n" + "lan_repl_rx_empty=%u\n" + "flow_enable=%u\n" + "flow_disable=%u\n", + ipa3_ctx->stats.tx_sw_pkts, + ipa3_ctx->stats.tx_hw_pkts, + ipa3_ctx->stats.tx_non_linear, + ipa3_ctx->stats.tx_pkts_compl, + ipa3_ctx->stats.rx_pkts, + ipa3_ctx->stats.stat_compl, + ipa3_ctx->stats.aggr_close, + ipa3_ctx->stats.wan_aggr_close, + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt), + connect, + ipa3_ctx->stats.wan_rx_empty, + ipa3_ctx->stats.wan_repl_rx_empty, + ipa3_ctx->stats.lan_rx_empty, + ipa3_ctx->stats.lan_repl_rx_empty, + ipa3_ctx->stats.flow_enable, + ipa3_ctx->stats.flow_disable); + cnt += nbytes; + + for (i = 0; i < IPAHAL_PKT_STATUS_EXCEPTION_MAX; i++) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, + "lan_rx_excp[%u:%20s]=%u\n", i, + ipahal_pkt_status_exception_str(i), + ipa3_ctx->stats.rx_excp_pkts[i]); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_read_odlstats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int cnt = 0; + + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "ODL received pkt =%u\n" + "ODL processed pkt to 
DIAG=%u\n" + "ODL dropped pkt =%u\n" + "ODL packet in queue =%u\n", + ipa3_odl_ctx->stats.odl_rx_pkt, + ipa3_odl_ctx->stats.odl_tx_diag_pkt, + ipa3_odl_ctx->stats.odl_drop_pkt, + atomic_read(&ipa3_odl_ctx->stats.numer_in_queue)); + + cnt += nbytes; + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + +#define HEAD_FRMT_STR "%25s\n" +#define FRMT_STR "%25s %10u\n" +#define FRMT_STR1 "%25s %10u\n\n" + + int cnt = 0; + int nbytes; + int ipa_ep_idx; + enum ipa_client_type client = IPA_CLIENT_WLAN1_PROD; + struct ipa3_ep_context *ep; + + do { + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + HEAD_FRMT_STR, "Client IPA_CLIENT_WLAN1_PROD Stats:"); + cnt += nbytes; + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + break; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (ep->valid != 1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + break; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Avail Fifo Desc:", + atomic_read(&ep->avail_fifo_desc)); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkts Rcvd:", ep->wstats.rx_pkts_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkts Status Rcvd:", + ep->wstats.rx_pkts_status_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx DH Rcvd:", ep->wstats.rx_hd_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx DH Processed:", + ep->wstats.rx_hd_processed); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx DH Sent Back:", ep->wstats.rx_hd_reply); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkt Leak:", ep->wstats.rx_pkt_leak); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR1, "Rx DP Fail:", ep->wstats.rx_dp_fail); + cnt += nbytes; + + } while (0); + + client = IPA_CLIENT_WLAN1_CONS; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN1_CONS Stats:"); + cnt += nbytes; + while (1) { + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + goto nxt_clnt_cons; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (ep->valid != 1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + goto nxt_clnt_cons; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Tx Pkts Received:", ep->wstats.tx_pkts_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Tx Pkts Sent:", ep->wstats.tx_pkts_sent); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR1, "Tx Pkts Dropped:", + ep->wstats.tx_pkts_dropped); + cnt += nbytes; + +nxt_clnt_cons: + switch (client) { + case IPA_CLIENT_WLAN1_CONS: + client = IPA_CLIENT_WLAN2_CONS; + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN2_CONS Stats:"); + cnt += nbytes; + continue; + case 
IPA_CLIENT_WLAN2_CONS: + client = IPA_CLIENT_WLAN3_CONS; + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN3_CONS Stats:"); + cnt += nbytes; + continue; + case IPA_CLIENT_WLAN3_CONS: + client = IPA_CLIENT_WLAN4_CONS; + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN4_CONS Stats:"); + cnt += nbytes; + continue; + case IPA_CLIENT_WLAN4_CONS: + default: + break; + } + break; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "\n"HEAD_FRMT_STR, "All Wlan Consumer pipes stats:"); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR, + "Tx Comm Buff Allocated:", + ipa3_ctx->wc_memb.wlan_comm_total_cnt); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR, + "Tx Comm Buff Avail:", ipa3_ctx->wc_memb.wlan_comm_free_cnt); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR1, + "Total Tx Pkts Freed:", ipa3_ctx->wc_memb.total_tx_pkts_freed); + cnt += nbytes; + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ +#define TX_STATS(y) \ + ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y +#define RX_STATS(y) \ + ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y + + struct Ipa3HwStatsNTNInfoData_t stats; + int nbytes; + int cnt = 0; + + if (!ipa3_get_ntn_stats(&stats)) { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "TX num_pkts_processed=%u\n" + "TX ringFull=%u\n" + "TX ringEmpty=%u\n" + "TX ringUsageHigh=%u\n" + "TX ringUsageLow=%u\n" + "TX RingUtilCount=%u\n" + "TX bamFifoFull=%u\n" + "TX bamFifoEmpty=%u\n" + "TX bamFifoUsageHigh=%u\n" + "TX bamFifoUsageLow=%u\n" + "TX bamUtilCount=%u\n" + "TX num_db=%u\n" + "TX num_qmb_int_handled=%u\n" + "TX ipa_pipe_number=%u\n", + TX_STATS(num_pkts_processed), + TX_STATS(ring_stats.ringFull), + TX_STATS(ring_stats.ringEmpty), + TX_STATS(ring_stats.ringUsageHigh), + TX_STATS(ring_stats.ringUsageLow), + TX_STATS(ring_stats.RingUtilCount), + TX_STATS(gsi_stats.bamFifoFull), + TX_STATS(gsi_stats.bamFifoEmpty), + TX_STATS(gsi_stats.bamFifoUsageHigh), + TX_STATS(gsi_stats.bamFifoUsageLow), + TX_STATS(gsi_stats.bamUtilCount), + TX_STATS(num_db), + TX_STATS(num_qmb_int_handled), + TX_STATS(ipa_pipe_number)); + cnt += nbytes; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "RX num_pkts_processed=%u\n" + "RX ringFull=%u\n" + "RX ringEmpty=%u\n" + "RX ringUsageHigh=%u\n" + "RX ringUsageLow=%u\n" + "RX RingUtilCount=%u\n" + "RX bamFifoFull=%u\n" + "RX bamFifoEmpty=%u\n" + "RX bamFifoUsageHigh=%u\n" + "RX bamFifoUsageLow=%u\n" + "RX bamUtilCount=%u\n" + "RX num_db=%u\n" + "RX num_qmb_int_handled=%u\n" + "RX ipa_pipe_number=%u\n", + RX_STATS(num_pkts_processed), + RX_STATS(ring_stats.ringFull), + RX_STATS(ring_stats.ringEmpty), + RX_STATS(ring_stats.ringUsageHigh), + RX_STATS(ring_stats.ringUsageLow), + RX_STATS(ring_stats.RingUtilCount), + RX_STATS(gsi_stats.bamFifoFull), + RX_STATS(gsi_stats.bamFifoEmpty), + RX_STATS(gsi_stats.bamFifoUsageHigh), + RX_STATS(gsi_stats.bamFifoUsageLow), + RX_STATS(gsi_stats.bamUtilCount), + RX_STATS(num_db), + RX_STATS(num_qmb_int_handled), + RX_STATS(ipa_pipe_number)); + cnt += nbytes; + } else { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "Fail to read NTN stats\n"); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + 
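The debugfs statistics handlers above (and ipa3_read_wdi() below) all follow the same read pattern: take a snapshot of the counters, format them into a bounded local buffer with scnprintf(), and return the text through simple_read_from_buffer(), which honors the caller's count and file offset. A minimal, self-contained sketch of that pattern is shown here for reference; it is illustrative only, is not part of this patch, and the example_read()/example_stat names are hypothetical.

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/types.h>

static u32 example_stat;

static ssize_t example_read(struct file *file, char __user *ubuf,
	size_t count, loff_t *ppos)
{
	char buf[64];
	int nbytes;

	/*
	 * Format a snapshot into a bounded buffer; scnprintf() returns the
	 * number of bytes actually written (excluding the NUL terminator).
	 */
	nbytes = scnprintf(buf, sizeof(buf), "example_stat=%u\n", example_stat);

	/* Copy at most 'count' bytes to user space and advance *ppos. */
	return simple_read_from_buffer(ubuf, count, ppos, buf, nbytes);
}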
+static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct IpaHwStatsWDIInfoData_t stats; + int nbytes; + int cnt = 0; + struct IpaHwStatsWDITxInfoData_t *tx_ch_ptr; + + if (!ipa3_get_wdi_stats(&stats)) { + tx_ch_ptr = &stats.tx_ch_stats; + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "TX num_pkts_processed=%u\n" + "TX copy_engine_doorbell_value=%u\n" + "TX num_db_fired=%u\n" + "TX ringFull=%u\n" + "TX ringEmpty=%u\n" + "TX ringUsageHigh=%u\n" + "TX ringUsageLow=%u\n" + "TX RingUtilCount=%u\n" + "TX bamFifoFull=%u\n" + "TX bamFifoEmpty=%u\n" + "TX bamFifoUsageHigh=%u\n" + "TX bamFifoUsageLow=%u\n" + "TX bamUtilCount=%u\n" + "TX num_db=%u\n" + "TX num_unexpected_db=%u\n" + "TX num_bam_int_handled=%u\n" + "TX num_bam_int_in_non_running_state=%u\n" + "TX num_qmb_int_handled=%u\n" + "TX num_bam_int_handled_while_wait_for_bam=%u\n", + tx_ch_ptr->num_pkts_processed, + tx_ch_ptr->copy_engine_doorbell_value, + tx_ch_ptr->num_db_fired, + tx_ch_ptr->tx_comp_ring_stats.ringFull, + tx_ch_ptr->tx_comp_ring_stats.ringEmpty, + tx_ch_ptr->tx_comp_ring_stats.ringUsageHigh, + tx_ch_ptr->tx_comp_ring_stats.ringUsageLow, + tx_ch_ptr->tx_comp_ring_stats.RingUtilCount, + tx_ch_ptr->bam_stats.bamFifoFull, + tx_ch_ptr->bam_stats.bamFifoEmpty, + tx_ch_ptr->bam_stats.bamFifoUsageHigh, + tx_ch_ptr->bam_stats.bamFifoUsageLow, + tx_ch_ptr->bam_stats.bamUtilCount, + tx_ch_ptr->num_db, + tx_ch_ptr->num_unexpected_db, + tx_ch_ptr->num_bam_int_handled, + tx_ch_ptr->num_bam_int_in_non_running_state, + tx_ch_ptr->num_qmb_int_handled, + tx_ch_ptr->num_bam_int_handled_while_wait_for_bam); + cnt += nbytes; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "RX max_outstanding_pkts=%u\n" + "RX num_pkts_processed=%u\n" + "RX rx_ring_rp_value=%u\n" + "RX ringFull=%u\n" + "RX ringEmpty=%u\n" + "RX ringUsageHigh=%u\n" + "RX ringUsageLow=%u\n" + "RX RingUtilCount=%u\n" + "RX bamFifoFull=%u\n" + "RX bamFifoEmpty=%u\n" + "RX bamFifoUsageHigh=%u\n" + "RX bamFifoUsageLow=%u\n" + "RX bamUtilCount=%u\n" + "RX num_bam_int_handled=%u\n" + "RX num_db=%u\n" + "RX num_unexpected_db=%u\n" + "RX num_pkts_in_dis_uninit_state=%u\n" + "RX num_ic_inj_vdev_change=%u\n" + "RX num_ic_inj_fw_desc_change=%u\n" + "RX num_qmb_int_handled=%u\n" + "RX reserved1=%u\n" + "RX reserved2=%u\n", + stats.rx_ch_stats.max_outstanding_pkts, + stats.rx_ch_stats.num_pkts_processed, + stats.rx_ch_stats.rx_ring_rp_value, + stats.rx_ch_stats.rx_ind_ring_stats.ringFull, + stats.rx_ch_stats.rx_ind_ring_stats.ringEmpty, + stats.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh, + stats.rx_ch_stats.rx_ind_ring_stats.ringUsageLow, + stats.rx_ch_stats.rx_ind_ring_stats.RingUtilCount, + stats.rx_ch_stats.bam_stats.bamFifoFull, + stats.rx_ch_stats.bam_stats.bamFifoEmpty, + stats.rx_ch_stats.bam_stats.bamFifoUsageHigh, + stats.rx_ch_stats.bam_stats.bamFifoUsageLow, + stats.rx_ch_stats.bam_stats.bamUtilCount, + stats.rx_ch_stats.num_bam_int_handled, + stats.rx_ch_stats.num_db, + stats.rx_ch_stats.num_unexpected_db, + stats.rx_ch_stats.num_pkts_in_dis_uninit_state, + stats.rx_ch_stats.num_ic_inj_vdev_change, + stats.rx_ch_stats.num_ic_inj_fw_desc_change, + stats.rx_ch_stats.num_qmb_int_handled, + stats.rx_ch_stats.reserved1, + stats.rx_ch_stats.reserved2); + cnt += nbytes; + } else { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "Fail to read WDI stats\n"); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_write_dbg_cnt(struct file *file, const char __user 
*buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + u32 option = 0; + struct ipahal_reg_debug_cnt_ctrl dbg_cnt_ctrl; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + IPAERR("IPA_DEBUG_CNT_CTRL is not supported in IPA 4.0\n"); + return -EPERM; + } + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtou32(dbg_buff, 0, &option)) + return -EFAULT; + + memset(&dbg_cnt_ctrl, 0, sizeof(dbg_cnt_ctrl)); + dbg_cnt_ctrl.type = DBG_CNT_TYPE_GENERAL; + dbg_cnt_ctrl.product = true; + dbg_cnt_ctrl.src_pipe = 0xff; + dbg_cnt_ctrl.rule_idx_pipe_rule = false; + dbg_cnt_ctrl.rule_idx = 0; + if (option == 1) + dbg_cnt_ctrl.en = true; + else + dbg_cnt_ctrl.en = false; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipahal_write_reg_n_fields(IPA_DEBUG_CNT_CTRL_n, 0, &dbg_cnt_ctrl); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return count; +} + +static ssize_t ipa3_read_dbg_cnt(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + u32 regval; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + IPAERR("IPA_DEBUG_CNT_REG is not supported in IPA 4.0\n"); + return -EPERM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + regval = + ipahal_read_reg_n(IPA_DEBUG_CNT_REG_n, 0); + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA_DEBUG_CNT_REG_0=0x%x\n", regval); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa3_read_msg(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int cnt = 0; + int i; + + for (i = 0; i < IPA_EVENT_MAX_NUM; i++) { + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "msg[%u:%27s] W:%u R:%u\n", i, + ipa3_event_name[i], + ipa3_ctx->stats.msg_w[i], + ipa3_ctx->stats.msg_r[i]); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static int ipa3_read_table( + char *table_addr, u32 table_size, + char *buff, u32 buff_size, + u32 *total_num_entries, + u32 *rule_id, + enum ipahal_nat_type nat_type) +{ + int result; + char *entry; + size_t entry_size; + bool entry_zeroed; + bool entry_valid; + u32 i, num_entries = 0, id = *rule_id, pos = 0; + + IPADBG("\n"); + + if (table_addr == NULL) + return 0; + + result = ipahal_nat_entry_size(nat_type, &entry_size); + if (result) { + IPAERR("Failed to retrieve size of %s entry\n", + ipahal_nat_type_str(nat_type)); + return 0; + } + + for (i = 0, entry = table_addr; + i < table_size; + ++i, ++id, entry += entry_size) { + result = ipahal_nat_is_entry_zeroed(nat_type, entry, + &entry_zeroed); + if (result) { + IPAERR( + "Failed to determine whether the %s entry is definitely zero\n" + , ipahal_nat_type_str(nat_type)); + goto bail; + } + if (entry_zeroed) + continue; + + result = ipahal_nat_is_entry_valid(nat_type, entry, + &entry_valid); + if (result) { + IPAERR( + "Failed to determine whether the %s entry is valid\n" + , ipahal_nat_type_str(nat_type)); + goto bail; + } + + if (entry_valid) { + ++num_entries; + pos += scnprintf(buff + pos, buff_size - pos, + "\tEntry_Index=%d\n", id); + } else { + pos += scnprintf(buff + pos, buff_size - pos, + "\tEntry_Index=%d - Invalid Entry\n", id); + } + + pos += ipahal_nat_stringify_entry(nat_type, entry, + buff + pos, buff_size - pos); + } + + if (num_entries) + pos += scnprintf(buff + pos, buff_size - pos, "\n"); + else + pos += scnprintf(buff + pos, buff_size - pos, "\tEmpty\n\n"); + + IPADBG("return\n"); 
+bail: + *rule_id = id; + *total_num_entries += num_entries; + return pos; +} + +static int ipa3_start_read_memory_device( + struct ipa3_nat_ipv6ct_common_mem *dev, + char *buff, u32 buff_size, + enum ipahal_nat_type nat_type, + u32 *num_entries) +{ + u32 rule_id = 0, pos = 0; + + IPADBG("\n"); + + pos += scnprintf(buff + pos, buff_size - pos, "%s_Table_Size=%d\n", + dev->name, dev->table_entries + 1); + + pos += scnprintf(buff + pos, buff_size - pos, + "%s_Expansion_Table_Size=%d\n", + dev->name, dev->expn_table_entries); + + if (!dev->is_sys_mem) + pos += scnprintf(buff + pos, buff_size - pos, + "Not supported for local(shared) memory\n"); + + pos += scnprintf(buff + pos, buff_size - pos, + "\n%s Base Table:\n", dev->name); + pos += ipa3_read_table(dev->base_table_addr, dev->table_entries + 1, + buff + pos, buff_size - pos, num_entries, &rule_id, nat_type); + + pos += scnprintf(buff + pos, buff_size - pos, + "%s Expansion Table:\n", dev->name); + pos += ipa3_read_table( + dev->expansion_table_addr, dev->expn_table_entries, + buff + pos, buff_size - pos, + num_entries, + &rule_id, + nat_type); + + IPADBG("return\n"); + return pos; +} + +static int ipa3_finish_read_memory_device( + struct ipa3_nat_ipv6ct_common_mem *dev, + char *buff, u32 buff_size, + u32 curr_pos, + u32 num_entries) +{ + u32 pos = 0; + + IPADBG("\n"); + + /* + * A real buffer and buff size, so need to use the + * real current position + */ + pos += scnprintf(buff + curr_pos, buff_size - curr_pos, + "Overall number %s entries: %d\n\n", dev->name, num_entries); + + if (curr_pos + pos >= buff_size - 1) + IPAERR( + "The %s debug information is larger than the internal buffer, so the read information might be incomplete", + dev->name); + + IPADBG("return\n"); + return pos; +} + +static int ipa3_read_pdn_table(char *buff, u32 buff_size) +{ + int i, result; + char *pdn_entry; + size_t pdn_entry_size; + bool entry_zeroed; + bool entry_valid; + u32 pos = 0; + + IPADBG("\n"); + + result = ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size); + if (result) { + IPAERR("Failed to retrieve size of PDN entry"); + return 0; + } + + for (i = 0, pdn_entry = ipa3_ctx->nat_mem.pdn_mem.base; + i < IPA_MAX_PDN_NUM; + ++i, pdn_entry += pdn_entry_size) { + result = ipahal_nat_is_entry_zeroed(IPAHAL_NAT_IPV4_PDN, + pdn_entry, &entry_zeroed); + if (result) { + IPAERR( + "Failed to determine whether the PDN entry is definitely zero\n"); + goto bail; + } + if (entry_zeroed) + continue; + + result = ipahal_nat_is_entry_valid(IPAHAL_NAT_IPV4_PDN, + pdn_entry, &entry_valid); + if (result) { + IPAERR( + "Failed to determine whether the PDN entry is valid\n"); + goto bail; + } + if (entry_valid) + pos += scnprintf(buff + pos, buff_size - pos, + "PDN %d: ", i); + else + pos += scnprintf(buff + pos, buff_size - pos, + "PDN %d - Invalid: ", i); + + pos += ipahal_nat_stringify_entry(IPAHAL_NAT_IPV4_PDN, + pdn_entry, buff + pos, buff_size - pos); + } + pos += scnprintf(buff + pos, buff_size - pos, "\n"); + + IPADBG("return\n"); +bail: + return pos; +} + +static ssize_t ipa3_read_nat4(struct file *file, + char __user *ubuf, size_t count, + loff_t *ppos) +{ + ssize_t ret; + char *buff; + u32 rule_id = 0, pos = 0, num_entries = 0, index_num_entries = 0; + const u32 buff_size = IPA_MAX_MSG_LEN + 2 * IPA_MAX_ENTRY_STRING_LEN * ( + ipa3_ctx->nat_mem.dev.table_entries + 1 + + ipa3_ctx->nat_mem.dev.expn_table_entries); + + IPADBG("\n"); + + buff = kzalloc(buff_size, GFP_KERNEL); + if (buff == NULL) + return 0; + + if (!ipa3_ctx->nat_mem.dev.is_dev_init) { + 
pos += scnprintf(buff + pos, buff_size - pos, + "NAT hasn't been initialized or not supported\n"); + goto ret; + } + + mutex_lock(&ipa3_ctx->nat_mem.dev.lock); + + if (!ipa3_ctx->nat_mem.dev.is_hw_init) { + pos += scnprintf(buff + pos, buff_size - pos, + "NAT H/W hasn't been initialized\n"); + goto bail; + } + + pos += scnprintf(buff + pos, buff_size - pos, "\n"); + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + pos += ipa3_read_pdn_table(buff + pos, buff_size - pos); + } else { + pos += scnprintf(buff + pos, buff_size - pos, + "NAT Table IP Address=%pI4h\n\n", + &ipa3_ctx->nat_mem.public_ip_addr); + } + + pos += ipa3_start_read_memory_device(&ipa3_ctx->nat_mem.dev, + buff + pos, buff_size - pos, IPAHAL_NAT_IPV4, &num_entries); + + /* Print Index tables */ + pos += scnprintf(buff + pos, buff_size - pos, + "ipaNatTable Index Table:\n"); + pos += ipa3_read_table( + ipa3_ctx->nat_mem.index_table_addr, + ipa3_ctx->nat_mem.dev.table_entries + 1, + buff + pos, buff_size - pos, + &index_num_entries, + &rule_id, + IPAHAL_NAT_IPV4_INDEX); + + pos += scnprintf(buff + pos, buff_size - pos, + "ipaNatTable Expansion Index Table:\n"); + pos += ipa3_read_table( + ipa3_ctx->nat_mem.index_table_expansion_addr, + ipa3_ctx->nat_mem.dev.expn_table_entries, + buff + pos, buff_size - pos, + &index_num_entries, + &rule_id, + IPAHAL_NAT_IPV4_INDEX); + + if (num_entries != index_num_entries) + IPAERR( + "The NAT table number of entries %d is different from index table number of entries %d\n", + num_entries, index_num_entries); + + pos += ipa3_finish_read_memory_device(&ipa3_ctx->nat_mem.dev, + buff, buff_size, pos, num_entries); + + IPADBG("return\n"); +bail: + mutex_unlock(&ipa3_ctx->nat_mem.dev.lock); +ret: + ret = simple_read_from_buffer(ubuf, count, ppos, buff, pos); + kfree(buff); + return ret; +} + +static ssize_t ipa3_read_ipv6ct(struct file *file, + char __user *ubuf, size_t count, + loff_t *ppos) +{ + ssize_t ret; + char *buff; + u32 pos = 0, num_entries = 0; + const u32 buff_size = IPA_MAX_MSG_LEN + IPA_MAX_ENTRY_STRING_LEN * ( + ipa3_ctx->nat_mem.dev.table_entries + 1 + + ipa3_ctx->nat_mem.dev.expn_table_entries); + + IPADBG("\n"); + + buff = kzalloc(buff_size, GFP_KERNEL); + if (buff == NULL) + return 0; + + pos += scnprintf(buff + pos, buff_size - pos, "\n"); + + if (!ipa3_ctx->ipv6ct_mem.dev.is_dev_init) { + pos += scnprintf(buff + pos, buff_size - pos, + "IPv6 connection tracking hasn't been initialized or not supported\n"); + goto ret; + } + + mutex_lock(&ipa3_ctx->ipv6ct_mem.dev.lock); + + if (!ipa3_ctx->ipv6ct_mem.dev.is_hw_init) { + pos += scnprintf(buff + pos, buff_size - pos, + "IPv6 connection tracking H/W hasn't been initialized\n"); + goto bail; + } + + pos += ipa3_start_read_memory_device(&ipa3_ctx->ipv6ct_mem.dev, + buff + pos, buff_size - pos, IPAHAL_NAT_IPV6CT, &num_entries); + pos += ipa3_finish_read_memory_device(&ipa3_ctx->ipv6ct_mem.dev, + buff, buff_size, pos, num_entries); + + IPADBG("return\n"); +bail: + mutex_unlock(&ipa3_ctx->ipv6ct_mem.dev.lock); +ret: + ret = simple_read_from_buffer(ubuf, count, ppos, buff, pos); + kfree(buff); + return ret; +} + +static ssize_t ipa3_rm_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int result, cnt = 0; + + /* deprecate if IPA PM is used */ + if (ipa3_ctx->use_ipa_pm) { + cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "IPA RM is disabled\n"); + goto ret; + } + + result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN); + if (result < 0) { + cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + 
"Error in printing RM stat %d\n", result); + goto ret; + } + cnt += result; +ret: + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_pm_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int result, cnt = 0; + + if (!ipa3_ctx->use_ipa_pm) { + cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "IPA PM is disabled\n"); + goto ret; + } + + result = ipa_pm_stat(dbg_buff, IPA_MAX_MSG_LEN); + if (result < 0) { + cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "Error in printing PM stat %d\n", result); + goto ret; + } + cnt += result; +ret: + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_pm_ex_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int result, cnt = 0; + + if (!ipa3_ctx->use_ipa_pm) { + cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "IPA PM is disabled\n"); + goto ret; + } + + result = ipa_pm_exceptions_stat(dbg_buff, IPA_MAX_MSG_LEN); + if (result < 0) { + cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "Error in printing PM stat %d\n", result); + goto ret; + } + cnt += result; +ret: + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_read_ipahal_regs(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipahal_print_all_regs(true); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +static void ipa_dump_status(struct ipahal_pkt_status *status) +{ + IPA_DUMP_STATUS_FIELD(status_opcode); + IPA_DUMP_STATUS_FIELD(exception); + IPA_DUMP_STATUS_FIELD(status_mask); + IPA_DUMP_STATUS_FIELD(pkt_len); + IPA_DUMP_STATUS_FIELD(endp_src_idx); + IPA_DUMP_STATUS_FIELD(endp_dest_idx); + IPA_DUMP_STATUS_FIELD(metadata); + IPA_DUMP_STATUS_FIELD(flt_local); + IPA_DUMP_STATUS_FIELD(flt_hash); + IPA_DUMP_STATUS_FIELD(flt_global); + IPA_DUMP_STATUS_FIELD(flt_ret_hdr); + IPA_DUMP_STATUS_FIELD(flt_miss); + IPA_DUMP_STATUS_FIELD(flt_rule_id); + IPA_DUMP_STATUS_FIELD(rt_local); + IPA_DUMP_STATUS_FIELD(rt_hash); + IPA_DUMP_STATUS_FIELD(ucp); + IPA_DUMP_STATUS_FIELD(rt_tbl_idx); + IPA_DUMP_STATUS_FIELD(rt_miss); + IPA_DUMP_STATUS_FIELD(rt_rule_id); + IPA_DUMP_STATUS_FIELD(nat_hit); + IPA_DUMP_STATUS_FIELD(nat_entry_idx); + IPA_DUMP_STATUS_FIELD(nat_type); + pr_err("tag = 0x%llx\n", (u64)status->tag_info & 0xFFFFFFFFFFFF); + IPA_DUMP_STATUS_FIELD(seq_num); + IPA_DUMP_STATUS_FIELD(time_of_day_ctr); + IPA_DUMP_STATUS_FIELD(hdr_local); + IPA_DUMP_STATUS_FIELD(hdr_offset); + IPA_DUMP_STATUS_FIELD(frag_hit); + IPA_DUMP_STATUS_FIELD(frag_rule); +} + +static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ipa3_status_stats *stats; + int i, j; + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) + return -EFAULT; + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa3_ctx->ep[i].sys || !ipa3_ctx->ep[i].sys->status_stat) + continue; + + memcpy(stats, ipa3_ctx->ep[i].sys->status_stat, sizeof(*stats)); + pr_err("Statuses for pipe %d\n", i); + for (j = 0; j < IPA_MAX_STATUS_STAT_NUM; j++) { + pr_err("curr=%d\n", stats->curr); + ipa_dump_status(&stats->status[stats->curr]); + pr_err("\n\n\n"); + stats->curr = (stats->curr + 1) % + IPA_MAX_STATUS_STAT_NUM; + } + } + + kfree(stats); + return 0; +} + +static ssize_t ipa3_print_active_clients_log(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int cnt; + int table_size; + + if 
(active_clients_buf == NULL) { + IPAERR("Active Clients buffer is not allocated"); + return 0; + } + memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENT_BUF_SIZE); + mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + cnt = ipa3_active_clients_log_print_buffer(active_clients_buf, + IPA_DBG_ACTIVE_CLIENT_BUF_SIZE - IPA_MAX_MSG_LEN); + table_size = ipa3_active_clients_log_print_table(active_clients_buf + + cnt, IPA_MAX_MSG_LEN); + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); + + return simple_read_from_buffer(ubuf, count, ppos, + active_clients_buf, cnt + table_size); +} + +static ssize_t ipa3_clear_active_clients_log(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + ipa3_active_clients_log_clear(); + + return count; +} + +static ssize_t ipa3_enable_ipc_low(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + mutex_lock(&ipa3_ctx->lock); + if (option) { + if (!ipa_ipc_low_buff) { + ipa_ipc_low_buff = + ipc_log_context_create(IPA_IPC_LOG_PAGES, + "ipa_low", 0); + } + if (ipa_ipc_low_buff == NULL) + IPADBG("failed to get logbuf_low\n"); + ipa3_ctx->logbuf_low = ipa_ipc_low_buff; + } else { + ipa3_ctx->logbuf_low = NULL; + } + mutex_unlock(&ipa3_ctx->lock); + + return count; +} + +static const struct ipa3_debugfs_file debugfs_files[] = { + { + "gen_reg", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_gen_reg + } + }, { + "active_clients", IPA_READ_WRITE_MODE, NULL, { + .read = ipa3_print_active_clients_log, + .write = ipa3_clear_active_clients_log + } + }, { + "ep_reg", IPA_READ_WRITE_MODE, NULL, { + .read = ipa3_read_ep_reg, + .write = ipa3_write_ep_reg, + } + }, { + "keep_awake", IPA_READ_WRITE_MODE, NULL, { + .read = ipa3_read_keep_awake, + .write = ipa3_write_keep_awake, + } + }, { + "holb", IPA_WRITE_ONLY_MODE, NULL, { + .write = ipa3_write_ep_holb, + } + }, { + "hdr", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_hdr, + } + }, { + "proc_ctx", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_proc_ctx, + } + }, { + "ip4_rt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, { + .read = ipa3_read_rt, + .open = ipa3_open_dbg, + } + }, { + "ip4_rt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, { + .read = ipa3_read_rt_hw, + .open = ipa3_open_dbg, + } + }, { + "ip6_rt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, { + .read = ipa3_read_rt, + .open = ipa3_open_dbg, + } + }, { + "ip6_rt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, { + .read = ipa3_read_rt_hw, + .open = ipa3_open_dbg, + } + }, { + "ip4_flt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, { + .read = ipa3_read_flt, + .open = ipa3_open_dbg, + } + }, { + "ip4_flt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, { + .read = ipa3_read_flt_hw, + .open = ipa3_open_dbg, + } + }, { + "ip6_flt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, { + .read = ipa3_read_flt, + .open = ipa3_open_dbg, + } + }, { + "ip6_flt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, { + .read = ipa3_read_flt_hw, + .open = ipa3_open_dbg, + } + }, { + "stats", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_stats, + 
} + }, { + "wstats", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_wstats, + } + }, { + "odlstats", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_odlstats, + } + }, { + "wdi", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_wdi, + } + }, { + "ntn", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_ntn, + } + }, { + "dbg_cnt", IPA_READ_WRITE_MODE, NULL, { + .read = ipa3_read_dbg_cnt, + .write = ipa3_write_dbg_cnt, + } + }, { + "msg", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_msg, + } + }, { + "ip4_nat", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_nat4, + } + }, { + "ipv6ct", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_ipv6ct, + } + }, { + "rm_stats", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_rm_read_stats, + } + }, { + "pm_stats", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_pm_read_stats, + } + }, { + "pm_ex_stats", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_pm_ex_read_stats, + } + }, { + "status_stats", IPA_READ_ONLY_MODE, NULL, { + .read = ipa_status_stats_read, + } + }, { + "enable_low_prio_print", IPA_WRITE_ONLY_MODE, NULL, { + .write = ipa3_enable_ipc_low, + } + }, { + "ipa_dump_regs", IPA_READ_ONLY_MODE, NULL, { + .read = ipa3_read_ipahal_regs, + } + } +}; + +void ipa3_debugfs_init(void) +{ + const size_t debugfs_files_num = + sizeof(debugfs_files) / sizeof(struct ipa3_debugfs_file); + size_t i; + struct dentry *file; + + dent = debugfs_create_dir("ipa", 0); + if (IS_ERR(dent)) { + IPAERR("fail to create folder in debug_fs.\n"); + return; + } + + file = debugfs_create_u32("hw_type", IPA_READ_ONLY_MODE, + dent, &ipa3_ctx->ipa_hw_type); + if (!file) { + IPAERR("could not create hw_type file\n"); + goto fail; + } + + + for (i = 0; i < debugfs_files_num; ++i) { + const struct ipa3_debugfs_file *curr = &debugfs_files[i]; + + file = debugfs_create_file(curr->name, curr->mode, dent, + curr->data, &curr->fops); + if (!file || IS_ERR(file)) { + IPAERR("fail to create file for debug_fs %s\n", + curr->name); + goto fail; + } + } + + active_clients_buf = NULL; + active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENT_BUF_SIZE, + GFP_KERNEL); + if (active_clients_buf == NULL) + goto fail; + + file = debugfs_create_u32("enable_clock_scaling", IPA_READ_WRITE_MODE, + dent, &ipa3_ctx->enable_clock_scaling); + if (!file) { + IPAERR("could not create enable_clock_scaling file\n"); + goto fail; + } + + file = debugfs_create_u32("enable_napi_chain", IPA_READ_WRITE_MODE, + dent, &ipa3_ctx->enable_napi_chain); + if (!file) { + IPAERR("could not create enable_napi_chain file\n"); + goto fail; + } + + file = debugfs_create_u32("clock_scaling_bw_threshold_nominal_mbps", + IPA_READ_WRITE_MODE, dent, + &ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal); + if (!file) { + IPAERR("could not create bw_threshold_nominal_mbps\n"); + goto fail; + } + + file = debugfs_create_u32("clock_scaling_bw_threshold_turbo_mbps", + IPA_READ_WRITE_MODE, dent, + &ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo); + if (!file) { + IPAERR("could not create bw_threshold_turbo_mbps\n"); + goto fail; + } + + file = debugfs_create_u32("clk_rate", IPA_READ_ONLY_MODE, + dent, &ipa3_ctx->curr_ipa_clk_rate); + if (!file) { + IPAERR("could not create clk_rate file\n"); + goto fail; + } + + ipa_debugfs_init_stats(dent); + + return; + +fail: + debugfs_remove_recursive(dent); +} + +void ipa3_debugfs_remove(void) +{ + if (IS_ERR(dent)) { + IPAERR("Debugfs:folder was not created.\n"); + return; + } + if (active_clients_buf != NULL) { + kfree(active_clients_buf); + active_clients_buf = NULL; + } + debugfs_remove_recursive(dent); +} + 
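+/*
+ * ipa_debugfs_get_root() below exposes the "ipa" debugfs directory so that
+ * other modules can hang their own entries under it. A minimal sketch of a
+ * hypothetical caller (the names my_stats and my_fops are illustrative only,
+ * not part of this driver):
+ *
+ *   struct dentry *root = ipa_debugfs_get_root();
+ *
+ *   if (root)
+ *           debugfs_create_file("my_stats", 0444, root, NULL, &my_fops);
+ */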
+struct dentry *ipa_debugfs_get_root(void) +{ + return dent; +} +EXPORT_SYMBOL(ipa_debugfs_get_root); + +#else /* !CONFIG_DEBUG_FS */ +void ipa3_debugfs_init(void) {} +void ipa3_debugfs_remove(void) {} +#endif diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c new file mode 100644 index 000000000000..2b94d85d85bf --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c @@ -0,0 +1,1251 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include "linux/msm_gsi.h" +#include +#include "ipa_i.h" + +#define IPA_DMA_POLLING_MIN_SLEEP_RX 1010 +#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050 +#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8 +#define IPA_DMA_MAX_PKT_SZ 0xFFFF +#define IPA_DMA_DUMMY_BUFF_SZ 8 +#define IPA_DMA_PREFETCH_WA_THRESHOLD 9 + +#define IPADMA_DRV_NAME "ipa_dma" + +#define IPADMA_DBG(fmt, args...) \ + do { \ + pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_ERR(fmt, args...) \ + do { \ + pr_err(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_FUNC_ENTRY() \ + IPADMA_DBG_LOW("ENTRY\n") + +#define IPADMA_FUNC_EXIT() \ + IPADMA_DBG_LOW("EXIT\n") + +#ifdef CONFIG_DEBUG_FS +#define IPADMA_MAX_MSG_LEN 1024 +static char dbg_buff[IPADMA_MAX_MSG_LEN]; +static void ipa3_dma_debugfs_init(void); +static void ipa3_dma_debugfs_destroy(void); +#else +static void ipa3_dma_debugfs_init(void) {} +static void ipa3_dma_debugfs_destroy(void) {} +#endif + +/** + * struct ipa3_dma_ctx -IPADMA driver context information + * @enable_ref_cnt: ipa dma enable reference count + * @destroy_pending: destroy ipa_dma after handling all pending memcpy + * @ipa_dma_xfer_wrapper_cache: cache of ipa3_dma_xfer_wrapper structs + * @sync_lock: lock for synchronisation in sync_memcpy + * @async_lock: lock for synchronisation in async_memcpy + * @enable_lock: lock for is_enabled + * @pending_lock: lock for synchronize is_enable and pending_cnt + * @done: no pending works-ipadma can be destroyed + * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer + * @ipa_dma_async_prod_hdl:handle of async memcpy producer + * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer + * @sync_memcpy_pending_cnt: number of pending sync memcopy operations + * @async_memcpy_pending_cnt: number of pending async memcopy operations + * @uc_memcpy_pending_cnt: number of pending uc memcopy operations + * @total_sync_memcpy: total number of sync memcpy (statistics) + * @total_async_memcpy: total number of async memcpy (statistics) + * @total_uc_memcpy: total number of uc memcpy (statistics) + */ +struct ipa3_dma_ctx { + unsigned int enable_ref_cnt; + bool destroy_pending; + struct kmem_cache *ipa_dma_xfer_wrapper_cache; + struct mutex sync_lock; + spinlock_t async_lock; 
+ struct mutex enable_lock; + spinlock_t pending_lock; + struct completion done; + u32 ipa_dma_sync_prod_hdl; + u32 ipa_dma_async_prod_hdl; + u32 ipa_dma_sync_cons_hdl; + u32 ipa_dma_async_cons_hdl; + atomic_t sync_memcpy_pending_cnt; + atomic_t async_memcpy_pending_cnt; + atomic_t uc_memcpy_pending_cnt; + atomic_t total_sync_memcpy; + atomic_t total_async_memcpy; + atomic_t total_uc_memcpy; + struct ipa_mem_buffer ipa_dma_dummy_src_sync; + struct ipa_mem_buffer ipa_dma_dummy_dst_sync; + struct ipa_mem_buffer ipa_dma_dummy_src_async; + struct ipa_mem_buffer ipa_dma_dummy_dst_async; +}; +static struct ipa3_dma_ctx *ipa3_dma_ctx; + +/** + * struct ipa3_dma_init_refcnt_ctrl - IPADMA driver init control information + * @ref_cnt: reference count for initialization operations + * @lock: lock for the reference count + */ +struct ipa3_dma_init_refcnt_ctrl { + unsigned int ref_cnt; + struct mutex lock; +}; +static struct ipa3_dma_init_refcnt_ctrl *ipa3_dma_init_refcnt_ctrl; + +/** + * ipa3_dma_setup() - One time setup for IPA DMA + * + * This function should be called once to set up ipa dma + * by creating the init reference count controller + * + * Return codes: 0: success + * Negative value: failure + */ +int ipa3_dma_setup(void) +{ + IPADMA_FUNC_ENTRY(); + + if (ipa3_dma_init_refcnt_ctrl) { + IPADMA_ERR("Setup already done\n"); + return -EFAULT; + } + + ipa3_dma_init_refcnt_ctrl = + kzalloc(sizeof(*(ipa3_dma_init_refcnt_ctrl)), GFP_KERNEL); + + if (!ipa3_dma_init_refcnt_ctrl) { + IPADMA_ERR("kzalloc error.\n"); + return -ENOMEM; + } + + mutex_init(&ipa3_dma_init_refcnt_ctrl->lock); + + IPADMA_FUNC_EXIT(); + return 0; +} + +/** + * ipa3_dma_shutdown() - Clear setup operations. + * + * Cleanup for the setup function. + * Should be called during IPA driver unloading. + * It assumes all ipa_dma operations are done and ipa_dma is destroyed. + * + * Return codes: None. + */ +void ipa3_dma_shutdown(void) +{ + IPADMA_FUNC_ENTRY(); + + if (!ipa3_dma_init_refcnt_ctrl) + return; + + kfree(ipa3_dma_init_refcnt_ctrl); + ipa3_dma_init_refcnt_ctrl = NULL; + + IPADMA_FUNC_EXIT(); +} + +/** + * ipa3_dma_init() - Initialize IPADMA. + * + * This function initializes all IPADMA internal data and connects the DMA pipes: + * MEMCPY_DMA_SYNC_PROD -> MEMCPY_DMA_SYNC_CONS + * MEMCPY_DMA_ASYNC_PROD -> MEMCPY_DMA_ASYNC_CONS + * + * Can be executed several times (re-entrant) + * + * Return codes: 0: success + * -EFAULT: Mismatch between context existence and init ref_cnt + * -EINVAL: IPA driver is not initialized + * -ENOMEM: allocating memory error + * -EPERM: pipe connection failed + */ +int ipa3_dma_init(void) +{ + struct ipa3_dma_ctx *ipa_dma_ctx_t; + struct ipa_sys_connect_params sys_in; + int res = 0; + int sync_sz; + int async_sz; + + IPADMA_FUNC_ENTRY(); + + if (!ipa3_dma_init_refcnt_ctrl) { + IPADMA_ERR("Setup isn't done yet!\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_dma_init_refcnt_ctrl->lock); + if (ipa3_dma_init_refcnt_ctrl->ref_cnt > 0) { + IPADMA_DBG("Already initialized refcnt=%d\n", + ipa3_dma_init_refcnt_ctrl->ref_cnt); + if (!ipa3_dma_ctx) { + IPADMA_ERR("Context missing. 
refcnt=%d\n", + ipa3_dma_init_refcnt_ctrl->ref_cnt); + res = -EFAULT; + } else { + ipa3_dma_init_refcnt_ctrl->ref_cnt++; + } + goto init_unlock; + } + + if (ipa3_dma_ctx) { + IPADMA_ERR("Context already exist\n"); + res = -EFAULT; + goto init_unlock; + } + + if (!ipa3_is_ready()) { + IPADMA_ERR("IPA is not ready yet\n"); + res = -EINVAL; + goto init_unlock; + } + + ipa_dma_ctx_t = kzalloc(sizeof(*(ipa3_dma_ctx)), GFP_KERNEL); + + if (!ipa_dma_ctx_t) { + res = -ENOMEM; + goto init_unlock; + } + + ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache = + kmem_cache_create("IPA DMA XFER WRAPPER", + sizeof(struct ipa3_dma_xfer_wrapper), 0, 0, NULL); + if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) { + IPAERR(":failed to create ipa dma xfer wrapper cache.\n"); + res = -ENOMEM; + goto fail_mem_ctrl; + } + + mutex_init(&ipa_dma_ctx_t->enable_lock); + spin_lock_init(&ipa_dma_ctx_t->async_lock); + mutex_init(&ipa_dma_ctx_t->sync_lock); + spin_lock_init(&ipa_dma_ctx_t->pending_lock); + init_completion(&ipa_dma_ctx_t->done); + ipa_dma_ctx_t->enable_ref_cnt = 0; + ipa_dma_ctx_t->destroy_pending = false; + atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->uc_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0); + atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0); + atomic_set(&ipa_dma_ctx_t->total_uc_memcpy, 0); + + sync_sz = IPA_SYS_DESC_FIFO_SZ; + async_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ; + /* + * for ipav3.5 we need to double the rings and allocate dummy buffers + * in order to apply the prefetch WA + */ + if (ipa_get_hw_type() == IPA_HW_v3_5) { + sync_sz *= 2; + async_sz *= 2; + + ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base = + dma_alloc_coherent(ipa3_ctx->pdev, + IPA_DMA_DUMMY_BUFF_SZ * 4, + &ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base, + GFP_KERNEL); + + if (!ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base) { + IPAERR("DMA alloc fail %d bytes for prefetch WA\n", + IPA_DMA_DUMMY_BUFF_SZ); + res = -ENOMEM; + goto fail_alloc_dummy; + } + + ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.base = + ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base + + IPA_DMA_DUMMY_BUFF_SZ; + ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.phys_base = + ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base + + IPA_DMA_DUMMY_BUFF_SZ; + ipa_dma_ctx_t->ipa_dma_dummy_src_async.base = + ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.base + + IPA_DMA_DUMMY_BUFF_SZ; + ipa_dma_ctx_t->ipa_dma_dummy_src_async.phys_base = + ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.phys_base + + IPA_DMA_DUMMY_BUFF_SZ; + ipa_dma_ctx_t->ipa_dma_dummy_dst_async.base = + ipa_dma_ctx_t->ipa_dma_dummy_src_async.base + + IPA_DMA_DUMMY_BUFF_SZ; + ipa_dma_ctx_t->ipa_dma_dummy_dst_async.phys_base = + ipa_dma_ctx_t->ipa_dma_dummy_src_async.phys_base + + IPA_DMA_DUMMY_BUFF_SZ; + } + + /* IPADMA SYNC PROD-source for sync memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD; + sys_in.desc_fifo_sz = sync_sz; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS; + sys_in.skip_ep_cfg = false; + if (ipa3_setup_sys_pipe(&sys_in, + &ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) { + IPADMA_ERR(":setup sync prod pipe failed\n"); + res = -EPERM; + goto fail_sync_prod; + } + + /* IPADMA SYNC CONS-destination for sync memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS; + sys_in.desc_fifo_sz = sync_sz; + sys_in.skip_ep_cfg = false; + 
sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; + sys_in.notify = NULL; + sys_in.priv = NULL; + if (ipa3_setup_sys_pipe(&sys_in, + &ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) { + IPADMA_ERR(":setup sync cons pipe failed.\n"); + res = -EPERM; + goto fail_sync_cons; + } + + IPADMA_DBG("SYNC MEMCPY pipes are connected\n"); + + /* IPADMA ASYNC PROD-source for async memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD; + sys_in.desc_fifo_sz = async_sz; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS; + sys_in.skip_ep_cfg = false; + sys_in.notify = NULL; + if (ipa3_setup_sys_pipe(&sys_in, + &ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) { + IPADMA_ERR(":setup async prod pipe failed.\n"); + res = -EPERM; + goto fail_async_prod; + } + + /* IPADMA ASYNC CONS-destination for async memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS; + sys_in.desc_fifo_sz = async_sz; + sys_in.skip_ep_cfg = false; + sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; + sys_in.notify = ipa3_dma_async_memcpy_notify_cb; + sys_in.priv = NULL; + if (ipa3_setup_sys_pipe(&sys_in, + &ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) { + IPADMA_ERR(":setup async cons pipe failed.\n"); + res = -EPERM; + goto fail_async_cons; + } + ipa3_dma_debugfs_init(); + ipa3_dma_ctx = ipa_dma_ctx_t; + ipa3_dma_init_refcnt_ctrl->ref_cnt = 1; + IPADMA_DBG("ASYNC MEMCPY pipes are connected\n"); + + IPADMA_FUNC_EXIT(); + goto init_unlock; + +fail_async_cons: + ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl); +fail_async_prod: + ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl); +fail_sync_cons: + ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl); +fail_sync_prod: + dma_free_coherent(ipa3_ctx->pdev, IPA_DMA_DUMMY_BUFF_SZ * 4, + ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base, + ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base); +fail_alloc_dummy: + kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache); +fail_mem_ctrl: + kfree(ipa_dma_ctx_t); + ipa3_dma_ctx = NULL; +init_unlock: + mutex_unlock(&ipa3_dma_init_refcnt_ctrl->lock); + return res; + +} + +/** + * ipa3_dma_enable() - Vote for IPA clocks. + * + * Can be executed several times (re-entrant) + * + * Return codes: 0: success + * -EINVAL: IPADMA is not initialized + */ +int ipa3_dma_enable(void) +{ + IPADMA_FUNC_ENTRY(); + if ((ipa3_dma_ctx == NULL) || + (ipa3_dma_init_refcnt_ctrl->ref_cnt < 1)) { + IPADMA_ERR("IPADMA isn't initialized, can't enable\n"); + return -EINVAL; + } + mutex_lock(&ipa3_dma_ctx->enable_lock); + if (ipa3_dma_ctx->enable_ref_cnt > 0) { + IPADMA_ERR("Already enabled refcnt=%d\n", + ipa3_dma_ctx->enable_ref_cnt); + ipa3_dma_ctx->enable_ref_cnt++; + mutex_unlock(&ipa3_dma_ctx->enable_lock); + return 0; + } + IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA"); + ipa3_dma_ctx->enable_ref_cnt = 1; + mutex_unlock(&ipa3_dma_ctx->enable_lock); + + IPADMA_FUNC_EXIT(); + return 0; +} + +static bool ipa3_dma_work_pending(void) +{ + if (atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt)) { + IPADMA_DBG("pending sync\n"); + return true; + } + if (atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt)) { + IPADMA_DBG("pending async\n"); + return true; + } + if (atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt)) { + IPADMA_DBG("pending uc\n"); + return true; + } + IPADMA_DBG_LOW("no pending work\n"); + return false; +} + +/** + * ipa3_dma_disable() - Unvote for IPA clocks. + * + * Enter power save mode.
+ * + * Return codes: 0: success + * -EINVAL: IPADMA is not initialized + * -EPERM: Operation not permitted as ipa_dma is already + * disabled + * -EFAULT: cannot disable ipa_dma as there are pending + * memcpy operations + */ +int ipa3_dma_disable(void) +{ + unsigned long flags; + int res = 0; + bool dec_clks = false; + + IPADMA_FUNC_ENTRY(); + if ((ipa3_dma_ctx == NULL) || + (ipa3_dma_init_refcnt_ctrl->ref_cnt < 1)) { + IPADMA_ERR("IPADMA isn't initialized, can't disable\n"); + return -EINVAL; + } + mutex_lock(&ipa3_dma_ctx->enable_lock); + spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags); + if (ipa3_dma_ctx->enable_ref_cnt > 1) { + IPADMA_DBG("Multiple enablement done. refcnt=%d\n", + ipa3_dma_ctx->enable_ref_cnt); + ipa3_dma_ctx->enable_ref_cnt--; + goto completed; + } + + if (ipa3_dma_ctx->enable_ref_cnt == 0) { + IPADMA_ERR("Already disabled\n"); + res = -EPERM; + goto completed; + } + + if (ipa3_dma_work_pending()) { + IPADMA_ERR("There is pending work, can't disable.\n"); + res = -EFAULT; + goto completed; + } + ipa3_dma_ctx->enable_ref_cnt = 0; + dec_clks = true; + IPADMA_FUNC_EXIT(); + +completed: + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + if (dec_clks) + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA"); + mutex_unlock(&ipa3_dma_ctx->enable_lock); + return res; +} + +/** + * ipa3_dma_sync_memcpy() - Perform synchronous memcpy using IPA. + * + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. + * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enabled or + * initialized + * -gsi_status : on GSI failures + * -EFAULT: other + */ +int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len) +{ + int ep_idx; + int res; + int i = 0; + struct ipa3_sys_context *cons_sys; + struct ipa3_sys_context *prod_sys; + struct ipa3_dma_xfer_wrapper *xfer_descr = NULL; + struct ipa3_dma_xfer_wrapper *head_descr = NULL; + struct gsi_xfer_elem prod_xfer_elem; + struct gsi_xfer_elem cons_xfer_elem; + struct gsi_chan_xfer_notify gsi_notify; + unsigned long flags; + bool stop_polling = false; + bool prefetch_wa = false; + + IPADMA_FUNC_ENTRY(); + IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n", + dest, src, len); + if (ipa3_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags); + if (!ipa3_dma_ctx->enable_ref_cnt) { + IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa3_dma_ctx->sync_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_SYNC_CONS); + return -EFAULT; + } + cons_sys = ipa3_ctx->ep[ep_idx].sys; + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); + return -EFAULT; + } + prod_sys = ipa3_ctx->ep[ep_idx].sys; + + xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, + GFP_KERNEL); + if 
(!xfer_descr) { + IPADMA_ERR("failed to alloc xfer descr wrapper\n"); + res = -ENOMEM; + goto fail_mem_alloc; + } + xfer_descr->phys_addr_dest = dest; + xfer_descr->phys_addr_src = src; + xfer_descr->len = len; + init_completion(&xfer_descr->xfer_done); + + mutex_lock(&ipa3_dma_ctx->sync_lock); + list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list); + cons_sys->len++; + cons_xfer_elem.addr = dest; + cons_xfer_elem.len = len; + cons_xfer_elem.type = GSI_XFER_ELEM_DATA; + cons_xfer_elem.flags = GSI_XFER_FLAG_EOT; + + prod_xfer_elem.addr = src; + prod_xfer_elem.len = len; + prod_xfer_elem.type = GSI_XFER_ELEM_DATA; + prod_xfer_elem.xfer_user_data = NULL; + + /* + * when copy is less than 9B we need to chain another dummy + * copy so the total size will be larger (for ipav3.5) + * for the consumer we have to prepare an additional credit + */ + prefetch_wa = ((ipa_get_hw_type() == IPA_HW_v3_5) && + len < IPA_DMA_PREFETCH_WA_THRESHOLD); + if (prefetch_wa) { + cons_xfer_elem.xfer_user_data = NULL; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &cons_xfer_elem, false); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer dest descr res:%d\n", + res); + goto fail_send; + } + cons_xfer_elem.addr = + ipa3_dma_ctx->ipa_dma_dummy_dst_sync.phys_base; + cons_xfer_elem.len = IPA_DMA_DUMMY_BUFF_SZ; + cons_xfer_elem.type = GSI_XFER_ELEM_DATA; + cons_xfer_elem.flags = GSI_XFER_FLAG_EOT; + cons_xfer_elem.xfer_user_data = xfer_descr; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &cons_xfer_elem, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer dummy dest descr res:%d\n", + res); + goto fail_send; + } + prod_xfer_elem.flags = GSI_XFER_FLAG_CHAIN; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &prod_xfer_elem, false); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer src descr res:%d\n", + res); + ipa_assert(); + goto fail_send; + } + prod_xfer_elem.addr = + ipa3_dma_ctx->ipa_dma_dummy_src_sync.phys_base; + prod_xfer_elem.len = IPA_DMA_DUMMY_BUFF_SZ; + prod_xfer_elem.type = GSI_XFER_ELEM_DATA; + prod_xfer_elem.flags = GSI_XFER_FLAG_EOT; + prod_xfer_elem.xfer_user_data = NULL; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &prod_xfer_elem, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer dummy src descr res:%d\n", + res); + ipa_assert(); + goto fail_send; + } + } else { + cons_xfer_elem.xfer_user_data = xfer_descr; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &cons_xfer_elem, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer dest descr res:%d\n", + res); + goto fail_send; + } + prod_xfer_elem.flags = GSI_XFER_FLAG_EOT; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &prod_xfer_elem, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer src descr res:%d\n", + res); + ipa_assert(); + goto fail_send; + } + } + head_descr = list_first_entry(&cons_sys->head_desc_list, + struct ipa3_dma_xfer_wrapper, link); + + /* in case we are not the head of the list, wait for head to wake us */ + if (xfer_descr != head_descr) { + mutex_unlock(&ipa3_dma_ctx->sync_lock); + wait_for_completion(&xfer_descr->xfer_done); + mutex_lock(&ipa3_dma_ctx->sync_lock); + head_descr = list_first_entry(&cons_sys->head_desc_list, + struct ipa3_dma_xfer_wrapper, link); + /* Unexpected transfer sent from HW */ + ipa_assert_on(xfer_descr != head_descr); + } + mutex_unlock(&ipa3_dma_ctx->sync_lock); + + do { + /* wait for transfer to complete */ + res = gsi_poll_channel(cons_sys->ep->gsi_chan_hdl, + &gsi_notify); + if (res == GSI_STATUS_SUCCESS) + 
stop_polling = true; + else if (res != GSI_STATUS_POLL_EMPTY) + IPADMA_ERR( + "Failed: gsi_poll_chanel, returned %d loop#:%d\n", + res, i); + usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX, + IPA_DMA_POLLING_MAX_SLEEP_RX); + i++; + } while (!stop_polling); + + /* for prefetch WA we will receive the length of the dummy + * transfer in the event (because it is the second element) + */ + if (prefetch_wa) + ipa_assert_on(gsi_notify.bytes_xfered != + IPA_DMA_DUMMY_BUFF_SZ); + else + ipa_assert_on(len != gsi_notify.bytes_xfered); + + ipa_assert_on(dest != ((struct ipa3_dma_xfer_wrapper *) + (gsi_notify.xfer_user_data))->phys_addr_dest); + + mutex_lock(&ipa3_dma_ctx->sync_lock); + list_del(&head_descr->link); + cons_sys->len--; + kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); + /* wake the head of the list */ + if (!list_empty(&cons_sys->head_desc_list)) { + head_descr = list_first_entry(&cons_sys->head_desc_list, + struct ipa3_dma_xfer_wrapper, link); + complete(&head_descr->xfer_done); + } + mutex_unlock(&ipa3_dma_ctx->sync_lock); + + atomic_inc(&ipa3_dma_ctx->total_sync_memcpy); + atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt); + if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending()) + complete(&ipa3_dma_ctx->done); + + IPADMA_FUNC_EXIT(); + return res; + +fail_send: + list_del(&xfer_descr->link); + cons_sys->len--; + mutex_unlock(&ipa3_dma_ctx->sync_lock); + kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); +fail_mem_alloc: + atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt); + if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending()) + complete(&ipa3_dma_ctx->done); + return res; +} + +/** + * ipa3_dma_async_memcpy()- Perform asynchronous memcpy using IPA. + * + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. + * @user_cb: callback function to notify the client when the copy was done. + * @user_param: cookie for user_cb. + * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enable or + * initialized + * -gsi_status : on GSI failures + * -EFAULT: descr fifo is full. 
+ */ +int ipa3_dma_async_memcpy(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param) +{ + int ep_idx; + int res = 0; + struct ipa3_dma_xfer_wrapper *xfer_descr = NULL; + struct ipa3_sys_context *prod_sys; + struct ipa3_sys_context *cons_sys; + struct gsi_xfer_elem xfer_elem_cons, xfer_elem_prod; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n", + dest, src, len); + if (ipa3_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + if (!user_cb) { + IPADMA_ERR("null pointer: user_cb\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags); + if (!ipa3_dma_ctx->enable_ref_cnt) { + IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa3_dma_ctx->async_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + return -EFAULT; + } + cons_sys = ipa3_ctx->ep[ep_idx].sys; + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD); + return -EFAULT; + } + prod_sys = ipa3_ctx->ep[ep_idx].sys; + + xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, + GFP_KERNEL); + if (!xfer_descr) { + res = -ENOMEM; + goto fail_mem_alloc; + } + xfer_descr->phys_addr_dest = dest; + xfer_descr->phys_addr_src = src; + xfer_descr->len = len; + xfer_descr->callback = user_cb; + xfer_descr->user1 = user_param; + + spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags); + list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list); + cons_sys->len++; + /* + * when copy is less than 9B we need to chain another dummy + * copy so the total size will be larger (for ipav3.5) + */ + if ((ipa_get_hw_type() == IPA_HW_v3_5) && len < + IPA_DMA_PREFETCH_WA_THRESHOLD) { + xfer_elem_cons.addr = dest; + xfer_elem_cons.len = len; + xfer_elem_cons.type = GSI_XFER_ELEM_DATA; + xfer_elem_cons.flags = GSI_XFER_FLAG_EOT; + xfer_elem_cons.xfer_user_data = NULL; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_cons, false); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on dest descr res: %d\n", + res); + goto fail_send; + } + xfer_elem_cons.addr = + ipa3_dma_ctx->ipa_dma_dummy_dst_async.phys_base; + xfer_elem_cons.len = IPA_DMA_DUMMY_BUFF_SZ; + xfer_elem_cons.type = GSI_XFER_ELEM_DATA; + xfer_elem_cons.flags = GSI_XFER_FLAG_EOT; + xfer_elem_cons.xfer_user_data = xfer_descr; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_cons, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on dummy dest descr res: %d\n", + res); + goto fail_send; + } + + xfer_elem_prod.addr = src; + xfer_elem_prod.len = len; + xfer_elem_prod.type = GSI_XFER_ELEM_DATA; + xfer_elem_prod.flags = GSI_XFER_FLAG_CHAIN; + xfer_elem_prod.xfer_user_data = NULL; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_prod, false); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on src descr res: %d\n", + res); + 
ipa_assert(); + goto fail_send; + } + xfer_elem_prod.addr = + ipa3_dma_ctx->ipa_dma_dummy_src_async.phys_base; + xfer_elem_prod.len = IPA_DMA_DUMMY_BUFF_SZ; + xfer_elem_prod.type = GSI_XFER_ELEM_DATA; + xfer_elem_prod.flags = GSI_XFER_FLAG_EOT; + xfer_elem_prod.xfer_user_data = NULL; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_prod, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on dummy src descr res: %d\n", + res); + ipa_assert(); + goto fail_send; + } + } else { + + xfer_elem_cons.addr = dest; + xfer_elem_cons.len = len; + xfer_elem_cons.type = GSI_XFER_ELEM_DATA; + xfer_elem_cons.flags = GSI_XFER_FLAG_EOT; + xfer_elem_cons.xfer_user_data = xfer_descr; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_cons, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on dummy dest descr res: %d\n", + res); + ipa_assert(); + goto fail_send; + } + xfer_elem_prod.addr = src; + xfer_elem_prod.len = len; + xfer_elem_prod.type = GSI_XFER_ELEM_DATA; + xfer_elem_prod.flags = GSI_XFER_FLAG_EOT; + xfer_elem_prod.xfer_user_data = NULL; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_prod, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on dummy src descr res: %d\n", + res); + ipa_assert(); + goto fail_send; + } + + } + spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags); + IPADMA_FUNC_EXIT(); + return res; + +fail_send: + list_del(&xfer_descr->link); + spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags); + kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); +fail_mem_alloc: + atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt); + if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending()) + complete(&ipa3_dma_ctx->done); + return res; +} + +/** + * ipa3_dma_uc_memcpy() - Perform a memcpy action using IPA uC + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. + * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enable or + * initialized + * -EBADF: IPA uC is not loaded + */ +int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) +{ + int res; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + if (ipa3_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + + spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags); + if (!ipa3_dma_ctx->enable_ref_cnt) { + IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa3_dma_ctx->uc_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + + res = ipa3_uc_memcpy(dest, src, len); + if (res) { + IPADMA_ERR("ipa3_uc_memcpy failed %d\n", res); + goto dec_and_exit; + } + + atomic_inc(&ipa3_dma_ctx->total_uc_memcpy); + res = 0; +dec_and_exit: + atomic_dec(&ipa3_dma_ctx->uc_memcpy_pending_cnt); + if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending()) + complete(&ipa3_dma_ctx->done); + IPADMA_FUNC_EXIT(); + return res; +} + +/** + * ipa3_dma_destroy() -teardown IPADMA pipes and release ipadma. + * + * this is a blocking function, returns just after destroying IPADMA. 
+ */ +void ipa3_dma_destroy(void) +{ + int res = 0; + + IPADMA_FUNC_ENTRY(); + + if (!ipa3_dma_init_refcnt_ctrl) { + IPADMA_ERR("Setup isn't done\n"); + return; + } + + mutex_lock(&ipa3_dma_init_refcnt_ctrl->lock); + if (ipa3_dma_init_refcnt_ctrl->ref_cnt > 1) { + IPADMA_DBG("Multiple initialization done. refcnt=%d\n", + ipa3_dma_init_refcnt_ctrl->ref_cnt); + ipa3_dma_init_refcnt_ctrl->ref_cnt--; + goto completed; + } + + if ((!ipa3_dma_ctx) || (ipa3_dma_init_refcnt_ctrl->ref_cnt == 0)) { + IPADMA_ERR("IPADMA isn't initialized ctx=%pK\n", ipa3_dma_ctx); + goto completed; + } + + if (ipa3_dma_work_pending()) { + ipa3_dma_ctx->destroy_pending = true; + IPADMA_DBG("There are pending memcpy, wait for completion\n"); + wait_for_completion(&ipa3_dma_ctx->done); + } + + if (ipa3_dma_ctx->enable_ref_cnt > 0) { + IPADMA_ERR("IPADMA still enabled\n"); + goto completed; + } + + res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_cons_hdl); + if (res) + IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n"); + ipa3_dma_ctx->ipa_dma_async_cons_hdl = 0; + res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_cons_hdl); + if (res) + IPADMA_ERR("teardown IPADMA SYNC CONS failed\n"); + ipa3_dma_ctx->ipa_dma_sync_cons_hdl = 0; + res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_prod_hdl); + if (res) + IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n"); + ipa3_dma_ctx->ipa_dma_async_prod_hdl = 0; + res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_prod_hdl); + if (res) + IPADMA_ERR("teardown IPADMA SYNC PROD failed\n"); + ipa3_dma_ctx->ipa_dma_sync_prod_hdl = 0; + + ipa3_dma_debugfs_destroy(); + kmem_cache_destroy(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache); + dma_free_coherent(ipa3_ctx->pdev, IPA_DMA_DUMMY_BUFF_SZ * 4, + ipa3_dma_ctx->ipa_dma_dummy_src_sync.base, + ipa3_dma_ctx->ipa_dma_dummy_src_sync.phys_base); + kfree(ipa3_dma_ctx); + ipa3_dma_ctx = NULL; + + ipa3_dma_init_refcnt_ctrl->ref_cnt = 0; + IPADMA_FUNC_EXIT(); + +completed: + mutex_unlock(&ipa3_dma_init_refcnt_ctrl->lock); +} + +/** + * ipa3_dma_async_memcpy_notify_cb() - Callback function which will be called + * by IPA driver after getting notified that an Rx operation has completed (data was + * written to dest descriptor on async_cons ep). + * + * @priv - not in use. + * @evt - event name - IPA_RECEIVE. + * @data - the ipa_mem_buffer.
+ */ +void ipa3_dma_async_memcpy_notify_cb(void *priv + , enum ipa_dp_evt_type evt, unsigned long data) +{ + int ep_idx = 0; + struct ipa3_dma_xfer_wrapper *xfer_descr_expected; + struct ipa3_sys_context *sys; + unsigned long flags; + struct ipa_mem_buffer *mem_info; + + IPADMA_FUNC_ENTRY(); + + mem_info = (struct ipa_mem_buffer *)data; + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + if (ep_idx < 0) { + IPADMA_ERR("IPA Client mapping failed\n"); + return; + } + sys = ipa3_ctx->ep[ep_idx].sys; + + spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags); + xfer_descr_expected = list_first_entry(&sys->head_desc_list, + struct ipa3_dma_xfer_wrapper, link); + list_del(&xfer_descr_expected->link); + sys->len--; + spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags); + atomic_inc(&ipa3_dma_ctx->total_async_memcpy); + atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt); + xfer_descr_expected->callback(xfer_descr_expected->user1); + + kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, + xfer_descr_expected); + + if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending()) + complete(&ipa3_dma_ctx->done); + + IPADMA_FUNC_EXIT(); +} + +#ifdef CONFIG_DEBUG_FS +static struct dentry *dent; +static struct dentry *dfile_info; + +static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes = 0; + + if (!ipa3_dma_init_refcnt_ctrl) { + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Setup was not done\n"); + goto completed; + + } + + if (!ipa3_dma_ctx) { + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Status:\n Not initialized (ref_cnt=%d)\n", + ipa3_dma_init_refcnt_ctrl->ref_cnt); + } else { + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Status:\n Initialized (ref_cnt=%d)\n", + ipa3_dma_init_refcnt_ctrl->ref_cnt); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + " %s (ref_cnt=%d)\n", + (ipa3_dma_ctx->enable_ref_cnt > 0) ? 
+ "Enabled" : "Disabled", + ipa3_dma_ctx->enable_ref_cnt); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Statistics:\n total sync memcpy: %d\n ", + atomic_read(&ipa3_dma_ctx->total_sync_memcpy)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "total async memcpy: %d\n ", + atomic_read(&ipa3_dma_ctx->total_async_memcpy)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "total uc memcpy: %d\n ", + atomic_read(&ipa3_dma_ctx->total_uc_memcpy)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending sync memcpy jobs: %d\n ", + atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending async memcpy jobs: %d\n ", + atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending uc memcpy jobs: %d\n", + atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt)); + } + +completed: + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa3_dma_debugfs_reset_statistics(struct file *file, + const char __user *ubuf, + size_t count, + loff_t *ppos) +{ + unsigned long missing; + s8 in_num = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &in_num)) + return -EFAULT; + switch (in_num) { + case 0: + if (ipa3_dma_work_pending()) + IPADMA_ERR("Note, there are pending memcpy\n"); + + atomic_set(&ipa3_dma_ctx->total_async_memcpy, 0); + atomic_set(&ipa3_dma_ctx->total_sync_memcpy, 0); + break; + default: + IPADMA_ERR("invalid argument: To reset statistics echo 0\n"); + break; + } + return count; +} + +const struct file_operations ipa3_ipadma_stats_ops = { + .read = ipa3_dma_debugfs_read, + .write = ipa3_dma_debugfs_reset_statistics, +}; + +static void ipa3_dma_debugfs_init(void) +{ + const mode_t read_write_mode = 0666; + + dent = debugfs_create_dir("ipa_dma", 0); + if (IS_ERR(dent)) { + IPADMA_ERR("fail to create folder ipa_dma\n"); + return; + } + + dfile_info = + debugfs_create_file("info", read_write_mode, dent, + 0, &ipa3_ipadma_stats_ops); + if (!dfile_info || IS_ERR(dfile_info)) { + IPADMA_ERR("fail to create file stats\n"); + goto fail; + } + return; +fail: + debugfs_remove_recursive(dent); +} + +static void ipa3_dma_debugfs_destroy(void) +{ + debugfs_remove_recursive(dent); +} + +#endif /* !CONFIG_DEBUG_FS */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c new file mode 100644 index 000000000000..1eaccdb2571b --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -0,0 +1,4232 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipa_trace.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" + +#define IPA_WAN_AGGR_PKT_CNT 5 +#define IPA_WAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_WAN_AGGR_PKT_CNT) +#define IPA_LAST_DESC_CNT 0xFFFF +#define POLLING_INACTIVITY_RX 40 +#define POLLING_MIN_SLEEP_RX 1010 +#define POLLING_MAX_SLEEP_RX 1050 +#define POLLING_INACTIVITY_TX 40 +#define POLLING_MIN_SLEEP_TX 400 +#define POLLING_MAX_SLEEP_TX 500 +#define SUSPEND_MIN_SLEEP_RX 1000 +#define SUSPEND_MAX_SLEEP_RX 1005 +/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */ +#define IPA_MTU 1500 +#define IPA_GENERIC_AGGR_BYTE_LIMIT 6 +#define IPA_GENERIC_AGGR_TIME_LIMIT 500 /* 0.5msec */ +#define IPA_GENERIC_AGGR_PKT_LIMIT 0 + +#define IPA_GSB_AGGR_BYTE_LIMIT 14 +#define IPA_GSB_RX_BUFF_BASE_SZ 16384 + +#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192 +#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\ + (X) + NET_SKB_PAD) +\ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\ + (IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X))) +#define IPA_GENERIC_RX_BUFF_LIMIT (\ + IPA_REAL_GENERIC_RX_BUFF_SZ(\ + IPA_GENERIC_RX_BUFF_BASE_SZ) -\ + IPA_GENERIC_RX_BUFF_BASE_SZ) + +/* less 1 nominal MTU (1500 bytes) rounded to units of KB */ +#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000) + +#define IPA_RX_BUFF_CLIENT_HEADROOM 256 + +#define IPA_WLAN_RX_POOL_SZ 100 +#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5 +#define IPA_WLAN_RX_BUFF_SZ 2048 +#define IPA_WLAN_COMM_RX_POOL_LOW 100 +#define IPA_WLAN_COMM_RX_POOL_HIGH 900 + +#define IPA_ODU_RX_BUFF_SZ 2048 +#define IPA_ODU_RX_POOL_SZ 64 + +#define IPA_ODL_RX_BUFF_SZ (16 * 1024) + +#define IPA_GSI_MAX_CH_LOW_WEIGHT 15 +#define IPA_GSI_EVT_RING_INT_MODT (16) /* 0.5ms under 32KHz clock */ +#define IPA_GSI_EVT_RING_INT_MODC (20) + +#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10 +/* The below virtual channel cannot be used by any entity */ +#define IPA_GSI_CH_20_WA_VIRT_CHAN 29 + +#define IPA_DEFAULT_SYS_YELLOW_WM 32 +#define IPA_REPL_XFER_THRESH 20 + +#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000) + +#define IPA_APPS_BW_FOR_PM 700 + +#define IPA_SEND_MAX_DESC (20) + +#define IPA_EOT_THRESH 32 + +static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags); +static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys); +static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys); +static void ipa3_replenish_rx_work_func(struct work_struct *work); +static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys); +static void ipa3_wq_handle_rx(struct work_struct *work); +static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size); +static void ipa3_wq_rx_napi_chain(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info, uint32_t num); +static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, + u32 size); +static int ipa3_assign_policy(struct ipa_sys_connect_params *in, + struct ipa3_sys_context *sys); +static void ipa3_cleanup_rx(struct ipa3_sys_context *sys); +static void ipa3_wq_rx_avail(struct work_struct *work); +static void ipa3_alloc_wlan_rx_common_cache(u32 size); +static void ipa3_cleanup_wlan_rx_common_cache(void); +static void ipa3_wq_repl_rx(struct work_struct *work); +static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info); +static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in, + struct ipa3_ep_context *ep); +static int 
ipa_populate_tag_field(struct ipa3_desc *desc, + struct ipa3_tx_pkt_wrapper *tx_pkt, + struct ipahal_imm_cmd_pyld **tag_pyld_ret); +static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info); +static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info, int expected_num, int *actual_num); +static unsigned long tag_to_pointer_wa(uint64_t tag); +static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt); + +static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit); + +static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys, + struct ipa3_tx_pkt_wrapper *tx_pkt) +{ + struct ipa3_tx_pkt_wrapper *next_pkt; + int i, cnt; + + if (unlikely(tx_pkt == NULL)) { + IPAERR("tx_pkt is NULL\n"); + return; + } + + cnt = tx_pkt->cnt; + IPADBG_LOW("cnt: %d\n", cnt); + for (i = 0; i < cnt; i++) { + spin_lock_bh(&sys->spinlock); + if (unlikely(list_empty(&sys->head_desc_list))) { + spin_unlock_bh(&sys->spinlock); + return; + } + next_pkt = list_next_entry(tx_pkt, link); + list_del(&tx_pkt->link); + sys->len--; + spin_unlock_bh(&sys->spinlock); + if (!tx_pkt->no_unmap_dma) { + if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) { + dma_unmap_single(ipa3_ctx->pdev, + tx_pkt->mem.phys_base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } else { + dma_unmap_page(ipa3_ctx->pdev, + tx_pkt->mem.phys_base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } + } + if (tx_pkt->callback) + tx_pkt->callback(tx_pkt->user1, tx_pkt->user2); + + kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt); + tx_pkt = next_pkt; + } +} + +static void ipa3_wq_write_done_status(int src_pipe, + struct ipa3_tx_pkt_wrapper *tx_pkt) +{ + struct ipa3_sys_context *sys; + + WARN_ON(src_pipe >= ipa3_ctx->ipa_num_pipes); + + if (!ipa3_ctx->ep[src_pipe].status.status_en) + return; + + sys = ipa3_ctx->ep[src_pipe].sys; + if (!sys) + return; + + ipa3_wq_write_done_common(sys, tx_pkt); +} + +/** + * ipa_write_done() - this function will be (eventually) called when a Tx + * operation is complete + * * @work: work_struct used by the work queue + * + * Will be called in deferred context. 
+ * - invoke the callback supplied by the client who sent this command + * - iterate over all packets and validate that + * the order for sent packet is the same as expected + * - delete all the tx packet descriptors from the system + * pipe context (not needed anymore) + */ +static void ipa3_wq_write_done(struct work_struct *work) +{ + struct ipa3_tx_pkt_wrapper *tx_pkt; + struct ipa3_sys_context *sys; + struct ipa3_tx_pkt_wrapper *this_pkt; + + tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work); + sys = tx_pkt->sys; + spin_lock_bh(&sys->spinlock); + this_pkt = list_first_entry(&sys->head_desc_list, + struct ipa3_tx_pkt_wrapper, link); + while (tx_pkt != this_pkt) { + spin_unlock_bh(&sys->spinlock); + ipa3_wq_write_done_common(sys, this_pkt); + spin_lock_bh(&sys->spinlock); + this_pkt = list_first_entry(&sys->head_desc_list, + struct ipa3_tx_pkt_wrapper, link); + } + spin_unlock_bh(&sys->spinlock); + ipa3_wq_write_done_common(sys, tx_pkt); +} + + +static void ipa3_send_nop_desc(struct work_struct *work) +{ + struct ipa3_sys_context *sys = container_of(work, + struct ipa3_sys_context, work); + struct gsi_xfer_elem nop_xfer; + struct ipa3_tx_pkt_wrapper *tx_pkt; + + IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl); + tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL); + if (!tx_pkt) { + queue_work(sys->wq, &sys->work); + return; + } + + INIT_LIST_HEAD(&tx_pkt->link); + tx_pkt->cnt = 1; + INIT_WORK(&tx_pkt->work, ipa3_wq_write_done); + tx_pkt->no_unmap_dma = true; + tx_pkt->sys = sys; + spin_lock_bh(&sys->spinlock); + if (unlikely(!sys->nop_pending)) { + spin_unlock_bh(&sys->spinlock); + kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt); + return; + } + list_add_tail(&tx_pkt->link, &sys->head_desc_list); + sys->nop_pending = false; + + memset(&nop_xfer, 0, sizeof(nop_xfer)); + nop_xfer.type = GSI_XFER_ELEM_NOP; + nop_xfer.flags = GSI_XFER_FLAG_EOT; + nop_xfer.xfer_user_data = tx_pkt; + if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) { + spin_unlock_bh(&sys->spinlock); + IPAERR("gsi_queue_xfer for ch:%lu failed\n", + sys->ep->gsi_chan_hdl); + queue_work(sys->wq, &sys->work); + return; + } + spin_unlock_bh(&sys->spinlock); + + /* make sure TAG process is sent before clocks are gated */ + ipa3_ctx->tag_process_before_gating = true; + +} + + +/** + * ipa3_send() - Send multiple descriptors in one HW transaction + * @sys: system pipe context + * @num_desc: number of packets + * @desc: packets to send (may be immediate command or data) + * @in_atomic: whether caller is in atomic context + * + * This function is used for GPI connection. 
+ * - ipa3_tx_pkt_wrapper will be used for each ipa + * descriptor (allocated from wrappers cache) + * - The wrapper struct will be configured for each ipa-desc payload and will + * contain information which will be later used by the user callbacks + * - Each packet (command or data) that will be sent will also be saved in + * ipa3_sys_context for later check that all data was sent + * + * Return codes: 0: success, -EFAULT: failure + */ +int ipa3_send(struct ipa3_sys_context *sys, + u32 num_desc, + struct ipa3_desc *desc, + bool in_atomic) +{ + struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first; + struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL; + struct ipa3_tx_pkt_wrapper *next_pkt; + struct gsi_xfer_elem gsi_xfer[IPA_SEND_MAX_DESC]; + int i = 0; + int j; + int result; + u32 mem_flag = GFP_ATOMIC; + const struct ipa_gsi_ep_config *gsi_ep_cfg; + bool send_nop = false; + unsigned int max_desc; + + if (unlikely(!in_atomic)) + mem_flag = GFP_KERNEL; + + gsi_ep_cfg = ipa3_get_gsi_ep_info(sys->ep->client); + if (unlikely(!gsi_ep_cfg)) { + IPAERR("failed to get gsi EP config for client=%d\n", + sys->ep->client); + return -EFAULT; + } + if (unlikely(num_desc > IPA_SEND_MAX_DESC)) { + IPAERR("max descriptors reached need=%d max=%d\n", + num_desc, IPA_SEND_MAX_DESC); + WARN_ON(1); + return -EPERM; + } + + max_desc = gsi_ep_cfg->ipa_if_tlv; + if (gsi_ep_cfg->prefetch_mode == GSI_SMART_PRE_FETCH || + gsi_ep_cfg->prefetch_mode == GSI_FREE_PRE_FETCH) + max_desc -= gsi_ep_cfg->prefetch_threshold; + + if (unlikely(num_desc > max_desc)) { + IPAERR("Too many chained descriptors need=%d max=%d\n", + num_desc, max_desc); + WARN_ON(1); + return -EPERM; + } + + /* initialize only the xfers we use */ + memset(gsi_xfer, 0, sizeof(gsi_xfer[0]) * num_desc); + + spin_lock_bh(&sys->spinlock); + + for (i = 0; i < num_desc; i++) { + tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, + GFP_ATOMIC); + if (!tx_pkt) { + IPAERR("failed to alloc tx wrapper\n"); + result = -ENOMEM; + goto failure; + } + INIT_LIST_HEAD(&tx_pkt->link); + + if (i == 0) { + tx_pkt_first = tx_pkt; + tx_pkt->cnt = num_desc; + INIT_WORK(&tx_pkt->work, ipa3_wq_write_done); + } + + /* populate tag field */ + if (desc[i].is_tag_status) { + if (ipa_populate_tag_field(&desc[i], tx_pkt, + &tag_pyld_ret)) { + IPAERR("Failed to populate tag field\n"); + result = -EFAULT; + goto failure_dma_map; + } + } + + tx_pkt->type = desc[i].type; + + if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) { + tx_pkt->mem.base = desc[i].pyld; + tx_pkt->mem.size = desc[i].len; + + if (!desc[i].dma_address_valid) { + tx_pkt->mem.phys_base = + dma_map_single(ipa3_ctx->pdev, + tx_pkt->mem.base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } else { + tx_pkt->mem.phys_base = + desc[i].dma_address; + tx_pkt->no_unmap_dma = true; + } + } else { + tx_pkt->mem.base = desc[i].frag; + tx_pkt->mem.size = desc[i].len; + + if (!desc[i].dma_address_valid) { + tx_pkt->mem.phys_base = + skb_frag_dma_map(ipa3_ctx->pdev, + desc[i].frag, + 0, tx_pkt->mem.size, + DMA_TO_DEVICE); + } else { + tx_pkt->mem.phys_base = + desc[i].dma_address; + tx_pkt->no_unmap_dma = true; + } + } + if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) { + IPAERR("failed to do dma map.\n"); + result = -EFAULT; + goto failure_dma_map; + } + + tx_pkt->sys = sys; + tx_pkt->callback = desc[i].callback; + tx_pkt->user1 = desc[i].user1; + tx_pkt->user2 = desc[i].user2; + + list_add_tail(&tx_pkt->link, &sys->head_desc_list); + + gsi_xfer[i].addr = tx_pkt->mem.phys_base; + + /* + * Special treatment for immediate 
commands, where + * the structure of the descriptor is different + */ + if (desc[i].type == IPA_IMM_CMD_DESC) { + gsi_xfer[i].len = desc[i].opcode; + gsi_xfer[i].type = + GSI_XFER_ELEM_IMME_CMD; + } else { + gsi_xfer[i].len = desc[i].len; + gsi_xfer[i].type = + GSI_XFER_ELEM_DATA; + } + + if (i == (num_desc - 1)) { + if (!sys->use_comm_evt_ring || + (sys->pkt_sent % IPA_EOT_THRESH == 0)) { + gsi_xfer[i].flags |= + GSI_XFER_FLAG_EOT; + gsi_xfer[i].flags |= + GSI_XFER_FLAG_BEI; + } else { + send_nop = true; + } + gsi_xfer[i].xfer_user_data = + tx_pkt_first; + } else { + gsi_xfer[i].flags |= + GSI_XFER_FLAG_CHAIN; + } + } + + IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl); + result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc, + gsi_xfer, true); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("GSI xfer failed.\n"); + result = -EFAULT; + goto failure; + } + + if (send_nop && !sys->nop_pending) + sys->nop_pending = true; + else + send_nop = false; + + sys->pkt_sent++; + spin_unlock_bh(&sys->spinlock); + + /* set the timer for sending the NOP descriptor */ + if (send_nop) { + ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS); + + IPADBG_LOW("scheduling timer for ch %lu\n", + sys->ep->gsi_chan_hdl); + hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL); + } + + /* make sure TAG process is sent before clocks are gated */ + ipa3_ctx->tag_process_before_gating = true; + + return 0; + +failure_dma_map: + kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt); + +failure: + ipahal_destroy_imm_cmd(tag_pyld_ret); + tx_pkt = tx_pkt_first; + for (j = 0; j < i; j++) { + next_pkt = list_next_entry(tx_pkt, link); + list_del(&tx_pkt->link); + + if (!tx_pkt->no_unmap_dma) { + if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) { + dma_unmap_single(ipa3_ctx->pdev, + tx_pkt->mem.phys_base, + tx_pkt->mem.size, DMA_TO_DEVICE); + } else { + dma_unmap_page(ipa3_ctx->pdev, + tx_pkt->mem.phys_base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } + } + kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt); + tx_pkt = next_pkt; + } + + spin_unlock_bh(&sys->spinlock); + return result; +} + +/** + * ipa3_send_one() - Send a single descriptor + * @sys: system pipe context + * @desc: descriptor to send + * @in_atomic: whether caller is in atomic context + * + * - Allocate tx_packet wrapper + * - transfer data to the IPA + * - after the transfer was done the SPS will + * notify the sending user via ipa_sps_irq_comp_tx() + * + * Return codes: 0: success, -EFAULT: failure + */ +int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc, + bool in_atomic) +{ + return ipa3_send(sys, 1, desc, in_atomic); +} + +/** + * ipa3_transport_irq_cmd_ack - callback function which will be called by + * the transport driver after an immediate command is complete. + * @user1: pointer to the descriptor of the transfer + * @user2: + * + * Complete the immediate commands completion object, this will release the + * thread which waits on this completion object (ipa3_send_cmd()) + */ +static void ipa3_transport_irq_cmd_ack(void *user1, int user2) +{ + struct ipa3_desc *desc = (struct ipa3_desc *)user1; + + if (WARN(!desc, "desc is NULL")) + return; + + IPADBG_LOW("got ack for cmd=%d\n", desc->opcode); + complete(&desc->xfer_done); +} + +/** + * ipa3_transport_irq_cmd_ack_free - callback function which will be + * called by the transport driver after an immediate command is complete. + * This function will also free the completion object once it is done. 
+ * @tag_comp: pointer to the completion object + * @ignored: parameter not used + * + * Complete the immediate commands completion object, this will release the + * thread which waits on this completion object (ipa3_send_cmd()) + */ +static void ipa3_transport_irq_cmd_ack_free(void *tag_comp, int ignored) +{ + struct ipa3_tag_completion *comp = tag_comp; + + if (!comp) { + IPAERR("comp is NULL\n"); + return; + } + + complete(&comp->comp); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); +} + +/** + * ipa3_send_cmd - send immediate commands + * @num_desc: number of descriptors within the desc struct + * @descr: descriptor structure + * + * Function will block till command gets ACK from IPA HW, caller needs + * to free any resources it allocated after function returns + * The callback in ipa3_desc should not be set by the caller + * for this function. + */ +int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr) +{ + struct ipa3_desc *desc; + int i, result = 0; + struct ipa3_sys_context *sys; + int ep_idx; + + for (i = 0; i < num_desc; i++) + IPADBG("sending imm cmd %d\n", descr[i].opcode); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (-1 == ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_CMD_PROD); + return -EFAULT; + } + + sys = ipa3_ctx->ep[ep_idx].sys; + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (num_desc == 1) { + init_completion(&descr->xfer_done); + + if (descr->callback || descr->user1) + WARN_ON(1); + + descr->callback = ipa3_transport_irq_cmd_ack; + descr->user1 = descr; + if (ipa3_send_one(sys, descr, true)) { + IPAERR("fail to send immediate command\n"); + result = -EFAULT; + goto bail; + } + wait_for_completion(&descr->xfer_done); + } else { + desc = &descr[num_desc - 1]; + init_completion(&desc->xfer_done); + + if (desc->callback || desc->user1) + WARN_ON(1); + + desc->callback = ipa3_transport_irq_cmd_ack; + desc->user1 = desc; + if (ipa3_send(sys, num_desc, descr, true)) { + IPAERR("fail to send multiple immediate command set\n"); + result = -EFAULT; + goto bail; + } + wait_for_completion(&desc->xfer_done); + } + +bail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +/** + * ipa3_send_cmd_timeout - send immediate commands with limited time + * waiting for ACK from IPA HW + * @num_desc: number of descriptors within the desc struct + * @descr: descriptor structure + * @timeout: millisecond to wait till get ACK from IPA HW + * + * Function will block till command gets ACK from IPA HW or timeout. + * Caller needs to free any resources it allocated after function returns + * The callback in ipa3_desc should not be set by the caller + * for this function. 
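+ *
+ * A minimal usage sketch (illustrative only; descriptor setup is the
+ * same as for ipa3_send_cmd() and is omitted here):
+ *
+ *   struct ipa3_desc desc = { 0 };
+ *
+ *   ... fill desc with an immediate command payload ...
+ *   res = ipa3_send_cmd_timeout(1, &desc, 100);
+ *
+ * Note that an expired timeout is only logged; it is not reflected in
+ * the return value, which covers queuing failures only.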
+ */ +int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout) +{ + struct ipa3_desc *desc; + int i, result = 0; + struct ipa3_sys_context *sys; + int ep_idx; + int completed; + struct ipa3_tag_completion *comp; + + for (i = 0; i < num_desc; i++) + IPADBG("sending imm cmd %d\n", descr[i].opcode); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (-1 == ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_CMD_PROD); + return -EFAULT; + } + + comp = kzalloc(sizeof(*comp), GFP_ATOMIC); + if (!comp) + return -ENOMEM; + + init_completion(&comp->comp); + + /* completion needs to be released from both here and in ack callback */ + atomic_set(&comp->cnt, 2); + + sys = ipa3_ctx->ep[ep_idx].sys; + + if (num_desc == 1) { + if (descr->callback || descr->user1) + WARN_ON(1); + + descr->callback = ipa3_transport_irq_cmd_ack_free; + descr->user1 = comp; + if (ipa3_send_one(sys, descr, true)) { + IPAERR("fail to send immediate command\n"); + kfree(comp); + result = -EFAULT; + goto bail; + } + } else { + desc = &descr[num_desc - 1]; + + if (desc->callback || desc->user1) + WARN_ON(1); + + desc->callback = ipa3_transport_irq_cmd_ack_free; + desc->user1 = comp; + if (ipa3_send(sys, num_desc, descr, true)) { + IPAERR("fail to send multiple immediate command set\n"); + kfree(comp); + result = -EFAULT; + goto bail; + } + } + + completed = wait_for_completion_timeout( + &comp->comp, msecs_to_jiffies(timeout)); + if (!completed) + IPADBG("timeout waiting for imm-cmd ACK\n"); + + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + +bail: + return result; +} + +/** + * ipa3_handle_rx_core() - The core functionality of packet reception. This + * function is read from multiple code paths. + * + * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN + * endpoint. The function runs as long as there are packets in the pipe. + * For each packet: + * - Disconnect the packet from the system pipe linked list + * - Unmap the packets skb, make it non DMAable + * - Free the packet from the cache + * - Prepare a proper skb + * - Call the endpoints notify function, passing the skb in the parameters + * - Replenish the rx cache + */ +static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all, + bool in_poll_state) +{ + int ret; + int cnt = 0; + struct ipa_mem_buffer mem_info = { 0 }; + + while ((in_poll_state ? atomic_read(&sys->curr_polling_state) : + !atomic_read(&sys->curr_polling_state))) { + if (cnt && !process_all) + break; + + ret = ipa_poll_gsi_pkt(sys, &mem_info); + if (ret) + break; + + if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client)) + ipa3_dma_memcpy_notify(sys, &mem_info); + else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client)) + ipa3_wlan_wq_rx_common(sys, mem_info.size); + else + ipa3_wq_rx_common(sys, mem_info.size); + + ++cnt; + } + return cnt; +} + +/** + * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode + */ +static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys) +{ + int ret; + + atomic_set(&sys->curr_polling_state, 0); + ipa3_dec_release_wakelock(); + ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl, + GSI_CHAN_MODE_CALLBACK); + if (ret != GSI_STATUS_SUCCESS) { + if (ret == -GSI_STATUS_PENDING_IRQ) { + ipa3_inc_acquire_wakelock(); + atomic_set(&sys->curr_polling_state, 1); + } else { + IPAERR("Failed to switch to intr mode.\n"); + } + } + + return ret; +} + +/** + * ipa3_handle_rx() - handle packet reception. This function is executed in the + * context of a work queue. 
+ * @work: work struct needed by the work queue + * + * ipa3_handle_rx_core() is run in polling mode. After all packets has been + * received, the driver switches back to interrupt mode. + */ +static void ipa3_handle_rx(struct ipa3_sys_context *sys) +{ + int inactive_cycles; + int cnt; + int ret; + + if (ipa3_ctx->use_ipa_pm) + ipa_pm_activate_sync(sys->pm_hdl); + else + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); +start_poll: + inactive_cycles = 0; + do { + cnt = ipa3_handle_rx_core(sys, true, true); + if (cnt == 0) + inactive_cycles++; + else + inactive_cycles = 0; + + trace_idle_sleep_enter3(sys->ep->client); + usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX); + trace_idle_sleep_exit3(sys->ep->client); + + /* + * if pipe is out of buffers there is no point polling for + * completed descs; release the worker so delayed work can + * run in a timely manner + */ + if (sys->len == 0) + break; + + } while (inactive_cycles <= POLLING_INACTIVITY_RX); + + trace_poll_to_intr3(sys->ep->client); + ret = ipa3_rx_switch_to_intr_mode(sys); + if (ret == -GSI_STATUS_PENDING_IRQ) + goto start_poll; + + if (ipa3_ctx->use_ipa_pm) + ipa_pm_deferred_deactivate(sys->pm_hdl); + else + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work) +{ + struct delayed_work *dwork; + struct ipa3_sys_context *sys; + + dwork = container_of(work, struct delayed_work, work); + sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work); + + if (sys->napi_obj) { + /* interrupt mode is done in ipa3_rx_poll context */ + ipa_assert(); + } else + ipa3_handle_rx(sys); +} + +enum hrtimer_restart ipa3_ring_doorbell_timer_fn(struct hrtimer *param) +{ + struct ipa3_sys_context *sys = container_of(param, + struct ipa3_sys_context, db_timer); + + queue_work(sys->wq, &sys->work); + return HRTIMER_NORESTART; +} + +static void ipa_pm_sys_pipe_cb(void *p, enum ipa_pm_cb_event event) +{ + struct ipa3_sys_context *sys = (struct ipa3_sys_context *)p; + + switch (event) { + case IPA_PM_CLIENT_ACTIVATED: + /* + * this event is ignored as the sync version of activation + * will be used. + */ + break; + case IPA_PM_REQUEST_WAKEUP: + /* + * pipe will be unsuspended as part of + * enabling IPA clocks + */ + IPADBG("calling wakeup for client %d\n", sys->ep->client); + if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_WAN"); + usleep_range(SUSPEND_MIN_SLEEP_RX, + SUSPEND_MAX_SLEEP_RX); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_WAN"); + } else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_LAN"); + usleep_range(SUSPEND_MIN_SLEEP_RX, + SUSPEND_MAX_SLEEP_RX); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_LAN"); + } else + IPAERR("Unexpected event %d\n for client %d\n", + event, sys->ep->client); + break; + default: + IPAERR("Unexpected event %d\n for client %d\n", + event, sys->ep->client); + WARN_ON(1); + return; + } +} + +/** + * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform + * IPA EP configuration + * @sys_in: [in] input needed to setup the pipe and configure EP + * @clnt_hdl: [out] client handle + * + * - configure the end-point registers with the supplied + * parameters from the user. + * - Creates a GPI connection with IPA. 
+ * - allocate descriptor FIFO + * + * Returns: 0 on success, negative on failure + */ +int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) +{ + struct ipa3_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + char buff[IPA_RESOURCE_NAME_MAX]; + + if (sys_in == NULL || clnt_hdl == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + + if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) { + IPAERR("bad parm client:%d fifo_sz:%d\n", + sys_in->client, sys_in->desc_fifo_sz); + goto fail_gen; + } + + ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + goto fail_gen; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (ep->valid == 1) { + IPAERR("EP %d already allocated.\n", ipa_ep_idx); + goto fail_gen; + } + + IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client); + memset(ep, 0, offsetof(struct ipa3_ep_context, sys)); + + if (!ep->sys) { + struct ipa_pm_register_params pm_reg; + + memset(&pm_reg, 0, sizeof(pm_reg)); + ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL); + if (!ep->sys) { + IPAERR("failed to sys ctx for client %d\n", + sys_in->client); + result = -ENOMEM; + goto fail_and_disable_clocks; + } + + ep->sys->ep = ep; + snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d", + sys_in->client); + ep->sys->wq = alloc_workqueue(buff, + WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1); + + if (!ep->sys->wq) { + IPAERR("failed to create wq for client %d\n", + sys_in->client); + result = -EFAULT; + goto fail_wq; + } + + snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d", + sys_in->client); + ep->sys->repl_wq = alloc_workqueue(buff, + WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1); + if (!ep->sys->repl_wq) { + IPAERR("failed to create rep wq for client %d\n", + sys_in->client); + result = -EFAULT; + goto fail_wq2; + } + + INIT_LIST_HEAD(&ep->sys->head_desc_list); + INIT_LIST_HEAD(&ep->sys->rcycl_list); + spin_lock_init(&ep->sys->spinlock); + hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn; + + /* create IPA PM resources for handling polling mode */ + if (ipa3_ctx->use_ipa_pm && + IPA_CLIENT_IS_CONS(sys_in->client)) { + pm_reg.name = ipa_clients_strings[sys_in->client]; + pm_reg.callback = ipa_pm_sys_pipe_cb; + pm_reg.user_data = ep->sys; + pm_reg.group = IPA_PM_GROUP_APPS; + result = ipa_pm_register(&pm_reg, &ep->sys->pm_hdl); + if (result) { + IPAERR("failed to create IPA PM client %d\n", + result); + goto fail_pm; + } + + if (IPA_CLIENT_IS_APPS_CONS(sys_in->client)) { + result = ipa_pm_associate_ipa_cons_to_client( + ep->sys->pm_hdl, sys_in->client); + if (result) { + IPAERR("failed to associate\n"); + goto fail_gen2; + } + } + + result = ipa_pm_set_throughput(ep->sys->pm_hdl, + IPA_APPS_BW_FOR_PM); + if (result) { + IPAERR("failed to set profile IPA PM client\n"); + goto fail_gen2; + } + } + } else { + memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep)); + } + + ep->skip_ep_cfg = sys_in->skip_ep_cfg; + if (ipa3_assign_policy(sys_in, ep->sys)) { + IPAERR("failed to sys ctx for client %d\n", sys_in->client); + result = -ENOMEM; + goto fail_gen2; + } + + ep->valid = 1; + ep->client = sys_in->client; + ep->client_notify = sys_in->notify; + ep->sys->napi_obj = sys_in->napi_obj; + ep->priv = sys_in->priv; + ep->keep_ipa_awake = sys_in->keep_ipa_awake; + atomic_set(&ep->avail_fifo_desc, + ((sys_in->desc_fifo_sz / IPA_FIFO_ELEMENT_SIZE) - 1)); + + if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) && + 
ep->sys->status_stat == NULL) { + ep->sys->status_stat = + kzalloc(sizeof(struct ipa3_status_stats), GFP_KERNEL); + if (!ep->sys->status_stat) + goto fail_gen2; + } + + if (!ep->skip_ep_cfg) { + if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_gen2; + } + if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_gen2; + } + IPADBG("ep %d configuration successful\n", ipa_ep_idx); + } else { + IPADBG("skipping ep %d configuration\n", ipa_ep_idx); + } + + result = ipa_gsi_setup_channel(sys_in, ep); + if (result) { + IPAERR("Failed to setup GSI channel\n"); + goto fail_gen2; + } + + *clnt_hdl = ipa_ep_idx; + + if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) { + atomic_set(&ep->sys->repl.pending, 0); + ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1; + ep->sys->repl.cache = kcalloc(ep->sys->repl.capacity, + sizeof(void *), GFP_KERNEL); + if (!ep->sys->repl.cache) { + IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx); + ep->sys->repl_hdlr = ipa3_replenish_rx_cache; + ep->sys->repl.capacity = 0; + } else { + atomic_set(&ep->sys->repl.head_idx, 0); + atomic_set(&ep->sys->repl.tail_idx, 0); + ipa3_wq_repl_rx(&ep->sys->repl_work); + } + } + + if (IPA_CLIENT_IS_CONS(sys_in->client)) + ipa3_replenish_rx_cache(ep->sys); + + if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) { + ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW); + atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt); + } + + ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) { + if (ipa3_ctx->modem_cfg_emb_pipe_flt && + sys_in->client == IPA_CLIENT_APPS_WAN_PROD) + IPADBG("modem cfg emb pipe flt\n"); + else + ipa3_install_dflt_flt_rules(ipa_ep_idx); + } + + result = ipa3_enable_data_path(ipa_ep_idx); + if (result) { + IPAERR("enable data path failed res=%d ep=%d.\n", result, + ipa_ep_idx); + goto fail_gen2; + } + + result = gsi_start_channel(ep->gsi_chan_hdl); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("gsi_start_channel failed res=%d ep=%d.\n", result, + ipa_ep_idx); + goto fail_gen3; + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client, + ipa_ep_idx, ep->sys); + + return 0; + +fail_gen3: + ipa3_disable_data_path(ipa_ep_idx); +fail_gen2: + if (ipa3_ctx->use_ipa_pm) + ipa_pm_deregister(ep->sys->pm_hdl); +fail_pm: + destroy_workqueue(ep->sys->repl_wq); +fail_wq2: + destroy_workqueue(ep->sys->wq); +fail_wq: + kfree(ep->sys); + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); +fail_and_disable_clocks: + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); +fail_gen: + return result; +} + +/** + * ipa3_teardown_sys_pipe() - Teardown the GPI pipe and cleanup IPA EP + * @clnt_hdl: [in] the handle obtained from ipa3_setup_sys_pipe + * + * Returns: 0 on success, negative on failure + */ +int ipa3_teardown_sys_pipe(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + int empty; + int result; + int i; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipa3_disable_data_path(clnt_hdl); + + if (IPA_CLIENT_IS_PROD(ep->client)) { + do { + spin_lock_bh(&ep->sys->spinlock); + empty = list_empty(&ep->sys->head_desc_list); + spin_unlock_bh(&ep->sys->spinlock); 
+ if (!empty) + usleep_range(95, 105); + else + break; + } while (1); + } + + /* channel stop might fail on timeout if IPA is busy */ + for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) { + result = ipa3_stop_gsi_channel(clnt_hdl); + if (result == GSI_STATUS_SUCCESS) + break; + + if (result != -GSI_STATUS_AGAIN && + result != -GSI_STATUS_TIMED_OUT) + break; + } + + if (result != GSI_STATUS_SUCCESS) { + IPAERR("GSI stop chan err: %d.\n", result); + ipa_assert(); + return result; + } + + if (ep->sys->napi_obj) { + do { + usleep_range(95, 105); + } while (atomic_read(&ep->sys->curr_polling_state)); + } + + if (IPA_CLIENT_IS_CONS(ep->client)) + cancel_delayed_work_sync(&ep->sys->replenish_rx_work); + flush_workqueue(ep->sys->wq); + + result = ipa3_reset_gsi_channel(clnt_hdl); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("Failed to reset chan: %d.\n", result); + ipa_assert(); + return result; + } + dma_free_coherent(ipa3_ctx->pdev, + ep->gsi_mem_info.chan_ring_len, + ep->gsi_mem_info.chan_ring_base_vaddr, + ep->gsi_mem_info.chan_ring_base_addr); + result = gsi_dealloc_channel(ep->gsi_chan_hdl); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("Failed to dealloc chan: %d.\n", result); + ipa_assert(); + return result; + } + + /* free event ring only when it is present */ + if (ep->sys->use_comm_evt_ring) { + ipa3_ctx->gsi_evt_comm_ring_rem += + ep->gsi_mem_info.chan_ring_len; + } else if (ep->gsi_evt_ring_hdl != ~0) { + result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl); + if (WARN(result != GSI_STATUS_SUCCESS, "reset evt %d", result)) + return result; + + dma_free_coherent(ipa3_ctx->pdev, + ep->gsi_mem_info.evt_ring_len, + ep->gsi_mem_info.evt_ring_base_vaddr, + ep->gsi_mem_info.evt_ring_base_addr); + result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); + if (WARN(result != GSI_STATUS_SUCCESS, "deall evt %d", result)) + return result; + } + if (ep->sys->repl_wq) + flush_workqueue(ep->sys->repl_wq); + if (IPA_CLIENT_IS_CONS(ep->client)) + ipa3_cleanup_rx(ep->sys); + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) { + if (ipa3_ctx->modem_cfg_emb_pipe_flt && + ep->client == IPA_CLIENT_APPS_WAN_PROD) + IPADBG("modem cfg emb pipe flt\n"); + else + ipa3_delete_dflt_flt_rules(clnt_hdl); + } + + if (IPA_CLIENT_IS_WLAN_CONS(ep->client)) + atomic_dec(&ipa3_ctx->wc_memb.active_clnt_cnt); + + memset(&ep->wstats, 0, sizeof(struct ipa3_wlan_stats)); + + if (!atomic_read(&ipa3_ctx->wc_memb.active_clnt_cnt)) + ipa3_cleanup_wlan_rx_common_cache(); + + ep->valid = 0; + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + return 0; +} + +/** + * ipa3_tx_comp_usr_notify_release() - Callback function which will call the + * user supplied callback function to release the skb, or release it on + * its own if no callback function was supplied. + * @user1 + * @user2 + * + * This notified callback is for the destination client. 
+ */ +static void ipa3_tx_comp_usr_notify_release(void *user1, int user2) +{ + struct sk_buff *skb = (struct sk_buff *)user1; + int ep_idx = user2; + + IPADBG_LOW("skb=%pK ep=%d\n", skb, ep_idx); + + IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_pkts_compl); + + if (ipa3_ctx->ep[ep_idx].client_notify) + ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv, + IPA_WRITE_DONE, (unsigned long)skb); + else + dev_kfree_skb_any(skb); +} + +void ipa3_tx_cmd_comp(void *user1, int user2) +{ + ipahal_destroy_imm_cmd(user1); +} + +/** + * ipa3_tx_dp() - Data-path tx handler + * @dst: [in] which IPA destination to route tx packets to + * @skb: [in] the packet to send + * @metadata: [in] TX packet meta-data + * + * Data-path tx handler, this is used for both SW data-path which by-passes most + * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If + * dst is a "valid" CONS type, then SW data-path is used. If dst is the + * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else + * is an error. For errors, client needs to free the skb as needed. For success, + * IPA driver will later invoke client callback if one was supplied. That + * callback should free the skb. If no callback supplied, IPA driver will free + * the skb internally + * + * The function will use two descriptors for this send command + * (for A5_WLAN_AMPDU_PROD only one desciprtor will be sent), + * the first descriptor will be used to inform the IPA hardware that + * apps need to push data into the IPA (IP_PACKET_INIT immediate command). + * Once this send was done from transport point-of-view the IPA driver will + * get notified by the supplied callback. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *meta) +{ + struct ipa3_desc *desc; + struct ipa3_desc _desc[3]; + int dst_ep_idx; + struct ipahal_imm_cmd_pyld *cmd_pyld = NULL; + struct ipa3_sys_context *sys; + int src_ep_idx; + int num_frags, f; + const struct ipa_gsi_ep_config *gsi_ep; + int data_idx; + unsigned int max_desc; + + if (unlikely(!ipa3_ctx)) { + IPAERR("IPA3 driver was not initialized\n"); + return -EINVAL; + } + + if (skb->len == 0) { + IPAERR("packet size is 0\n"); + return -EINVAL; + } + + /* + * USB_CONS: PKT_INIT ep_idx = dst pipe + * Q6_CONS: PKT_INIT ep_idx = sender pipe + * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe + * + * LAN TX: all PKT_INIT + * WAN TX: PKT_INIT (cmd) + HW (data) + * + */ + if (IPA_CLIENT_IS_CONS(dst)) { + src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD); + if (-1 == src_ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_LAN_PROD); + goto fail_gen; + } + dst_ep_idx = ipa3_get_ep_mapping(dst); + } else { + src_ep_idx = ipa3_get_ep_mapping(dst); + if (-1 == src_ep_idx) { + IPAERR("Client %u is not mapped\n", dst); + goto fail_gen; + } + if (meta && meta->pkt_init_dst_ep_valid) + dst_ep_idx = meta->pkt_init_dst_ep; + else + dst_ep_idx = -1; + } + + sys = ipa3_ctx->ep[src_ep_idx].sys; + + if (!sys || !sys->ep->valid) { + IPAERR("pipe not valid\n"); + goto fail_gen; + } + + num_frags = skb_shinfo(skb)->nr_frags; + /* + * make sure TLV FIFO supports the needed frags. + * 2 descriptors are needed for IP_PACKET_INIT and TAG_STATUS. + * 1 descriptor needed for the linear portion of skb. 
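+ * For example (hypothetical numbers): with ipa_if_tlv = 16 and a
+ * smart/free prefetch threshold of 4, max_desc below evaluates to 12,
+ * so an skb carrying 10 or more page frags (10 + 3 > 12) is
+ * linearized before it is sent.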
+ */ + gsi_ep = ipa3_get_gsi_ep_info(ipa3_ctx->ep[src_ep_idx].client); + if (unlikely(gsi_ep == NULL)) { + IPAERR("failed to get EP %d GSI info\n", src_ep_idx); + goto fail_gen; + } + max_desc = gsi_ep->ipa_if_tlv; + if (gsi_ep->prefetch_mode == GSI_SMART_PRE_FETCH || + gsi_ep->prefetch_mode == GSI_FREE_PRE_FETCH) + max_desc -= gsi_ep->prefetch_threshold; + if (num_frags + 3 > max_desc) { + if (skb_linearize(skb)) { + IPAERR("Failed to linear skb with %d frags\n", + num_frags); + goto fail_gen; + } + num_frags = 0; + } + if (num_frags) { + /* 1 desc for tag to resolve status out-of-order issue; + * 1 desc is needed for the linear portion of skb; + * 1 desc may be needed for the PACKET_INIT; + * 1 desc for each frag + */ + desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC); + if (!desc) { + IPAERR("failed to alloc desc array\n"); + goto fail_gen; + } + } else { + memset(_desc, 0, 3 * sizeof(struct ipa3_desc)); + desc = &_desc[0]; + } + + if (dst_ep_idx != -1) { + /* SW data path */ + data_idx = 0; + if (sys->policy == IPA_POLICY_NOINTR_MODE) { + /* + * For non-interrupt mode channel (where there is no + * event ring) TAG STATUS are used for completion + * notification. IPA will generate a status packet with + * tag info as a result of the TAG STATUS command. + */ + desc[data_idx].is_tag_status = true; + data_idx++; + } + desc[data_idx].opcode = ipa3_ctx->pkt_init_imm_opcode; + desc[data_idx].dma_address_valid = true; + desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx]; + desc[data_idx].type = IPA_IMM_CMD_DESC; + desc[data_idx].callback = NULL; + data_idx++; + desc[data_idx].pyld = skb->data; + desc[data_idx].len = skb_headlen(skb); + desc[data_idx].type = IPA_DATA_DESC_SKB; + desc[data_idx].callback = ipa3_tx_comp_usr_notify_release; + desc[data_idx].user1 = skb; + desc[data_idx].user2 = (meta && meta->pkt_init_dst_ep_valid && + meta->pkt_init_dst_ep_remote) ? + src_ep_idx : + dst_ep_idx; + if (meta && meta->dma_address_valid) { + desc[data_idx].dma_address_valid = true; + desc[data_idx].dma_address = meta->dma_address; + } + data_idx++; + + for (f = 0; f < num_frags; f++) { + desc[data_idx + f].frag = &skb_shinfo(skb)->frags[f]; + desc[data_idx + f].type = IPA_DATA_DESC_SKB_PAGED; + desc[data_idx + f].len = + skb_frag_size(desc[data_idx + f].frag); + } + /* don't free skb till frag mappings are released */ + if (num_frags) { + desc[data_idx + f - 1].callback = desc[2].callback; + desc[data_idx + f - 1].user1 = desc[2].user1; + desc[data_idx + f - 1].user2 = desc[2].user2; + desc[data_idx - 1].callback = NULL; + } + + if (ipa3_send(sys, num_frags + data_idx, desc, true)) { + IPAERR("fail to send skb %pK num_frags %u SWP\n", + skb, num_frags); + goto fail_send; + } + IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts); + } else { + /* HW data path */ + data_idx = 0; + if (sys->policy == IPA_POLICY_NOINTR_MODE) { + /* + * For non-interrupt mode channel (where there is no + * event ring) TAG STATUS are used for completion + * notification. IPA will generate a status packet with + * tag info as a result of the TAG STATUS command. 
+ */ + desc[data_idx].is_tag_status = true; + data_idx++; + } + desc[data_idx].pyld = skb->data; + desc[data_idx].len = skb_headlen(skb); + desc[data_idx].type = IPA_DATA_DESC_SKB; + desc[data_idx].callback = ipa3_tx_comp_usr_notify_release; + desc[data_idx].user1 = skb; + desc[data_idx].user2 = src_ep_idx; + + if (meta && meta->dma_address_valid) { + desc[data_idx].dma_address_valid = true; + desc[data_idx].dma_address = meta->dma_address; + } + if (num_frags == 0) { + if (ipa3_send(sys, data_idx + 1, desc, true)) { + IPAERR("fail to send skb %pK HWP\n", skb); + goto fail_mem; + } + } else { + for (f = 0; f < num_frags; f++) { + desc[data_idx+f+1].frag = + &skb_shinfo(skb)->frags[f]; + desc[data_idx+f+1].type = + IPA_DATA_DESC_SKB_PAGED; + desc[data_idx+f+1].len = + skb_frag_size(desc[data_idx+f+1].frag); + } + /* don't free skb till frag mappings are released */ + desc[data_idx+f].callback = desc[data_idx].callback; + desc[data_idx+f].user1 = desc[data_idx].user1; + desc[data_idx+f].user2 = desc[data_idx].user2; + desc[data_idx].callback = NULL; + + if (ipa3_send(sys, num_frags + data_idx + 1, + desc, true)) { + IPAERR("fail to send skb %pK num_frags %u\n", + skb, num_frags); + goto fail_mem; + } + } + IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts); + } + + if (num_frags) { + kfree(desc); + IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear); + } + return 0; + +fail_send: + ipahal_destroy_imm_cmd(cmd_pyld); +fail_mem: + if (num_frags) + kfree(desc); +fail_gen: + return -EFAULT; +} + +static void ipa3_wq_handle_rx(struct work_struct *work) +{ + struct ipa3_sys_context *sys; + + sys = container_of(work, struct ipa3_sys_context, work); + + if (sys->napi_obj) { + if (!ipa3_ctx->use_ipa_pm) + IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI"); + else + ipa_pm_activate_sync(sys->pm_hdl); + napi_schedule(sys->napi_obj); + } else + ipa3_handle_rx(sys); +} + +static void ipa3_wq_repl_rx(struct work_struct *work) +{ + struct ipa3_sys_context *sys; + void *ptr; + struct ipa3_rx_pkt_wrapper *rx_pkt; + gfp_t flag = GFP_KERNEL; + u32 next; + u32 curr; + + sys = container_of(work, struct ipa3_sys_context, repl_work); + atomic_set(&sys->repl.pending, 0); + curr = atomic_read(&sys->repl.tail_idx); + +begin: + while (1) { + next = (curr + 1) % sys->repl.capacity; + if (next == atomic_read(&sys->repl.head_idx)) + goto fail_kmem_cache_alloc; + + rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) + goto fail_kmem_cache_alloc; + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail); + rx_pkt->sys = sys; + + rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag); + if (rx_pkt->data.skb == NULL) + goto fail_skb_alloc; + + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr, + sys->rx_buff_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) { + pr_err_ratelimited("%s dma map fail %pK for %pK sys=%pK\n", + __func__, (void *)rx_pkt->data.dma_addr, + ptr, sys); + goto fail_dma_mapping; + } + + sys->repl.cache[curr] = rx_pkt; + curr = next; + /* ensure write is done before setting tail index */ + mb(); + atomic_set(&sys->repl.tail_idx, next); + } + + return; + +fail_dma_mapping: + sys->free_skb(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + if (atomic_read(&sys->repl.tail_idx) == + atomic_read(&sys->repl.head_idx)) { + if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) + 
IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty); + else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) + IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty); + pr_err_ratelimited("%s sys=%pK repl ring empty\n", + __func__, sys); + goto begin; + } +} + +static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt = NULL; + struct ipa3_rx_pkt_wrapper *tmp; + int ret; + struct gsi_xfer_elem gsi_xfer_elem_one; + u32 rx_len_cached = 0; + + IPADBG_LOW("\n"); + + spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + rx_len_cached = sys->len; + + if (rx_len_cached < sys->rx_pool_sz) { + list_for_each_entry_safe(rx_pkt, tmp, + &ipa3_ctx->wc_memb.wlan_comm_desc_list, link) { + list_del(&rx_pkt->link); + + if (ipa3_ctx->wc_memb.wlan_comm_free_cnt > 0) + ipa3_ctx->wc_memb.wlan_comm_free_cnt--; + + INIT_LIST_HEAD(&rx_pkt->link); + rx_pkt->len = 0; + rx_pkt->sys = sys; + + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + memset(&gsi_xfer_elem_one, 0, + sizeof(gsi_xfer_elem_one)); + gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr; + gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ; + gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT; + gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB; + gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA; + gsi_xfer_elem_one.xfer_user_data = rx_pkt; + + ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, + &gsi_xfer_elem_one, true); + + if (ret) { + IPAERR("failed to provide buffer: %d\n", ret); + goto fail_provide_rx_buffer; + } + + rx_len_cached = ++sys->len; + + if (rx_len_cached >= sys->rx_pool_sz) { + spin_unlock_bh( + &ipa3_ctx->wc_memb.wlan_spinlock); + return; + } + } + } + spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + + if (rx_len_cached < sys->rx_pool_sz && + ipa3_ctx->wc_memb.wlan_comm_total_cnt < + IPA_WLAN_COMM_RX_POOL_HIGH) { + ipa3_replenish_rx_cache(sys); + ipa3_ctx->wc_memb.wlan_comm_total_cnt += + (sys->rx_pool_sz - rx_len_cached); + } + + return; + +fail_provide_rx_buffer: + list_del(&rx_pkt->link); + spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); +} + +static void ipa3_cleanup_wlan_rx_common_cache(void) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + struct ipa3_rx_pkt_wrapper *tmp; + + spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + + list_for_each_entry_safe(rx_pkt, tmp, + &ipa3_ctx->wc_memb.wlan_comm_desc_list, link) { + list_del(&rx_pkt->link); + dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr, + IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_pkt->data.skb); + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); + ipa3_ctx->wc_memb.wlan_comm_free_cnt--; + ipa3_ctx->wc_memb.wlan_comm_total_cnt--; + } + ipa3_ctx->wc_memb.total_tx_pkts_freed = 0; + + if (ipa3_ctx->wc_memb.wlan_comm_free_cnt != 0) + IPAERR("wlan comm buff free cnt: %d\n", + ipa3_ctx->wc_memb.wlan_comm_free_cnt); + + if (ipa3_ctx->wc_memb.wlan_comm_total_cnt != 0) + IPAERR("wlan comm buff total cnt: %d\n", + ipa3_ctx->wc_memb.wlan_comm_total_cnt); + + spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + +} + +static void ipa3_alloc_wlan_rx_common_cache(u32 size) +{ + void *ptr; + struct ipa3_rx_pkt_wrapper *rx_pkt; + int rx_len_cached = 0; + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN; + + rx_len_cached = ipa3_ctx->wc_memb.wlan_comm_total_cnt; + while (rx_len_cached < size) { + rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) + goto fail_kmem_cache_alloc; + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail); + + rx_pkt->data.skb = + 
ipa3_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ, + flag); + if (rx_pkt->data.skb == NULL) { + IPAERR("failed to alloc skb\n"); + goto fail_skb_alloc; + } + ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ); + rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr, + IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %pK for %pK\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + + spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + list_add_tail(&rx_pkt->link, + &ipa3_ctx->wc_memb.wlan_comm_desc_list); + rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt; + + ipa3_ctx->wc_memb.wlan_comm_free_cnt++; + spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + + } + + return; + +fail_dma_mapping: + dev_kfree_skb_any(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + return; +} + + +/** + * ipa3_replenish_rx_cache() - Replenish the Rx packets cache. + * + * The function allocates buffers in the rx_pkt_wrapper_cache cache until there + * are IPA_RX_POOL_CEIL buffers in the cache. + * - Allocate a buffer in the cache + * - Initialized the packets link + * - Initialize the packets work struct + * - Allocate the packets socket buffer (skb) + * - Fill the packets skb with data + * - Make the packet DMAable + * - Add the packet to the system pipe linked list + */ +static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys) +{ + void *ptr; + struct ipa3_rx_pkt_wrapper *rx_pkt; + int ret; + int idx = 0; + int rx_len_cached = 0; + struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_THRESH]; + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN; + + rx_len_cached = sys->len; + + /* start replenish only when buffers go lower than the threshold */ + if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH) + return; + + + while (rx_len_cached < sys->rx_pool_sz) { + rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) + goto fail_kmem_cache_alloc; + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail); + rx_pkt->sys = sys; + + rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag); + if (rx_pkt->data.skb == NULL) { + IPAERR("failed to alloc skb\n"); + goto fail_skb_alloc; + } + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr, + sys->rx_buff_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %pK for %pK\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr; + gsi_xfer_elem_array[idx].len = sys->rx_buff_sz; + gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT; + gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB; + gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI; + gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA; + gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt; + idx++; + rx_len_cached++; + /* + * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_THRESH. + * If this size is reached we need to queue the xfers. 
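+ * In other words, buffers are handed to GSI in batches of up to
+ * IPA_REPL_XFER_THRESH (20) transfer elements; any partial batch left
+ * when the loop exits is flushed under the 'done' label below.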
+ */ + if (idx == IPA_REPL_XFER_THRESH) { + ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx, + gsi_xfer_elem_array, true); + if (ret == GSI_STATUS_SUCCESS) { + sys->len = rx_len_cached; + } else { + /* we don't expect this will happen */ + IPAERR("failed to provide buffer: %d\n", ret); + WARN_ON(1); + break; + } + idx = 0; + } + } + goto done; + +fail_dma_mapping: + sys->free_skb(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + if (rx_len_cached == 0) + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); +done: + if (idx) { + ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx, + gsi_xfer_elem_array, true); + if (ret == GSI_STATUS_SUCCESS) { + sys->len = rx_len_cached; + } else { + /* we don't expect this will happen */ + IPAERR("failed to provide buffer: %d\n", ret); + WARN_ON(1); + } + } +} + +static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys) +{ + void *ptr; + struct ipa3_rx_pkt_wrapper *rx_pkt; + int ret; + int idx = 0; + int rx_len_cached = 0; + struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_THRESH]; + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN; + + /* start replenish only when buffers go lower than the threshold */ + if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH) + return; + + rx_len_cached = sys->len; + + while (rx_len_cached < sys->rx_pool_sz) { + if (list_empty(&sys->rcycl_list)) { + rx_pkt = kmem_cache_zalloc( + ipa3_ctx->rx_pkt_wrapper_cache, flag); + if (!rx_pkt) + goto fail_kmem_cache_alloc; + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail); + rx_pkt->sys = sys; + + rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag); + if (rx_pkt->data.skb == NULL) { + IPAERR("failed to alloc skb\n"); + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, + rx_pkt); + goto fail_kmem_cache_alloc; + } + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, + ptr, sys->rx_buff_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, + rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %pK for %pK\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + } else { + spin_lock_bh(&sys->spinlock); + rx_pkt = list_first_entry(&sys->rcycl_list, + struct ipa3_rx_pkt_wrapper, link); + list_del(&rx_pkt->link); + spin_unlock_bh(&sys->spinlock); + INIT_LIST_HEAD(&rx_pkt->link); + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, + ptr, sys->rx_buff_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, + rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %pK for %pK\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + } + + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr; + gsi_xfer_elem_array[idx].len = sys->rx_buff_sz; + gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT; + gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB; + gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI; + gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA; + gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt; + idx++; + rx_len_cached++; + /* + * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_THRESH. + * If this size is reached we need to queue the xfers. 
+ */ + if (idx == IPA_REPL_XFER_THRESH) { + ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx, + gsi_xfer_elem_array, true); + if (ret == GSI_STATUS_SUCCESS) { + sys->len = rx_len_cached; + } else { + /* we don't expect this will happen */ + IPAERR("failed to provide buffer: %d\n", ret); + WARN_ON(1); + break; + } + idx = 0; + } + } + goto done; +fail_dma_mapping: + spin_lock_bh(&sys->spinlock); + list_add_tail(&rx_pkt->link, &sys->rcycl_list); + INIT_LIST_HEAD(&rx_pkt->link); + spin_unlock_bh(&sys->spinlock); +fail_kmem_cache_alloc: + if (rx_len_cached == 0) + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); +done: + if (idx) { + ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx, + gsi_xfer_elem_array, true); + if (ret == GSI_STATUS_SUCCESS) { + sys->len = rx_len_cached; + } else { + /* we don't expect this will happen */ + IPAERR("failed to provide buffer: %d\n", ret); + WARN_ON(1); + } + } +} + +static inline void __trigger_repl_work(struct ipa3_sys_context *sys) +{ + int tail, head, avail; + + if (atomic_read(&sys->repl.pending)) + return; + + tail = atomic_read(&sys->repl.tail_idx); + head = atomic_read(&sys->repl.head_idx); + avail = (tail - head) % sys->repl.capacity; + + if (avail < sys->repl.capacity / 4) { + atomic_set(&sys->repl.pending, 1); + queue_work(sys->repl_wq, &sys->repl_work); + } +} + +static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + int ret; + int rx_len_cached = 0; + struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_THRESH]; + u32 curr; + int idx = 0; + + /* start replenish only when buffers go lower than the threshold */ + if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH) + return; + + spin_lock_bh(&sys->spinlock); + rx_len_cached = sys->len; + curr = atomic_read(&sys->repl.head_idx); + + while (rx_len_cached < sys->rx_pool_sz) { + if (curr == atomic_read(&sys->repl.tail_idx)) + break; + rx_pkt = sys->repl.cache[curr]; + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr; + gsi_xfer_elem_array[idx].len = sys->rx_buff_sz; + gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT; + gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB; + gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI; + gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA; + gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt; + rx_len_cached++; + curr = (++curr == sys->repl.capacity) ? 0 : curr; + idx++; + /* + * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_THRESH. + * If this size is reached we need to queue the xfers. 
+ */ + if (idx == IPA_REPL_XFER_THRESH) { + ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx, + gsi_xfer_elem_array, true); + if (ret == GSI_STATUS_SUCCESS) { + /* ensure write is done before setting head */ + mb(); + atomic_set(&sys->repl.head_idx, curr); + sys->len = rx_len_cached; + } else { + /* we don't expect this will happen */ + IPAERR("failed to provide buffer: %d\n", ret); + WARN_ON(1); + break; + } + idx = 0; + } + } + /* There can still be something left which has not been xfer yet */ + if (idx) { + ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx, + gsi_xfer_elem_array, true); + if (ret == GSI_STATUS_SUCCESS) { + /* ensure write is done before setting head index */ + mb(); + atomic_set(&sys->repl.head_idx, curr); + sys->len = rx_len_cached; + } else { + /* we don't expect this will happen */ + IPAERR("failed to provide buffer: %d\n", ret); + WARN_ON(1); + } + } + spin_unlock_bh(&sys->spinlock); + + __trigger_repl_work(sys); + + if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) { + if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) + IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty); + else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) + IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty); + else + WARN_ON(1); + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); + } +} + +static void ipa3_replenish_rx_work_func(struct work_struct *work) +{ + struct delayed_work *dwork; + struct ipa3_sys_context *sys; + + dwork = container_of(work, struct delayed_work, work); + sys = container_of(dwork, struct ipa3_sys_context, replenish_rx_work); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + sys->repl_hdlr(sys); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +/** + * ipa3_cleanup_rx() - release RX queue resources + * + */ +static void ipa3_cleanup_rx(struct ipa3_sys_context *sys) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + struct ipa3_rx_pkt_wrapper *r; + u32 head; + u32 tail; + + list_for_each_entry_safe(rx_pkt, r, + &sys->head_desc_list, link) { + list_del(&rx_pkt->link); + dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); + } + + list_for_each_entry_safe(rx_pkt, r, + &sys->rcycl_list, link) { + list_del(&rx_pkt->link); + dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); + } + + if (sys->repl.cache) { + head = atomic_read(&sys->repl.head_idx); + tail = atomic_read(&sys->repl.tail_idx); + while (head != tail) { + rx_pkt = sys->repl.cache[head]; + dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); + head = (head + 1) % sys->repl.capacity; + } + kfree(sys->repl.cache); + } +} + +static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len) +{ + struct sk_buff *skb2 = NULL; + + skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL); + if (likely(skb2)) { + /* Set the data pointer */ + skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM); + memcpy(skb2->data, skb->data, len); + skb2->len = len; + skb_set_tail_pointer(skb2, len); + } + + return skb2; +} + +static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb, + struct ipa3_sys_context *sys) +{ + struct ipahal_pkt_status status; + u32 pkt_status_sz; + struct sk_buff *skb2; + int pad_len_byte; + int len; + unsigned char 
*buf; + int src_pipe; + unsigned int used = *(unsigned int *)skb->cb; + unsigned int used_align = ALIGN(used, 32); + unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used; + struct ipa3_tx_pkt_wrapper *tx_pkt = NULL; + unsigned long ptr; + + IPA_DUMP_BUFF(skb->data, 0, skb->len); + + if (skb->len == 0) { + IPAERR("ZLT packet arrived to AP\n"); + return 0; + } + + if (sys->len_partial) { + IPADBG_LOW("len_partial %d\n", sys->len_partial); + buf = skb_push(skb, sys->len_partial); + memcpy(buf, sys->prev_skb->data, sys->len_partial); + sys->len_partial = 0; + sys->free_skb(sys->prev_skb); + sys->prev_skb = NULL; + goto begin; + } + + /* this pipe has TX comp (status only) + mux-ed LAN RX data + * (status+data) + */ + if (sys->len_rem) { + IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len, + sys->len_pad); + if (sys->len_rem <= skb->len) { + if (sys->prev_skb) { + skb2 = skb_copy_expand(sys->prev_skb, 0, + sys->len_rem, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, sys->len_rem), + skb->data, sys->len_rem); + skb_trim(skb2, + skb2->len - sys->len_pad); + skb2->truesize = skb2->len + + sizeof(struct sk_buff); + if (sys->drop_packet) + dev_kfree_skb_any(skb2); + else + sys->ep->client_notify( + sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } else { + IPAERR("copy expand failed\n"); + } + dev_kfree_skb_any(sys->prev_skb); + } + skb_pull(skb, sys->len_rem); + sys->prev_skb = NULL; + sys->len_rem = 0; + sys->len_pad = 0; + } else { + if (sys->prev_skb) { + skb2 = skb_copy_expand(sys->prev_skb, 0, + skb->len, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, skb->len), + skb->data, skb->len); + } else { + IPAERR("copy expand failed\n"); + } + dev_kfree_skb_any(sys->prev_skb); + sys->prev_skb = skb2; + } + sys->len_rem -= skb->len; + return 0; + } + } + +begin: + pkt_status_sz = ipahal_pkt_status_get_size(); + while (skb->len) { + sys->drop_packet = false; + IPADBG_LOW("LEN_REM %d\n", skb->len); + + if (skb->len < pkt_status_sz) { + WARN_ON(sys->prev_skb != NULL); + IPADBG_LOW("status straddles buffer\n"); + sys->prev_skb = skb_copy(skb, GFP_KERNEL); + sys->len_partial = skb->len; + return 0; + } + + ipahal_pkt_status_parse(skb->data, &status); + IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n", + status.status_opcode, status.endp_src_idx, + status.endp_dest_idx, status.pkt_len); + if (sys->status_stat) { + sys->status_stat->status[sys->status_stat->curr] = + status; + sys->status_stat->curr++; + if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM) + sys->status_stat->curr = 0; + } + + if ((status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) && + (status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_PACKET) && + (status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET) && + (status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) { + IPAERR("unsupported opcode(%d)\n", + status.status_opcode); + skb_pull(skb, pkt_status_sz); + continue; + } + IPA_STATS_EXCP_CNT(status.exception, + ipa3_ctx->stats.rx_excp_pkts); + if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes || + status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) { + IPAERR("status fields invalid\n"); + IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n", + status.status_opcode, status.endp_src_idx, + status.endp_dest_idx, status.pkt_len); + WARN_ON(1); + /* HW gave an unexpected status */ + ipa_assert(); + } + if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL( + IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, &status)) { + struct ipa3_tag_completion *comp; + + IPADBG_LOW("TAG packet 
arrived\n"); + if (status.tag_info == IPA_COOKIE) { + skb_pull(skb, pkt_status_sz); + if (skb->len < sizeof(comp)) { + IPAERR("TAG arrived without packet\n"); + return 0; + } + memcpy(&comp, skb->data, sizeof(comp)); + skb_pull(skb, sizeof(comp)); + complete(&comp->comp); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + continue; + } else { + ptr = tag_to_pointer_wa(status.tag_info); + tx_pkt = (struct ipa3_tx_pkt_wrapper *)ptr; + IPADBG_LOW("tx_pkt recv = %pK\n", tx_pkt); + } + } + if (status.pkt_len == 0) { + IPADBG_LOW("Skip aggr close status\n"); + skb_pull(skb, pkt_status_sz); + IPA_STATS_INC_CNT(ipa3_ctx->stats.aggr_close); + IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts + [IPAHAL_PKT_STATUS_EXCEPTION_NONE]); + continue; + } + + if (status.endp_dest_idx == (sys->ep - ipa3_ctx->ep)) { + /* RX data */ + src_pipe = status.endp_src_idx; + + /* + * A packet which is received back to the AP after + * there was no route match. + */ + if (status.exception == + IPAHAL_PKT_STATUS_EXCEPTION_NONE && + ipahal_is_rule_miss_id(status.rt_rule_id)) + sys->drop_packet = true; + + if (skb->len == pkt_status_sz && + status.exception == + IPAHAL_PKT_STATUS_EXCEPTION_NONE) { + WARN_ON(sys->prev_skb != NULL); + IPADBG_LOW("Ins header in next buffer\n"); + sys->prev_skb = skb_copy(skb, GFP_KERNEL); + sys->len_partial = skb->len; + return 0; + } + + pad_len_byte = ((status.pkt_len + 3) & ~3) - + status.pkt_len; + + len = status.pkt_len + pad_len_byte; + IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte, + status.pkt_len, len); + + if (status.exception == + IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) { + IPADBG_LOW( + "Dropping packet on DeAggr Exception\n"); + sys->drop_packet = true; + } + + skb2 = ipa3_skb_copy_for_client(skb, + min(status.pkt_len + pkt_status_sz, skb->len)); + if (likely(skb2)) { + if (skb->len < len + pkt_status_sz) { + IPADBG_LOW("SPL skb len %d len %d\n", + skb->len, len); + sys->prev_skb = skb2; + sys->len_rem = len - skb->len + + pkt_status_sz; + sys->len_pad = pad_len_byte; + skb_pull(skb, skb->len); + } else { + skb_trim(skb2, status.pkt_len + + pkt_status_sz); + IPADBG_LOW("rx avail for %d\n", + status.endp_dest_idx); + if (sys->drop_packet) { + dev_kfree_skb_any(skb2); + } else if (status.pkt_len > + IPA_GENERIC_AGGR_BYTE_LIMIT * + 1024) { + IPAERR("packet size invalid\n"); + IPAERR("STATUS opcode=%d\n", + status.status_opcode); + IPAERR("src=%d dst=%d len=%d\n", + status.endp_src_idx, + status.endp_dest_idx, + status.pkt_len); + /* Unexpected HW status */ + ipa_assert(); + } else { + skb2->truesize = skb2->len + + sizeof(struct sk_buff) + + (ALIGN(len + + pkt_status_sz, 32) * + unused / used_align); + sys->ep->client_notify( + sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } + skb_pull(skb, len + pkt_status_sz); + } + } else { + IPAERR("fail to alloc skb\n"); + if (skb->len < len) { + sys->prev_skb = NULL; + sys->len_rem = len - skb->len + + pkt_status_sz; + sys->len_pad = pad_len_byte; + skb_pull(skb, skb->len); + } else { + skb_pull(skb, len + pkt_status_sz); + } + } + /* TX comp */ + ipa3_wq_write_done_status(src_pipe, tx_pkt); + IPADBG_LOW("tx comp imp for %d\n", src_pipe); + } else { + /* TX comp */ + ipa3_wq_write_done_status(status.endp_src_idx, tx_pkt); + IPADBG_LOW("tx comp exp for %d\n", + status.endp_src_idx); + skb_pull(skb, pkt_status_sz); + IPA_STATS_INC_CNT(ipa3_ctx->stats.stat_compl); + IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts + [IPAHAL_PKT_STATUS_EXCEPTION_NONE]); + } + tx_pkt = NULL; + } + + return 0; +} + +static struct sk_buff 
*ipa3_join_prev_skb(struct sk_buff *prev_skb, + struct sk_buff *skb, unsigned int len) +{ + struct sk_buff *skb2; + + skb2 = skb_copy_expand(prev_skb, 0, + len, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, len), + skb->data, len); + } else { + IPAERR("copy expand failed\n"); + skb2 = NULL; + } + dev_kfree_skb_any(prev_skb); + + return skb2; +} + +static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb, + struct ipa3_sys_context *sys) +{ + struct sk_buff *skb2; + + IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len); + if (sys->len_rem <= skb->len) { + if (sys->prev_skb) { + skb2 = ipa3_join_prev_skb(sys->prev_skb, skb, + sys->len_rem); + if (likely(skb2)) { + IPADBG_LOW( + "removing Status element from skb and sending to WAN client"); + skb_pull(skb2, ipahal_pkt_status_get_size()); + skb2->truesize = skb2->len + + sizeof(struct sk_buff); + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } + } + skb_pull(skb, sys->len_rem); + sys->prev_skb = NULL; + sys->len_rem = 0; + } else { + if (sys->prev_skb) { + skb2 = ipa3_join_prev_skb(sys->prev_skb, skb, + skb->len); + sys->prev_skb = skb2; + } + sys->len_rem -= skb->len; + skb_pull(skb, skb->len); + } +} + +static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb, + struct ipa3_sys_context *sys) +{ + struct ipahal_pkt_status status; + unsigned char *skb_data; + u32 pkt_status_sz; + struct sk_buff *skb2; + u16 pkt_len_with_pad; + u32 qmap_hdr; + int checksum_trailer_exists; + int frame_len; + int ep_idx; + unsigned int used = *(unsigned int *)skb->cb; + unsigned int used_align = ALIGN(used, 32); + unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used; + + IPA_DUMP_BUFF(skb->data, 0, skb->len); + if (skb->len == 0) { + IPAERR("ZLT\n"); + goto bail; + } + + if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) { + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, (unsigned long)(skb)); + return 0; + } + if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) { + IPAERR("Recycle should enable only with GRO Aggr\n"); + ipa_assert(); + } + + /* + * payload splits across 2 buff or more, + * take the start of the payload from prev_skb + */ + if (sys->len_rem) + ipa3_wan_rx_handle_splt_pyld(skb, sys); + + pkt_status_sz = ipahal_pkt_status_get_size(); + while (skb->len) { + IPADBG_LOW("LEN_REM %d\n", skb->len); + if (skb->len < pkt_status_sz) { + IPAERR("status straddles buffer\n"); + WARN_ON(1); + goto bail; + } + ipahal_pkt_status_parse(skb->data, &status); + skb_data = skb->data; + IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n", + status.status_opcode, status.endp_src_idx, + status.endp_dest_idx, status.pkt_len); + + if (sys->status_stat) { + sys->status_stat->status[sys->status_stat->curr] = + status; + sys->status_stat->curr++; + if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM) + sys->status_stat->curr = 0; + } + + if ((status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) && + (status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_PACKET) && + (status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) { + IPAERR("unsupported opcode(%d)\n", + status.status_opcode); + skb_pull(skb, pkt_status_sz); + continue; + } + + IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts); + if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes || + status.endp_src_idx >= ipa3_ctx->ipa_num_pipes || + status.pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) { + IPAERR("status fields invalid\n"); + WARN_ON(1); + goto bail; + } + if (status.pkt_len == 0) { + IPADBG_LOW("Skip aggr close status\n"); + 
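+			/*
+			 * A zero-length status appears to be a status-only
+			 * aggregation-close record: only the status bytes are
+			 * consumed below and the aggr-close counter updated,
+			 * as no payload follows it.
+			 */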
skb_pull(skb, pkt_status_sz); + IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_pkts); + IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close); + continue; + } + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (status.endp_dest_idx != ep_idx) { + IPAERR("expected endp_dest_idx %d received %d\n", + ep_idx, status.endp_dest_idx); + WARN_ON(1); + goto bail; + } + /* RX data */ + if (skb->len == pkt_status_sz) { + IPAERR("Ins header in next buffer\n"); + WARN_ON(1); + goto bail; + } + qmap_hdr = *(u32 *)(skb_data + pkt_status_sz); + /* + * Take the pkt_len_with_pad from the last 2 bytes of the QMAP + * header + */ + + /*QMAP is BE: convert the pkt_len field from BE to LE*/ + pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff); + IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad); + /*get the CHECKSUM_PROCESS bit*/ + checksum_trailer_exists = IPAHAL_PKT_STATUS_MASK_FLAG_VAL( + IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status); + IPADBG_LOW("checksum_trailer_exists %d\n", + checksum_trailer_exists); + + frame_len = pkt_status_sz + IPA_QMAP_HEADER_LENGTH + + pkt_len_with_pad; + if (checksum_trailer_exists) + frame_len += IPA_DL_CHECKSUM_LENGTH; + IPADBG_LOW("frame_len %d\n", frame_len); + + skb2 = skb_clone(skb, GFP_KERNEL); + if (likely(skb2)) { + /* + * the len of actual data is smaller than expected + * payload split across 2 buff + */ + if (skb->len < frame_len) { + IPADBG_LOW("SPL skb len %d len %d\n", + skb->len, frame_len); + sys->prev_skb = skb2; + sys->len_rem = frame_len - skb->len; + skb_pull(skb, skb->len); + } else { + skb_trim(skb2, frame_len); + IPADBG_LOW("rx avail for %d\n", + status.endp_dest_idx); + IPADBG_LOW( + "removing Status element from skb and sending to WAN client"); + skb_pull(skb2, pkt_status_sz); + skb2->truesize = skb2->len + + sizeof(struct sk_buff) + + (ALIGN(frame_len, 32) * + unused / used_align); + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, (unsigned long)(skb2)); + skb_pull(skb, frame_len); + } + } else { + IPAERR("fail to clone\n"); + if (skb->len < frame_len) { + sys->prev_skb = NULL; + sys->len_rem = frame_len - skb->len; + skb_pull(skb, skb->len); + } else { + skb_pull(skb, frame_len); + } + } + } +bail: + sys->free_skb(skb); + return 0; +} + +static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags) +{ + return __dev_alloc_skb(len, flags); +} + +static void ipa3_free_skb_rx(struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); +} + +void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data) +{ + struct sk_buff *rx_skb = (struct sk_buff *)data; + struct ipahal_pkt_status status; + struct ipa3_ep_context *ep; + unsigned int src_pipe; + u32 metadata; + u8 ucp; + + ipahal_pkt_status_parse(rx_skb->data, &status); + src_pipe = status.endp_src_idx; + metadata = status.metadata; + ucp = status.ucp; + ep = &ipa3_ctx->ep[src_pipe]; + if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes || + !ep->valid || + !ep->client_notify)) { + IPAERR_RL("drop pipe=%d ep_valid=%d client_notify=%pK\n", + src_pipe, ep->valid, ep->client_notify); + dev_kfree_skb_any(rx_skb); + return; + } + if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE) + skb_pull(rx_skb, ipahal_pkt_status_get_size() + + IPA_LAN_RX_HEADER_LENGTH); + else + skb_pull(rx_skb, ipahal_pkt_status_get_size()); + + /* Metadata Info + * ------------------------------------------ + * | 3 | 2 | 1 | 0 | + * | fw_desc | vdev_id | qmap mux id | Resv | + * ------------------------------------------ + */ + *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF); + *(u8 *)(rx_skb->cb + 4) = 
ucp; + IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n", + metadata, *(u32 *)rx_skb->cb); + IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4)); + + ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb)); +} + +static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt) +{ + rx_pkt->data.dma_addr = 0; + ipa3_skb_recycle(rx_pkt->data.skb); + INIT_LIST_HEAD(&rx_pkt->link); + spin_lock_bh(&rx_pkt->sys->spinlock); + list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list); + spin_unlock_bh(&rx_pkt->sys->spinlock); +} + +void ipa3_recycle_wan_skb(struct sk_buff *skb) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + int ep_idx = ipa3_get_ep_mapping( + IPA_CLIENT_APPS_WAN_CONS); + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN; + + if (unlikely(ep_idx == -1)) { + IPAERR("dest EP does not exist\n"); + ipa_assert(); + } + + rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) + return; + + INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail); + rx_pkt->sys = ipa3_ctx->ep[ep_idx].sys; + + rx_pkt->data.skb = skb; + ipa3_recycle_rx_wrapper(rx_pkt); +} + +static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt_expected; + struct sk_buff *rx_skb; + + if (unlikely(list_empty(&sys->head_desc_list))) { + WARN_ON(1); + return; + } + spin_lock_bh(&sys->spinlock); + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa3_rx_pkt_wrapper, + link); + list_del(&rx_pkt_expected->link); + sys->len--; + if (size) + rx_pkt_expected->len = size; + spin_unlock_bh(&sys->spinlock); + rx_skb = rx_pkt_expected->data.skb; + dma_unmap_single(ipa3_ctx->pdev, rx_pkt_expected->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + skb_set_tail_pointer(rx_skb, rx_pkt_expected->len); + rx_skb->len = rx_pkt_expected->len; + *(unsigned int *)rx_skb->cb = rx_skb->len; + rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff); + sys->pyld_hdlr(rx_skb, sys); + sys->free_rx_wrapper(rx_pkt_expected); + sys->repl_hdlr(sys); +} + +static void ipa3_wq_rx_napi_chain(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info, uint32_t num) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt_expected; + struct sk_buff *rx_skb, *first_skb = NULL, *prev_skb = NULL; + int i; + + if (unlikely(list_empty(&sys->head_desc_list)) || !mem_info || !num) { + WARN_ON(1); + return; + } + + for (i = 0; i < num; i++) { + spin_lock_bh(&sys->spinlock); + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa3_rx_pkt_wrapper, + link); + list_del(&rx_pkt_expected->link); + sys->len--; + if (mem_info[i].size) + rx_pkt_expected->len = mem_info[i].size; + spin_unlock_bh(&sys->spinlock); + rx_skb = rx_pkt_expected->data.skb; + dma_unmap_single(ipa3_ctx->pdev, rx_pkt_expected->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + skb_set_tail_pointer(rx_skb, rx_pkt_expected->len); + rx_skb->len = rx_pkt_expected->len; + + if (!first_skb) + first_skb = rx_skb; + + if (prev_skb) + skb_shinfo(prev_skb)->frag_list = rx_skb; + + prev_skb = rx_skb; + sys->free_rx_wrapper(rx_pkt_expected); + } + + skb_shinfo(prev_skb)->frag_list = NULL; + sys->pyld_hdlr(first_skb, sys); + sys->repl_hdlr(sys); +} + +static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, u32 size) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt_expected; + struct sk_buff *rx_skb; + + if (unlikely(list_empty(&sys->head_desc_list))) { + WARN_ON(1); + return; + } + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa3_rx_pkt_wrapper, + link); + list_del(&rx_pkt_expected->link); + sys->len--; + 
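+	/*
+	 * The buffer is handed to the WLAN client below unless the shared
+	 * pool has fallen to IPA_WLAN_RX_POOL_SZ_LOW_WM or below, in which
+	 * case it is returned to the common pool and counted as dropped;
+	 * the WLAN cache is then replenished either way.
+	 */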
+ if (size) + rx_pkt_expected->len = size; + + rx_skb = rx_pkt_expected->data.skb; + skb_set_tail_pointer(rx_skb, rx_pkt_expected->len); + rx_skb->len = rx_pkt_expected->len; + rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff); + sys->ep->wstats.tx_pkts_rcvd++; + if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) { + ipa3_free_skb(&rx_pkt_expected->data); + sys->ep->wstats.tx_pkts_dropped++; + } else { + sys->ep->wstats.tx_pkts_sent++; + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(&rx_pkt_expected->data)); + } + ipa3_replenish_wlan_rx_cache(sys); +} + +static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info) +{ + IPADBG_LOW("ENTER.\n"); + if (unlikely(list_empty(&sys->head_desc_list))) { + IPAERR("descriptor list is empty!\n"); + WARN_ON(1); + return; + } + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(mem_info)); + IPADBG_LOW("EXIT\n"); +} + +static void ipa3_wq_rx_avail(struct work_struct *work) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + struct ipa3_sys_context *sys; + + rx_pkt = container_of(work, struct ipa3_rx_pkt_wrapper, work); + WARN(unlikely(rx_pkt == NULL), "rx pkt is null"); + sys = rx_pkt->sys; + ipa3_wq_rx_common(sys, 0); +} + +static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb, + struct ipa3_sys_context *sys) +{ + if (sys->ep->client_notify) { + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(rx_skb)); + } else { + dev_kfree_skb_any(rx_skb); + WARN(1, "client notify is null"); + } + + return 0; +} + +static int ipa3_odl_dpl_rx_pyld_hdlr(struct sk_buff *rx_skb, + struct ipa3_sys_context *sys) +{ + if (WARN(!sys->ep->client_notify, "sys->ep->client_notify is NULL\n")) + dev_kfree_skb_any(rx_skb); + else + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(rx_skb)); + + return 0; +} +static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rk_pkt) +{ + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rk_pkt); +} + +static int ipa3_assign_policy(struct ipa_sys_connect_params *in, + struct ipa3_sys_context *sys) +{ + bool apps_wan_cons_agg_gro_flag; + unsigned long aggr_byte_limit; + + if (in->client == IPA_CLIENT_APPS_CMD_PROD) { + sys->policy = IPA_POLICY_INTR_MODE; + sys->use_comm_evt_ring = false; + return 0; + } + + if (in->client == IPA_CLIENT_APPS_WAN_PROD) { + sys->policy = IPA_POLICY_INTR_MODE; + sys->use_comm_evt_ring = true; + INIT_WORK(&sys->work, ipa3_send_nop_desc); + + /* + * enable source notification status for exception packets + * (i.e. QMAP commands) to be routed to modem. 
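+	 * For that, status_ep below is pointed at the Q6 WAN consumer pipe.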
+ */ + sys->ep->status.status_en = true; + sys->ep->status.status_ep = + ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_CONS); + return 0; + } + + if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) { + sys->policy = IPA_POLICY_NOINTR_MODE; + return 0; + } + + apps_wan_cons_agg_gro_flag = + ipa3_ctx->ipa_client_apps_wan_cons_agg_gro; + aggr_byte_limit = in->ipa_ep_cfg.aggr.aggr_byte_limit; + + if (IPA_CLIENT_IS_PROD(in->client)) { + if (sys->ep->skip_ep_cfg) { + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->use_comm_evt_ring = true; + atomic_set(&sys->curr_polling_state, 0); + } else { + sys->policy = IPA_POLICY_INTR_MODE; + sys->use_comm_evt_ring = true; + INIT_WORK(&sys->work, ipa3_send_nop_desc); + } + } else { + if (in->client == IPA_CLIENT_APPS_LAN_CONS || + in->client == IPA_CLIENT_APPS_WAN_CONS) { + sys->ep->status.status_en = true; + sys->policy = IPA_POLICY_INTR_POLL_MODE; + INIT_WORK(&sys->work, ipa3_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + ipa3_switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + ipa3_replenish_rx_work_func); + INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ( + IPA_GENERIC_RX_BUFF_BASE_SZ); + sys->get_skb = ipa3_get_skb_ipa_rx; + sys->free_skb = ipa3_free_skb_rx; + in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR; + in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC; + in->ipa_ep_cfg.aggr.aggr_time_limit = + IPA_GENERIC_AGGR_TIME_LIMIT; + if (in->client == IPA_CLIENT_APPS_LAN_CONS) { + sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr; + sys->repl_hdlr = + ipa3_replenish_rx_cache_recycle; + sys->free_rx_wrapper = + ipa3_recycle_rx_wrapper; + sys->rx_pool_sz = + ipa3_ctx->lan_rx_ring_size; + in->ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GENERIC_AGGR_BYTE_LIMIT; + in->ipa_ep_cfg.aggr.aggr_pkt_limit = + IPA_GENERIC_AGGR_PKT_LIMIT; + } else if (in->client == + IPA_CLIENT_APPS_WAN_CONS) { + sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr; + sys->free_rx_wrapper = ipa3_free_rx_wrapper; + sys->rx_pool_sz = ipa3_ctx->wan_rx_ring_size; + if (nr_cpu_ids > 1) { + sys->repl_hdlr = + ipa3_fast_replenish_rx_cache; + } else { + sys->repl_hdlr = + ipa3_replenish_rx_cache; + } + if (in->napi_obj && in->recycle_enabled) + sys->repl_hdlr = + ipa3_replenish_rx_cache_recycle; + in->ipa_ep_cfg.aggr.aggr_sw_eof_active + = true; + if (apps_wan_cons_agg_gro_flag) { + IPAERR("get close-by %u\n", + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit)); + IPAERR("set rx_buff_sz %lu\n", + (unsigned long) + IPA_GENERIC_RX_BUFF_SZ( + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit))); + /* disable ipa_status */ + sys->ep->status.status_en = false; + sys->rx_buff_sz = + IPA_GENERIC_RX_BUFF_SZ( + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit)); + in->ipa_ep_cfg.aggr.aggr_byte_limit = + sys->rx_buff_sz < + aggr_byte_limit ? 
+ IPA_ADJUST_AGGR_BYTE_LIMIT( + sys->rx_buff_sz) : + IPA_ADJUST_AGGR_BYTE_LIMIT( + in->ipa_ep_cfg.aggr.aggr_byte_limit); + IPAERR("set aggr_limit %lu\n", + (unsigned long) + in->ipa_ep_cfg.aggr.aggr_byte_limit); + } else { + in->ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GENERIC_AGGR_BYTE_LIMIT; + in->ipa_ep_cfg.aggr.aggr_pkt_limit = + IPA_GENERIC_AGGR_PKT_LIMIT; + } + } + } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->policy = IPA_POLICY_INTR_POLL_MODE; + INIT_WORK(&sys->work, ipa3_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + ipa3_switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + ipa3_replenish_rx_work_func); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ; + sys->rx_pool_sz = in->desc_fifo_sz / + IPA_FIFO_ELEMENT_SIZE - 1; + if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ) + sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ; + sys->pyld_hdlr = NULL; + sys->repl_hdlr = ipa3_replenish_wlan_rx_cache; + sys->get_skb = ipa3_get_skb_ipa_rx; + sys->free_skb = ipa3_free_skb_rx; + sys->free_rx_wrapper = ipa3_free_rx_wrapper; + in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR; + } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->policy = IPA_POLICY_INTR_POLL_MODE; + INIT_WORK(&sys->work, ipa3_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + ipa3_switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + ipa3_replenish_rx_work_func); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_pool_sz = in->desc_fifo_sz / + IPA_FIFO_ELEMENT_SIZE - 1; + if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ) + sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ; + sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr; + sys->get_skb = ipa3_get_skb_ipa_rx; + sys->free_skb = ipa3_free_skb_rx; + /* recycle skb for GSB use case */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + sys->free_rx_wrapper = + ipa3_free_rx_wrapper; + sys->repl_hdlr = + ipa3_replenish_rx_cache; + /* Overwrite buffer size & aggr limit for GSB */ + sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ( + IPA_GSB_RX_BUFF_BASE_SZ); + in->ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GSB_AGGR_BYTE_LIMIT; + } else { + sys->free_rx_wrapper = + ipa3_free_rx_wrapper; + sys->repl_hdlr = ipa3_replenish_rx_cache; + sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ; + } + } else if (in->client == + IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->policy = IPA_POLICY_INTR_POLL_MODE; + INIT_WORK(&sys->work, ipa3_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + ipa3_switch_to_intr_rx_work_func); + } else if (in->client == + IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->policy = IPA_POLICY_NOINTR_MODE; + } else if (in->client == IPA_CLIENT_ODL_DPL_CONS) { + IPADBG("assigning policy to ODL client:%d\n", + in->client); + sys->ep->status.status_en = true; + sys->policy = IPA_POLICY_INTR_POLL_MODE; + INIT_WORK(&sys->work, ipa3_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + ipa3_switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + ipa3_replenish_rx_work_func); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = + IPA_GENERIC_RX_BUFF_SZ(IPA_ODL_RX_BUFF_SZ); + sys->pyld_hdlr = ipa3_odl_dpl_rx_pyld_hdlr; + sys->get_skb = ipa3_get_skb_ipa_rx; + sys->free_skb = ipa3_free_skb_rx; + sys->free_rx_wrapper = ipa3_recycle_rx_wrapper; + sys->repl_hdlr = 
ipa3_replenish_rx_cache_recycle; + sys->rx_pool_sz = in->desc_fifo_sz / + IPA_FIFO_ELEMENT_SIZE - 1; + } else { + WARN(1, "Need to install a RX pipe hdlr\n"); + return -EINVAL; + } + } + + return 0; +} + +/** + * ipa3_tx_client_rx_notify_release() - Callback function + * which will call the user supplied callback function to + * release the skb, or release it on its own if no callback + * function was supplied + * + * @user1: [in] - Data Descriptor + * @user2: [in] - endpoint idx + * + * This notified callback is for the destination client + * This function is supplied in ipa3_tx_dp_mul + */ +static void ipa3_tx_client_rx_notify_release(void *user1, int user2) +{ + struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1; + int ep_idx = user2; + + IPADBG_LOW("Received data desc anchor:%pK\n", dd); + + atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc); + ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++; + + /* wlan host driver waits till tx complete before unload */ + IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n", + ep_idx, atomic_read(&ipa3_ctx->ep[ep_idx].avail_fifo_desc)); + IPADBG_LOW("calling client notify callback with priv:%pK\n", + ipa3_ctx->ep[ep_idx].priv); + + if (ipa3_ctx->ep[ep_idx].client_notify) { + ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv, + IPA_WRITE_DONE, (unsigned long)user1); + ipa3_ctx->ep[ep_idx].wstats.rx_hd_reply++; + } +} +/** + * ipa3_tx_client_rx_pkt_status() - Callback function + * which will call the user supplied callback function to + * increase the available fifo descriptor + * + * @user1: [in] - Data Descriptor + * @user2: [in] - endpoint idx + * + * This notified callback is for the destination client + * This function is supplied in ipa3_tx_dp_mul + */ +static void ipa3_tx_client_rx_pkt_status(void *user1, int user2) +{ + int ep_idx = user2; + + atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc); + ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++; +} + + +/** + * ipa3_tx_dp_mul() - Data-path tx handler for multiple packets + * @src: [in] - Client that is sending data + * @ipa_tx_data_desc: [in] data descriptors from wlan + * + * this is used for to transfer data descriptors that received + * from WLAN1_PROD pipe to IPA HW + * + * The function will send data descriptors from WLAN1_PROD (one + * at a time). 
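+ * Each descriptor is sent as a pair: a TAG STATUS immediate command
+ * followed by the data descriptor itself (see the desc[2] array below).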
Will set EOT flag for last descriptor Once this send was done + * from transport point-of-view the IPA driver will get notified by the + * supplied callback - ipa_gsi_irq_tx_notify_cb() + * + * ipa_gsi_irq_tx_notify_cb will call to the user supplied callback + * + * Returns: 0 on success, negative on failure + */ +int ipa3_tx_dp_mul(enum ipa_client_type src, + struct ipa_tx_data_desc *data_desc) +{ + /* The second byte in wlan header holds qmap id */ +#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1 + struct ipa_tx_data_desc *entry; + struct ipa3_sys_context *sys; + struct ipa3_desc desc[2]; + u32 num_desc, cnt; + int ep_idx; + + IPADBG_LOW("Received data desc anchor:%pK\n", data_desc); + + spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock); + + ep_idx = ipa3_get_ep_mapping(src); + if (unlikely(ep_idx == -1)) { + IPAERR("dest EP does not exist.\n"); + goto fail_send; + } + IPADBG_LOW("ep idx:%d\n", ep_idx); + sys = ipa3_ctx->ep[ep_idx].sys; + + if (unlikely(ipa3_ctx->ep[ep_idx].valid == 0)) { + IPAERR("dest EP not valid.\n"); + goto fail_send; + } + sys->ep->wstats.rx_hd_rcvd++; + + /* Calculate the number of descriptors */ + num_desc = 0; + list_for_each_entry(entry, &data_desc->link, link) { + num_desc++; + } + IPADBG_LOW("Number of Data Descriptors:%d", num_desc); + + if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) { + IPAERR("Insufficient data descriptors available\n"); + goto fail_send; + } + + /* Assign callback only for last data descriptor */ + cnt = 0; + list_for_each_entry(entry, &data_desc->link, link) { + memset(desc, 0, 2 * sizeof(struct ipa3_desc)); + + IPADBG_LOW("Parsing data desc :%d\n", cnt); + cnt++; + ((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] = + (u8)sys->ep->cfg.meta.qmap_id; + + /* the tag field will be populated in ipa3_send() function */ + desc[0].is_tag_status = true; + desc[1].pyld = entry->pyld_buffer; + desc[1].len = entry->pyld_len; + desc[1].type = IPA_DATA_DESC_SKB; + desc[1].user1 = data_desc; + desc[1].user2 = ep_idx; + IPADBG_LOW("priv:%pK pyld_buf:0x%pK pyld_len:%d\n", + entry->priv, desc[1].pyld, desc[1].len); + + /* In case of last descriptor populate callback */ + if (cnt == num_desc) { + IPADBG_LOW("data desc:%pK\n", data_desc); + desc[1].callback = ipa3_tx_client_rx_notify_release; + } else { + desc[1].callback = ipa3_tx_client_rx_pkt_status; + } + + IPADBG_LOW("calling ipa3_send()\n"); + if (ipa3_send(sys, 2, desc, true)) { + IPAERR("fail to send skb\n"); + sys->ep->wstats.rx_pkt_leak += (cnt-1); + sys->ep->wstats.rx_dp_fail++; + goto fail_send; + } + + if (atomic_read(&sys->ep->avail_fifo_desc) >= 0) + atomic_dec(&sys->ep->avail_fifo_desc); + + sys->ep->wstats.rx_pkts_rcvd++; + IPADBG_LOW("ep=%d fifo desc=%d\n", + ep_idx, atomic_read(&sys->ep->avail_fifo_desc)); + } + + sys->ep->wstats.rx_hd_processed++; + spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock); + return 0; + +fail_send: + spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock); + return -EFAULT; + +} + +void ipa3_free_skb(struct ipa_rx_data *data) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + + spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + + ipa3_ctx->wc_memb.total_tx_pkts_freed++; + rx_pkt = container_of(data, struct ipa3_rx_pkt_wrapper, data); + + ipa3_skb_recycle(rx_pkt->data.skb); + (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ); + + list_add_tail(&rx_pkt->link, + &ipa3_ctx->wc_memb.wlan_comm_desc_list); + ipa3_ctx->wc_memb.wlan_comm_free_cnt++; + + spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); +} + +/* Functions added to support kernel tests */ + +int 
ipa3_sys_setup(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_transport_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status) +{ + struct ipa3_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + + if (sys_in == NULL || clnt_hdl == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + + if (ipa_transport_hdl == NULL || ipa_pipe_num == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + if (sys_in->client >= IPA_CLIENT_MAX) { + IPAERR("bad parm client:%d\n", sys_in->client); + goto fail_gen; + } + + ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client :%d\n", sys_in->client); + goto fail_gen; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client); + + if (ep->valid == 1) { + if (sys_in->client != IPA_CLIENT_APPS_WAN_PROD) { + IPAERR("EP %d already allocated\n", ipa_ep_idx); + goto fail_and_disable_clocks; + } else { + if (ipa3_cfg_ep_hdr(ipa_ep_idx, + &sys_in->ipa_ep_cfg.hdr)) { + IPAERR("fail to configure hdr prop of EP %d\n", + ipa_ep_idx); + result = -EFAULT; + goto fail_and_disable_clocks; + } + if (ipa3_cfg_ep_hdr_ext(ipa_ep_idx, + &sys_in->ipa_ep_cfg.hdr_ext)) { + IPAERR("fail config hdr_ext prop of EP %d\n", + ipa_ep_idx); + result = -EFAULT; + goto fail_and_disable_clocks; + } + if (ipa3_cfg_ep_cfg(ipa_ep_idx, + &sys_in->ipa_ep_cfg.cfg)) { + IPAERR("fail to configure cfg prop of EP %d\n", + ipa_ep_idx); + result = -EFAULT; + goto fail_and_disable_clocks; + } + IPAERR("client %d (ep: %d) overlay ok sys=%pK\n", + sys_in->client, ipa_ep_idx, ep->sys); + ep->client_notify = sys_in->notify; + ep->priv = sys_in->priv; + *clnt_hdl = ipa_ep_idx; + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + return 0; + } + } + + memset(ep, 0, offsetof(struct ipa3_ep_context, sys)); + + ep->valid = 1; + ep->client = sys_in->client; + ep->client_notify = sys_in->notify; + ep->priv = sys_in->priv; + ep->keep_ipa_awake = true; + if (en_status) { + ep->status.status_en = true; + ep->status.status_ep = ipa_ep_idx; + } + + result = ipa3_enable_data_path(ipa_ep_idx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", + result, ipa_ep_idx); + goto fail_gen2; + } + + if (!ep->skip_ep_cfg) { + if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_gen2; + } + if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_gen2; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("skipping ep configuration\n"); + } + + *clnt_hdl = ipa_ep_idx; + + *ipa_pipe_num = ipa_ep_idx; + *ipa_transport_hdl = ipa3_ctx->gsi_dev_hdl; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client, + ipa_ep_idx, ep->sys); + + return 0; + +fail_gen2: +fail_and_disable_clocks: + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); +fail_gen: + return result; +} + +int ipa3_sys_teardown(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm(Either endpoint or client hdl invalid)\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipa3_disable_data_path(clnt_hdl); + ep->valid = 0; + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + 
IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + return 0; +} + +int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl) +{ + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm(Either endpoint or client hdl invalid)\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + ep->gsi_chan_hdl = gsi_ch_hdl; + ep->gsi_evt_ring_hdl = gsi_ev_hdl; + + return 0; +} + +static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify) +{ + switch (notify->evt_id) { + case GSI_EVT_OUT_OF_BUFFERS_ERR: + IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_EVT_OUT_OF_RESOURCES_ERR: + IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR: + IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n"); + break; + case GSI_EVT_EVT_RING_EMPTY_ERR: + IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n"); + break; + default: + IPAERR("Unexpected err evt: %d\n", notify->evt_id); + } +} + +static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify) +{ + switch (notify->evt_id) { + case GSI_CHAN_INVALID_TRE_ERR: + IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n"); + break; + case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR: + IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_BUFFERS_ERR: + IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_RESOURCES_ERR: + IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR: + IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n"); + break; + case GSI_CHAN_HWO_1_ERR: + IPAERR("Got GSI_CHAN_HWO_1_ERR\n"); + break; + default: + IPAERR("Unexpected err evt: %d\n", notify->evt_id); + } +} + +static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify) +{ + struct ipa3_tx_pkt_wrapper *tx_pkt; + + IPADBG_LOW("event %d notified\n", notify->evt_id); + + switch (notify->evt_id) { + case GSI_CHAN_EVT_EOT: + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1); + tx_pkt = notify->xfer_user_data; + queue_work(tx_pkt->sys->wq, &tx_pkt->work); + break; + default: + IPAERR("received unexpected event id %d\n", notify->evt_id); + } +} + +void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys) +{ + bool clk_off; + + atomic_set(&sys->curr_polling_state, 1); + ipa3_inc_acquire_wakelock(); + + /* + * pm deactivate is done in wq context + * or after NAPI poll + */ + if (ipa3_ctx->use_ipa_pm) { + clk_off = ipa_pm_activate(sys->pm_hdl); + if (!clk_off && sys->napi_obj) { + napi_schedule(sys->napi_obj); + return; + } + queue_work(sys->wq, &sys->work); + return; + } + + if (sys->napi_obj) { + struct ipa_active_client_logging_info log; + + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI"); + clk_off = ipa3_inc_client_enable_clks_no_block( + &log); + if (!clk_off) { + napi_schedule(sys->napi_obj); + return; + } + } + + queue_work(sys->wq, &sys->work); +} + +static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify) +{ + struct ipa3_sys_context *sys; + struct ipa3_rx_pkt_wrapper *rx_pkt_expected, *rx_pkt_rcvd; + + if (!notify) { + IPAERR("gsi notify is NULL.\n"); + return; + } + IPADBG_LOW("event %d notified\n", notify->evt_id); + + sys = (struct ipa3_sys_context *)notify->chan_user_data; + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa3_rx_pkt_wrapper, link); + rx_pkt_rcvd = (struct ipa3_rx_pkt_wrapper *)notify->xfer_user_data; + + if (rx_pkt_expected != rx_pkt_rcvd) { + 
IPAERR("Pkt was not filled in head of rx buffer.\n"); + WARN_ON(1); + return; + } + sys->ep->bytes_xfered_valid = true; + sys->ep->bytes_xfered = notify->bytes_xfered; + sys->ep->phys_base = rx_pkt_rcvd->data.dma_addr; + + switch (notify->evt_id) { + case GSI_CHAN_EVT_EOT: + case GSI_CHAN_EVT_EOB: + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1); + if (!atomic_read(&sys->curr_polling_state)) { + /* put the gsi channel into polling mode */ + gsi_config_channel_mode(sys->ep->gsi_chan_hdl, + GSI_CHAN_MODE_POLL); + __ipa_gsi_irq_rx_scedule_poll(sys); + } + break; + default: + IPAERR("received unexpected event id %d\n", notify->evt_id); + } +} + +static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify) +{ + struct ipa3_sys_context *sys; + struct ipa3_dma_xfer_wrapper *rx_pkt_expected, *rx_pkt_rcvd; + + if (!notify) { + IPAERR("gsi notify is NULL.\n"); + return; + } + IPADBG_LOW("event %d notified\n", notify->evt_id); + + sys = (struct ipa3_sys_context *)notify->chan_user_data; + if (sys->ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) { + IPAERR("IRQ_RX Callback was called for DMA_SYNC_CONS.\n"); + return; + } + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa3_dma_xfer_wrapper, link); + rx_pkt_rcvd = (struct ipa3_dma_xfer_wrapper *)notify + ->xfer_user_data; + if (rx_pkt_expected != rx_pkt_rcvd) { + IPAERR("Pkt was not filled in head of rx buffer.\n"); + WARN_ON(1); + return; + } + + sys->ep->bytes_xfered_valid = true; + sys->ep->bytes_xfered = notify->bytes_xfered; + sys->ep->phys_base = rx_pkt_rcvd->phys_addr_dest; + + switch (notify->evt_id) { + case GSI_CHAN_EVT_EOT: + if (!atomic_read(&sys->curr_polling_state)) { + /* put the gsi channel into polling mode */ + gsi_config_channel_mode(sys->ep->gsi_chan_hdl, + GSI_CHAN_MODE_POLL); + ipa3_inc_acquire_wakelock(); + atomic_set(&sys->curr_polling_state, 1); + queue_work(sys->wq, &sys->work); + } + break; + default: + IPAERR("received unexpected event id %d\n", notify->evt_id); + } +} + +int ipa3_alloc_common_event_ring(void) +{ + struct gsi_evt_ring_props gsi_evt_ring_props; + dma_addr_t evt_dma_addr; + int result; + + memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props)); + gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV; + gsi_evt_ring_props.intr = GSI_INTR_IRQ; + gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B; + + gsi_evt_ring_props.ring_len = IPA_COMMON_EVENT_RING_SIZE; + + gsi_evt_ring_props.ring_base_vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, + gsi_evt_ring_props.ring_len, &evt_dma_addr, GFP_KERNEL); + if (!gsi_evt_ring_props.ring_base_vaddr) { + IPAERR("fail to dma alloc %u bytes\n", + gsi_evt_ring_props.ring_len); + return -ENOMEM; + } + gsi_evt_ring_props.ring_base_addr = evt_dma_addr; + gsi_evt_ring_props.int_modt = 0; + gsi_evt_ring_props.int_modc = 1; /* moderation comes from channel*/ + gsi_evt_ring_props.rp_update_addr = 0; + gsi_evt_ring_props.exclusive = false; + gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb; + gsi_evt_ring_props.user_data = NULL; + + result = gsi_alloc_evt_ring(&gsi_evt_ring_props, + ipa3_ctx->gsi_dev_hdl, &ipa3_ctx->gsi_evt_comm_hdl); + if (result) { + IPAERR("gsi_alloc_evt_ring failed %d\n", result); + return result; + } + ipa3_ctx->gsi_evt_comm_ring_rem = IPA_COMMON_EVENT_RING_SIZE; + + return 0; +} + +static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in, + struct ipa3_ep_context *ep) +{ + struct gsi_evt_ring_props gsi_evt_ring_props; + struct gsi_chan_props gsi_channel_props; + union __packed gsi_channel_scratch ch_scratch; + 
const struct ipa_gsi_ep_config *gsi_ep_info; + dma_addr_t dma_addr; + dma_addr_t evt_dma_addr; + int result; + gfp_t mem_flag = GFP_KERNEL; + + if (in->client == IPA_CLIENT_APPS_WAN_CONS || + in->client == IPA_CLIENT_APPS_WAN_PROD) + mem_flag = GFP_ATOMIC; + + if (!ep) { + IPAERR("EP context is empty\n"); + return -EINVAL; + } + + evt_dma_addr = 0; + ep->gsi_evt_ring_hdl = ~0; + memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props)); + if (ep->sys->use_comm_evt_ring) { + if (ipa3_ctx->gsi_evt_comm_ring_rem < 2 * in->desc_fifo_sz) { + IPAERR("not enough space in common event ring\n"); + IPAERR("available: %d needed: %d\n", + ipa3_ctx->gsi_evt_comm_ring_rem, + 2 * in->desc_fifo_sz); + WARN_ON(1); + return -EFAULT; + } + ipa3_ctx->gsi_evt_comm_ring_rem -= (2 * in->desc_fifo_sz); + ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl; + } else if (ep->sys->policy != IPA_POLICY_NOINTR_MODE || + IPA_CLIENT_IS_CONS(ep->client)) { + gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV; + gsi_evt_ring_props.intr = GSI_INTR_IRQ; + gsi_evt_ring_props.re_size = + GSI_EVT_RING_RE_SIZE_16B; + + /* + * GSI ring length is calculated based on the desc_fifo_sz + * which was meant to define the BAM desc fifo. GSI descriptors + * are 16B as opposed to 8B for BAM. + */ + gsi_evt_ring_props.ring_len = 2 * in->desc_fifo_sz; + + gsi_evt_ring_props.ring_base_vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, + gsi_evt_ring_props.ring_len, + &evt_dma_addr, mem_flag); + if (!gsi_evt_ring_props.ring_base_vaddr) { + IPAERR("fail to dma alloc %u bytes\n", + gsi_evt_ring_props.ring_len); + return -ENOMEM; + } + gsi_evt_ring_props.ring_base_addr = evt_dma_addr; + + /* copy mem info */ + ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len; + ep->gsi_mem_info.evt_ring_base_addr = + gsi_evt_ring_props.ring_base_addr; + ep->gsi_mem_info.evt_ring_base_vaddr = + gsi_evt_ring_props.ring_base_vaddr; + + if (ep->sys->napi_obj) { + gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT; + gsi_evt_ring_props.int_modc = IPA_GSI_EVT_RING_INT_MODC; + } else { + gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT; + gsi_evt_ring_props.int_modc = 1; + } + + IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n", + ep->client, + gsi_evt_ring_props.int_modt, + gsi_evt_ring_props.int_modc); + gsi_evt_ring_props.rp_update_addr = 0; + gsi_evt_ring_props.exclusive = true; + gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb; + gsi_evt_ring_props.user_data = NULL; + + result = gsi_alloc_evt_ring(&gsi_evt_ring_props, + ipa3_ctx->gsi_dev_hdl, &ep->gsi_evt_ring_hdl); + if (result != GSI_STATUS_SUCCESS) + goto fail_alloc_evt_ring; + } + + memset(&gsi_channel_props, 0, sizeof(gsi_channel_props)); + gsi_channel_props.prot = GSI_CHAN_PROT_GPI; + if (IPA_CLIENT_IS_PROD(ep->client)) { + gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI; + } else { + gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI; + gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz; + } + + gsi_ep_info = ipa3_get_gsi_ep_info(ep->client); + if (!gsi_ep_info) { + IPAERR("Failed getting GSI EP info for client=%d\n", + ep->client); + result = -EINVAL; + goto fail_get_gsi_ep_info; + } else + gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num; + + gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl; + gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B; + + /* + * GSI ring length is calculated based on the desc_fifo_sz which was + * meant to define the BAM desc fifo. GSI descriptors are 16B as opposed + * to 8B for BAM. 
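+	 * (Doubling desc_fifo_sz therefore keeps the element count of the
+	 * ring unchanged relative to the BAM fifo it replaces.)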
For PROD pipes there is also an additional descriptor + * for TAG STATUS immediate command. APPS_WAN_PROD pipe is an exception + * as this pipe do not use TAG STATUS for completion. Instead it uses + * event ring based completions. + */ + if (ep->client == IPA_CLIENT_APPS_WAN_PROD) + gsi_channel_props.ring_len = 2 * in->desc_fifo_sz; + else if (IPA_CLIENT_IS_PROD(ep->client)) + gsi_channel_props.ring_len = 4 * in->desc_fifo_sz; + else + gsi_channel_props.ring_len = 2 * in->desc_fifo_sz; + gsi_channel_props.ring_base_vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len, + &dma_addr, mem_flag); + if (!gsi_channel_props.ring_base_vaddr) { + IPAERR("fail to dma alloc %u bytes\n", + gsi_channel_props.ring_len); + result = -ENOMEM; + goto fail_alloc_channel_ring; + } + gsi_channel_props.ring_base_addr = dma_addr; + + /* copy mem info */ + ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len; + ep->gsi_mem_info.chan_ring_base_addr = + gsi_channel_props.ring_base_addr; + ep->gsi_mem_info.chan_ring_base_vaddr = + gsi_channel_props.ring_base_vaddr; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE; + else + gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE; + gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG; + if (ep->client == IPA_CLIENT_APPS_CMD_PROD) + gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT; + else + gsi_channel_props.low_weight = 1; + gsi_channel_props.prefetch_mode = gsi_ep_info->prefetch_mode; + gsi_channel_props.empty_lvl_threshold = gsi_ep_info->prefetch_threshold; + gsi_channel_props.chan_user_data = ep->sys; + gsi_channel_props.err_cb = ipa_gsi_chan_err_cb; + if (IPA_CLIENT_IS_PROD(ep->client)) + gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb; + else + gsi_channel_props.xfer_cb = ipa_gsi_irq_rx_notify_cb; + if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client)) + gsi_channel_props.xfer_cb = ipa_dma_gsi_irq_rx_notify_cb; + result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl, + &ep->gsi_chan_hdl); + if (result != GSI_STATUS_SUCCESS) + goto fail_alloc_channel; + + memset(&ch_scratch, 0, sizeof(ch_scratch)); + /* + * Update scratch for MCS smart prefetch: + * Starting IPA4.5, smart prefetch implemented by H/W. + * At IPA 4.0/4.1/4.2, we do not use MCS smart prefetch + * so keep the fields zero. 
+ */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + ch_scratch.gpi.max_outstanding_tre = + gsi_ep_info->ipa_if_tlv * GSI_CHAN_RE_SIZE_16B; + ch_scratch.gpi.outstanding_threshold = + 2 * GSI_CHAN_RE_SIZE_16B; + } + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) + ch_scratch.gpi.dl_nlo_channel = 0; + result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("failed to write scratch %d\n", result); + goto fail_write_channel_scratch; + } + + if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) + gsi_config_channel_mode(ep->gsi_chan_hdl, + GSI_CHAN_MODE_POLL); + return 0; + +fail_write_channel_scratch: + if (gsi_dealloc_channel(ep->gsi_chan_hdl) + != GSI_STATUS_SUCCESS) { + IPAERR("Failed to dealloc GSI chan.\n"); + WARN_ON(1); + } +fail_alloc_channel: + dma_free_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len, + gsi_channel_props.ring_base_vaddr, dma_addr); +fail_alloc_channel_ring: +fail_get_gsi_ep_info: + if (ep->gsi_evt_ring_hdl != ~0) { + gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); + ep->gsi_evt_ring_hdl = ~0; + } +fail_alloc_evt_ring: + if (gsi_evt_ring_props.ring_base_vaddr) + dma_free_coherent(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len, + gsi_evt_ring_props.ring_base_vaddr, evt_dma_addr); + IPAERR("Return with err: %d\n", result); + return result; +} + +static int ipa_populate_tag_field(struct ipa3_desc *desc, + struct ipa3_tx_pkt_wrapper *tx_pkt, + struct ipahal_imm_cmd_pyld **tag_pyld_ret) +{ + struct ipahal_imm_cmd_pyld *tag_pyld; + struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = {0}; + + /* populate tag field only if it is NULL */ + if (desc->pyld == NULL) { + tag_cmd.tag = pointer_to_tag_wa(tx_pkt); + tag_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &tag_cmd, true); + if (unlikely(!tag_pyld)) { + IPAERR("Failed to construct ip_packet_tag_status\n"); + return -EFAULT; + } + /* + * This is for 32-bit pointer, will need special + * handling if 64-bit pointer is used + */ + IPADBG_LOW("tx_pkt sent in tag: 0x%pK\n", tx_pkt); + desc->pyld = tag_pyld->data; + desc->opcode = tag_pyld->opcode; + desc->len = tag_pyld->len; + desc->user1 = tag_pyld; + desc->type = IPA_IMM_CMD_DESC; + desc->callback = ipa3_tag_destroy_imm; + + *tag_pyld_ret = tag_pyld; + } + return 0; +} + +static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info) +{ + int unused_var; + + return ipa_poll_gsi_n_pkt(sys, mem_info, 1, &unused_var); +} + + +static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info, + int expected_num, int *actual_num) +{ + int ret; + int idx = 0; + int i; + struct gsi_chan_xfer_notify xfer_notify[IPA_WAN_NAPI_MAX_FRAMES]; + struct ipa3_rx_pkt_wrapper *rx_pkt; + int poll_num = 0; + + if (!actual_num || expected_num <= 0 || + expected_num > IPA_WAN_NAPI_MAX_FRAMES) { + IPAERR("bad params actual_num=%pK expected_num=%d\n", + actual_num, expected_num); + return GSI_STATUS_INVALID_PARAMS; + } + + if (sys->ep->bytes_xfered_valid) { + mem_info[idx].phys_base = sys->ep->phys_base; + mem_info[idx].size = (u32)sys->ep->bytes_xfered; + sys->ep->bytes_xfered_valid = false; + idx++; + } + if (expected_num == idx) { + *actual_num = idx; + return GSI_STATUS_SUCCESS; + } + + ret = gsi_poll_n_channel(sys->ep->gsi_chan_hdl, + xfer_notify, expected_num - idx, &poll_num); + if (ret == GSI_STATUS_POLL_EMPTY) { + if (idx) { + *actual_num = idx; + return GSI_STATUS_SUCCESS; + } + *actual_num = 0; + return ret; + } else if (ret != GSI_STATUS_SUCCESS) { + if (idx) { + 
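+		/*
+		 * A completion already cached by the IRQ callback
+		 * (bytes_xfered_valid) is still reported as a successful
+		 * one-buffer poll; the channel error is only propagated
+		 * when there is nothing to hand back.
+		 */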
*actual_num = idx; + return GSI_STATUS_SUCCESS; + } + *actual_num = 0; + IPAERR("Poll channel err: %d\n", ret); + return ret; + } + + for (i = 0; i < poll_num; i++) { + rx_pkt = (struct ipa3_rx_pkt_wrapper *) + xfer_notify[i].xfer_user_data; + mem_info[i+idx].phys_base = rx_pkt->data.dma_addr; + mem_info[i+idx].size = xfer_notify[i].bytes_xfered; + } + *actual_num = idx + poll_num; + return ret; +} + +/** + * ipa3_rx_poll() - Poll the rx packets from IPA HW. This + * function is exectued in the softirq context + * + * if input budget is zero, the driver switches back to + * interrupt mode. + * + * return number of polled packets, on error 0(zero) + */ +int ipa3_rx_poll(u32 clnt_hdl, int weight) +{ + struct ipa3_ep_context *ep; + int ret; + int cnt = 0; + int num = 0; + struct ipa_mem_buffer mem_info[IPA_WAN_NAPI_MAX_FRAMES]; + int remain_aggr_weight; + struct ipa_active_client_logging_info log; + + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI"); + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm 0x%x\n", clnt_hdl); + return cnt; + } + + remain_aggr_weight = weight / IPA_WAN_AGGR_PKT_CNT; + + if (remain_aggr_weight > IPA_WAN_NAPI_MAX_FRAMES) { + IPAERR("NAPI weight is higher than expected\n"); + IPAERR("expected %d got %d\n", + IPA_WAN_NAPI_MAX_FRAMES, remain_aggr_weight); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; +start_poll: + while (remain_aggr_weight > 0 && + atomic_read(&ep->sys->curr_polling_state)) { + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1); + if (ipa3_ctx->enable_napi_chain) { + ret = ipa_poll_gsi_n_pkt(ep->sys, mem_info, + remain_aggr_weight, &num); + } else { + ret = ipa_poll_gsi_n_pkt(ep->sys, mem_info, + 1, &num); + } + if (ret) + break; + + trace_ipa3_rx_poll_num(num); + ipa3_wq_rx_napi_chain(ep->sys, mem_info, num); + remain_aggr_weight -= num; + + trace_ipa3_rx_poll_cnt(ep->sys->len); + if (ep->sys->len == 0) { + if (remain_aggr_weight == 0) + cnt--; + break; + } + } + cnt += weight - remain_aggr_weight * IPA_WAN_AGGR_PKT_CNT; + if (cnt < weight) { + napi_complete(ep->sys->napi_obj); + ret = ipa3_rx_switch_to_intr_mode(ep->sys); + if (ret == -GSI_STATUS_PENDING_IRQ && + napi_reschedule(ep->sys->napi_obj)) + goto start_poll; + + if (ipa3_ctx->use_ipa_pm) + ipa_pm_deferred_deactivate(ep->sys->pm_hdl); + else + ipa3_dec_client_disable_clks_no_block(&log); + } + + return cnt; +} + +static unsigned long tag_to_pointer_wa(uint64_t tag) +{ + return 0xFFFF000000000000 | (unsigned long) tag; +} + +static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt) +{ + u16 temp; + /* Add the check but it might have throughput issue */ + if (ipa3_is_msm_device()) { + temp = (u16) (~((unsigned long) tx_pkt & + 0xFFFF000000000000) >> 48); + if (temp) { + IPAERR("The 16 prefix is not all 1s (%pK)\n", + tx_pkt); + /* + * We need all addresses starting at 0xFFFF to + * pass it to HW. + */ + ipa_assert(); + } + } + return (unsigned long)tx_pkt & 0x0000FFFFFFFFFFFF; +} + +/** + * ipa_gsi_ch20_wa() - software workaround for IPA GSI channel 20 + * + * A hardware limitation requires to avoid using GSI physical channel 20. + * This function allocates GSI physical channel 20 and holds it to prevent + * others to use it. 
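+ *
+ * It first allocates IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC placeholder
+ * channels, then allocates virtual channel IPA_GSI_CH_20_WA_VIRT_CHAN
+ * (the handle that is kept), and finally releases the placeholder
+ * channels. The DMA ring backing the kept channel is intentionally
+ * never freed.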
+ * + * Return codes: 0 on success, negative on failure + */ +int ipa_gsi_ch20_wa(void) +{ + struct gsi_chan_props gsi_channel_props; + dma_addr_t dma_addr; + int result; + int i; + unsigned long chan_hdl[IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC]; + unsigned long chan_hdl_to_keep; + + + memset(&gsi_channel_props, 0, sizeof(gsi_channel_props)); + gsi_channel_props.prot = GSI_CHAN_PROT_GPI; + gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI; + gsi_channel_props.evt_ring_hdl = ~0; + gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B; + gsi_channel_props.ring_len = 4 * gsi_channel_props.re_size; + gsi_channel_props.ring_base_vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len, + &dma_addr, 0); + gsi_channel_props.ring_base_addr = dma_addr; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE; + else + gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE; + + gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG; + gsi_channel_props.low_weight = 1; + gsi_channel_props.err_cb = ipa_gsi_chan_err_cb; + gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb; + + /* first allocate channels up to channel 20 */ + for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) { + gsi_channel_props.ch_id = i; + result = gsi_alloc_channel(&gsi_channel_props, + ipa3_ctx->gsi_dev_hdl, + &chan_hdl[i]); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("failed to alloc channel %d err %d\n", + i, result); + return result; + } + } + + /* allocate channel 20 */ + gsi_channel_props.ch_id = IPA_GSI_CH_20_WA_VIRT_CHAN; + result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl, + &chan_hdl_to_keep); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("failed to alloc channel %d err %d\n", + i, result); + return result; + } + + /* release all other channels */ + for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) { + result = gsi_dealloc_channel(chan_hdl[i]); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("failed to dealloc channel %d err %d\n", + i, result); + return result; + } + } + + /* DMA memory shall not be freed as it is used by channel 20 */ + return 0; +} + +/** + * ipa_adjust_ra_buff_base_sz() + * + * Return value: the largest power of two which is smaller + * than the input value + */ +static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit) +{ + aggr_byte_limit += IPA_MTU; + aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT; + aggr_byte_limit--; + aggr_byte_limit |= aggr_byte_limit >> 1; + aggr_byte_limit |= aggr_byte_limit >> 2; + aggr_byte_limit |= aggr_byte_limit >> 4; + aggr_byte_limit |= aggr_byte_limit >> 8; + aggr_byte_limit |= aggr_byte_limit >> 16; + aggr_byte_limit++; + return aggr_byte_limit >> 1; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c new file mode 100644 index 000000000000..fa18001d9e80 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c @@ -0,0 +1,862 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipa_emulation_stubs.h" + +# undef strsame +# define strsame(x, y) \ + (!strcmp((x), (y))) + +/* + * The following enum values used to index tables below. 
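+ * Index 0 selects the 3.5.1 dtsi data, index 1 the 4.0 data and
+ * index 2 the 4.5 data, matching the order of the per-version
+ * tables defined below.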
+ */ +enum dtsi_index_e { + DTSI_INDEX_3_5_1 = 0, + DTSI_INDEX_4_0 = 1, + DTSI_INDEX_4_5 = 2, +}; + +struct dtsi_replacement_u32 { + char *key; + u32 value; +}; + +struct dtsi_replacement_u32_table { + struct dtsi_replacement_u32 *p_table; + u32 num_entries; +}; + +struct dtsi_replacement_bool { + char *key; + bool value; +}; + +struct dtsi_replacement_bool_table { + struct dtsi_replacement_bool *p_table; + u32 num_entries; +}; + +struct dtsi_replacement_u32_array { + char *key; + u32 *p_value; + u32 num_elements; +}; + +struct dtsi_replacement_u32_array_table { + struct dtsi_replacement_u32_array *p_table; + u32 num_entries; +}; + +struct dtsi_replacement_resource_table { + struct resource *p_table; + u32 num_entries; +}; + +/* + * Any of the data below with _4_5 in the name represent data taken + * from the 4.5 dtsi file. + * + * Any of the data below with _4_0 in the name represent data taken + * from the 4.0 dtsi file. + * + * Any of the data below with _3_5_1 in the name represent data taken + * from the 3.5.1 dtsi file. + */ +static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_5[] = { + {"qcom,use-ipa-tethering-bridge", true}, + {"qcom,modem-cfg-emb-pipe-flt", true}, + {"qcom,ipa-wdi2", false}, + {"qcom,use-64-bit-dma-mask", false}, + {"qcom,bandwidth-vote-for-ipa", true}, + {"qcom,skip-uc-pipe-reset", false}, + {"qcom,tethered-flow-control", false}, + {"qcom,use-rg10-limitation-mitigation", false}, + {"qcom,do-not-use-ch-gsi-20", false}, + {"qcom,use-ipa-pm", true}, +}; + +static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_0[] = { + {"qcom,use-ipa-tethering-bridge", true}, + {"qcom,modem-cfg-emb-pipe-flt", true}, + {"qcom,ipa-wdi2", true}, + {"qcom,use-64-bit-dma-mask", false}, + {"qcom,bandwidth-vote-for-ipa", false}, + {"qcom,skip-uc-pipe-reset", false}, + {"qcom,tethered-flow-control", true}, + {"qcom,use-rg10-limitation-mitigation", false}, + {"qcom,do-not-use-ch-gsi-20", false}, + {"qcom,use-ipa-pm", false}, +}; + +static struct dtsi_replacement_bool ipa3_plat_drv_bool_3_5_1[] = { + {"qcom,use-ipa-tethering-bridge", true}, + {"qcom,modem-cfg-emb-pipe-flt", true}, + {"qcom,ipa-wdi2", true}, + {"qcom,use-64-bit-dma-mask", false}, + {"qcom,bandwidth-vote-for-ipa", true}, + {"qcom,skip-uc-pipe-reset", false}, + {"qcom,tethered-flow-control", false}, + {"qcom,use-rg10-limitation-mitigation", false}, + {"qcom,do-not-use-ch-gsi-20", false}, + {"qcom,use-ipa-pm", false}, +}; + +static struct dtsi_replacement_bool_table +ipa3_plat_drv_bool_table[] = { + { ipa3_plat_drv_bool_3_5_1, + ARRAY_SIZE(ipa3_plat_drv_bool_3_5_1) }, + { ipa3_plat_drv_bool_4_0, + ARRAY_SIZE(ipa3_plat_drv_bool_4_0) }, + { ipa3_plat_drv_bool_4_5, + ARRAY_SIZE(ipa3_plat_drv_bool_4_5) }, +}; + +static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_5[] = { + {"qcom,ipa-hw-ver", IPA_HW_v4_5}, + {"qcom,ipa-hw-mode", 3}, + {"qcom,wan-rx-ring-size", 192}, + {"qcom,lan-rx-ring-size", 192}, + {"qcom,ee", 0}, + {"qcom,msm-bus,num-cases", 5}, + {"emulator-bar0-offset", 0x01C00000}, +}; + +static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_0[] = { + {"qcom,ipa-hw-ver", IPA_HW_v4_0}, + {"qcom,ipa-hw-mode", 3}, + {"qcom,wan-rx-ring-size", 192}, + {"qcom,lan-rx-ring-size", 192}, + {"qcom,ee", 0}, + {"emulator-bar0-offset", 0x01C00000}, +}; + +static struct dtsi_replacement_u32 ipa3_plat_drv_u32_3_5_1[] = { + {"qcom,ipa-hw-ver", IPA_HW_v3_5_1}, + {"qcom,ipa-hw-mode", 3}, + {"qcom,wan-rx-ring-size", 192}, + {"qcom,lan-rx-ring-size", 192}, + {"qcom,ee", 0}, + {"emulator-bar0-offset", 0x01C00000}, +}; + +static struct 
dtsi_replacement_u32_table ipa3_plat_drv_u32_table[] = { + { ipa3_plat_drv_u32_3_5_1, + ARRAY_SIZE(ipa3_plat_drv_u32_3_5_1) }, + { ipa3_plat_drv_u32_4_0, + ARRAY_SIZE(ipa3_plat_drv_u32_4_0) }, + { ipa3_plat_drv_u32_4_5, + ARRAY_SIZE(ipa3_plat_drv_u32_4_5) }, +}; + +static u32 mhi_event_ring_id_limits_array_4_5[] = { + 9, 10 +}; + +static u32 mhi_event_ring_id_limits_array_4_0[] = { + 9, 10 +}; + +static u32 mhi_event_ring_id_limits_array_3_5_1[] = { + IPA_MHI_GSI_EVENT_RING_ID_START, IPA_MHI_GSI_EVENT_RING_ID_END +}; + +static u32 ipa_tz_unlock_reg_array_4_5[] = { + 0x04043583c, 0x00001000 +}; + +static u32 ipa_throughput_thresh_array_4_5[] = { + 310, 600, 1000 +}; + +static u32 ipa_tz_unlock_reg_array_4_0[] = { + 0x04043583c, 0x00001000 +}; + +static u32 ipa_tz_unlock_reg_array_3_5_1[] = { + 0x04043583c, 0x00001000 +}; + +struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_4_5[] = { + {"qcom,mhi-event-ring-id-limits", + mhi_event_ring_id_limits_array_4_5, + ARRAY_SIZE(mhi_event_ring_id_limits_array_4_5) }, + {"qcom,ipa-tz-unlock-reg", + ipa_tz_unlock_reg_array_4_5, + ARRAY_SIZE(ipa_tz_unlock_reg_array_4_5) }, + {"qcom,throughput-threshold", + ipa_throughput_thresh_array_4_5, + ARRAY_SIZE(ipa_throughput_thresh_array_4_5) }, +}; + +struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_4_0[] = { + {"qcom,mhi-event-ring-id-limits", + mhi_event_ring_id_limits_array_4_0, + ARRAY_SIZE(mhi_event_ring_id_limits_array_4_0) }, + {"qcom,ipa-tz-unlock-reg", + ipa_tz_unlock_reg_array_4_0, + ARRAY_SIZE(ipa_tz_unlock_reg_array_4_0) }, +}; + +struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_3_5_1[] = { + {"qcom,mhi-event-ring-id-limits", + mhi_event_ring_id_limits_array_3_5_1, + ARRAY_SIZE(mhi_event_ring_id_limits_array_3_5_1) }, + {"qcom,ipa-tz-unlock-reg", + ipa_tz_unlock_reg_array_3_5_1, + ARRAY_SIZE(ipa_tz_unlock_reg_array_3_5_1) }, +}; + +struct dtsi_replacement_u32_array_table +ipa3_plat_drv_u32_array_table[] = { + { ipa3_plat_drv_u32_array_3_5_1, + ARRAY_SIZE(ipa3_plat_drv_u32_array_3_5_1) }, + { ipa3_plat_drv_u32_array_4_0, + ARRAY_SIZE(ipa3_plat_drv_u32_array_4_0) }, + { ipa3_plat_drv_u32_array_4_5, + ARRAY_SIZE(ipa3_plat_drv_u32_array_4_5) }, +}; + +#define INTCTRL_OFFSET 0x083C0000 +#define INTCTRL_SIZE 0x00000110 + +#define IPA_BASE_OFFSET_4_5 0x01e00000 +#define IPA_BASE_SIZE_4_5 0x000c0000 +#define GSI_BASE_OFFSET_4_5 0x01e04000 +#define GSI_BASE_SIZE_4_5 0x00023000 + +struct resource ipa3_plat_drv_resource_4_5[] = { + /* + * PLEASE NOTE: The following offset values below ("ipa-base", + * "gsi-base", and "intctrl-base") are used to calculate + * offsets relative to the PCI BAR0 address provided by the + * PCI probe. After their use to calculate the offsets, they + * are not used again, since PCI ultimately dictates where + * things live. + */ + { + IPA_BASE_OFFSET_4_5, + (IPA_BASE_OFFSET_4_5 + IPA_BASE_SIZE_4_5), + "ipa-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + GSI_BASE_OFFSET_4_5, + (GSI_BASE_OFFSET_4_5 + GSI_BASE_SIZE_4_5), + "gsi-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + /* + * The following entry is germane only to the emulator + * environment. It is needed to locate the emulator's PCI + * interrupt controller... 
+ */ + { + INTCTRL_OFFSET, + (INTCTRL_OFFSET + INTCTRL_SIZE), + "intctrl-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + IPA_PIPE_MEM_START_OFST, + (IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE), + "ipa-pipe-mem", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + 0, + 0, + "gsi-irq", + IORESOURCE_IRQ, + 0, + NULL, + NULL, + NULL + }, + + { + 0, + 0, + "ipa-irq", + IORESOURCE_IRQ, + 0, + NULL, + NULL, + NULL + }, +}; + +#define IPA_BASE_OFFSET_4_0 0x01e00000 +#define IPA_BASE_SIZE_4_0 0x00034000 +#define GSI_BASE_OFFSET_4_0 0x01e04000 +#define GSI_BASE_SIZE_4_0 0x00028000 + +struct resource ipa3_plat_drv_resource_4_0[] = { + /* + * PLEASE NOTE: The following offset values below ("ipa-base", + * "gsi-base", and "intctrl-base") are used to calculate + * offsets relative to the PCI BAR0 address provided by the + * PCI probe. After their use to calculate the offsets, they + * are not used again, since PCI ultimately dictates where + * things live. + */ + { + IPA_BASE_OFFSET_4_0, + (IPA_BASE_OFFSET_4_0 + IPA_BASE_SIZE_4_0), + "ipa-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + GSI_BASE_OFFSET_4_0, + (GSI_BASE_OFFSET_4_0 + GSI_BASE_SIZE_4_0), + "gsi-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + /* + * The following entry is germane only to the emulator + * environment. It is needed to locate the emulator's PCI + * interrupt controller... + */ + { + INTCTRL_OFFSET, + (INTCTRL_OFFSET + INTCTRL_SIZE), + "intctrl-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + IPA_PIPE_MEM_START_OFST, + (IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE), + "ipa-pipe-mem", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + 0, + 0, + "gsi-irq", + IORESOURCE_IRQ, + 0, + NULL, + NULL, + NULL + }, + + { + 0, + 0, + "ipa-irq", + IORESOURCE_IRQ, + 0, + NULL, + NULL, + NULL + }, +}; + +#define IPA_BASE_OFFSET_3_5_1 0x01e00000 +#define IPA_BASE_SIZE_3_5_1 0x00034000 +#define GSI_BASE_OFFSET_3_5_1 0x01e04000 +#define GSI_BASE_SIZE_3_5_1 0x0002c000 + +struct resource ipa3_plat_drv_resource_3_5_1[] = { + /* + * PLEASE NOTE: The following offset values below ("ipa-base", + * "gsi-base", and "intctrl-base") are used to calculate + * offsets relative to the PCI BAR0 address provided by the + * PCI probe. After their use to calculate the offsets, they + * are not used again, since PCI ultimately dictates where + * things live. + */ + { + IPA_BASE_OFFSET_3_5_1, + (IPA_BASE_OFFSET_3_5_1 + IPA_BASE_SIZE_3_5_1), + "ipa-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + GSI_BASE_OFFSET_3_5_1, + (GSI_BASE_OFFSET_3_5_1 + GSI_BASE_SIZE_3_5_1), + "gsi-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + /* + * The following entry is germane only to the emulator + * environment. It is needed to locate the emulator's PCI + * interrupt controller... 
+ */ + { + INTCTRL_OFFSET, + (INTCTRL_OFFSET + INTCTRL_SIZE), + "intctrl-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + IPA_PIPE_MEM_START_OFST, + (IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE), + "ipa-pipe-mem", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + 0, + 0, + "gsi-irq", + IORESOURCE_IRQ, + 0, + NULL, + NULL, + NULL + }, + + { + 0, + 0, + "ipa-irq", + IORESOURCE_IRQ, + 0, + NULL, + NULL, + NULL + }, +}; + +struct dtsi_replacement_resource_table +ipa3_plat_drv_resource_table[] = { + { ipa3_plat_drv_resource_3_5_1, + ARRAY_SIZE(ipa3_plat_drv_resource_3_5_1) }, + { ipa3_plat_drv_resource_4_0, + ARRAY_SIZE(ipa3_plat_drv_resource_4_0) }, + { ipa3_plat_drv_resource_4_5, + ARRAY_SIZE(ipa3_plat_drv_resource_4_5) }, +}; + +/* + * The following code uses the data above... + */ +static u32 emulator_type_to_index(void) +{ + /* + * Use the input parameter to the IPA driver loadable module, + * which specifies the type of hardware the driver is running + * on. + */ + u32 index = DTSI_INDEX_4_0; + uint emulation_type = ipa3_get_emulation_type(); + + switch (emulation_type) { + case IPA_HW_v3_5_1: + index = DTSI_INDEX_3_5_1; + break; + case IPA_HW_v4_0: + index = DTSI_INDEX_4_0; + break; + case IPA_HW_v4_5: + index = DTSI_INDEX_4_5; + break; + default: + break; + } + + IPADBG("emulation_type(%u) emulation_index(%u)\n", + emulation_type, index); + + return index; +} + +/* From include/linux/of.h */ +/** + * emulator_of_property_read_bool - Find from a property + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * + * Search for a property in a device node. + * Returns true if the property exists false otherwise. + */ +bool emulator_of_property_read_bool( + const struct device_node *np, + const char *propname) +{ + u16 i; + u32 index; + struct dtsi_replacement_bool *ipa3_plat_drv_boolP; + + /* + * Get the index for the type of hardware we're running on. + * This is used as a table index. + */ + index = emulator_type_to_index(); + if (index >= ARRAY_SIZE(ipa3_plat_drv_bool_table)) { + IPADBG( + "Did not find ipa3_plat_drv_bool_table for index %u\n", + index); + return false; + } + + ipa3_plat_drv_boolP = + ipa3_plat_drv_bool_table[index].p_table; + + for (i = 0; + i < ipa3_plat_drv_bool_table[index].num_entries; + i++) { + if (strsame(ipa3_plat_drv_boolP[i].key, propname)) { + IPADBG( + "Found value %u for propname %s index %u\n", + ipa3_plat_drv_boolP[i].value, + propname, + index); + return ipa3_plat_drv_boolP[i].value; + } + } + + IPADBG("Did not find match for propname %s index %u\n", + propname, + index); + + return false; +} + +/* From include/linux/of.h */ +int emulator_of_property_read_u32( + const struct device_node *np, + const char *propname, + u32 *out_value) +{ + u16 i; + u32 index; + struct dtsi_replacement_u32 *ipa3_plat_drv_u32P; + + /* + * Get the index for the type of hardware we're running on. + * This is used as a table index. 
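+ * An unrecognized emulation type falls back to the IPA 4.0
+ * tables (see emulator_type_to_index()).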
+ */ + index = emulator_type_to_index(); + if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_table)) { + IPADBG( + "Did not find ipa3_plat_drv_u32_table for index %u\n", + index); + return false; + } + + ipa3_plat_drv_u32P = + ipa3_plat_drv_u32_table[index].p_table; + + for (i = 0; + i < ipa3_plat_drv_u32_table[index].num_entries; + i++) { + if (strsame(ipa3_plat_drv_u32P[i].key, propname)) { + *out_value = ipa3_plat_drv_u32P[i].value; + IPADBG( + "Found value %u for propname %s index %u\n", + ipa3_plat_drv_u32P[i].value, + propname, + index); + return 0; + } + } + + IPADBG("Did not find match for propname %s index %u\n", + propname, + index); + + return -EINVAL; +} + +/* From include/linux/of.h */ +/** + * emulator_of_property_read_u32_array - Find and read an array of 32 + * bit integers from a property. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to return value, modified only if return value is 0. + * @sz: number of array elements to read + * + * Search for a property in a device node and read 32-bit value(s) from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_values is modified only if a valid u32 value can be decoded. + */ +int emulator_of_property_read_u32_array( + const struct device_node *np, + const char *propname, + u32 *out_values, + size_t sz) +{ + u16 i; + u32 index; + struct dtsi_replacement_u32_array *u32_arrayP; + + /* + * Get the index for the type of hardware we're running on. + * This is used as a table index. + */ + index = emulator_type_to_index(); + if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) { + IPADBG( + "Did not find ipa3_plat_drv_u32_array_table for index %u\n", + index); + return false; + } + + u32_arrayP = + ipa3_plat_drv_u32_array_table[index].p_table; + for (i = 0; + i < ipa3_plat_drv_u32_array_table[index].num_entries; + i++) { + if (strsame( + u32_arrayP[i].key, propname)) { + u32 num_elements = + u32_arrayP[i].num_elements; + u32 *p_element = + &u32_arrayP[i].p_value[0]; + size_t j = 0; + + if (num_elements > sz) { + IPAERR( + "Found array of %u values for propname %s; only room for %u elements in copy buffer\n", + num_elements, + propname, + (unsigned int) sz); + return -EOVERFLOW; + } + + while (j++ < num_elements) + *out_values++ = *p_element++; + + IPADBG( + "Found array of values starting with %u for propname %s index %u\n", + u32_arrayP[i].p_value[0], + propname, + index); + + return 0; + } + } + + IPADBG("Did not find match for propname %s index %u\n", + propname, + index); + + return -EINVAL; +} + +/* From drivers/base/platform.c */ +/** + * emulator_platform_get_resource_byname - get a resource for a device by name + * @dev: platform device + * @type: resource type + * @name: resource name + */ +struct resource *emulator_platform_get_resource_byname( + struct platform_device *dev, + unsigned int type, + const char *name) +{ + u16 i; + u32 index; + struct resource *ipa3_plat_drv_resourceP; + + /* + * Get the index for the type of hardware we're running on. + * This is used as a table index. 
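+ * Note that the start/end values in these resource tables are
+ * offsets relative to the PCI BAR0 address supplied by the PCI
+ * probe (see the notes above the tables).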
+ */ + index = emulator_type_to_index(); + if (index >= ARRAY_SIZE(ipa3_plat_drv_resource_table)) { + IPADBG( + "Did not find ipa3_plat_drv_resource_table for index %u\n", + index); + return false; + } + + ipa3_plat_drv_resourceP = + ipa3_plat_drv_resource_table[index].p_table; + for (i = 0; + i < ipa3_plat_drv_resource_table[index].num_entries; + i++) { + struct resource *r = &ipa3_plat_drv_resourceP[i]; + + if (type == resource_type(r) && strsame(r->name, name)) { + IPADBG( + "Found start 0x%x size %u for name %s index %u\n", + (unsigned int) (r->start), + (unsigned int) (resource_size(r)), + name, + index); + return r; + } + } + + IPADBG("Did not find match for name %s index %u\n", + name, + index); + + return NULL; +} + +/* From drivers/of/base.c */ +/** + * emulator_of_property_count_elems_of_size - Count the number of + * elements in a property + * + * @np: device node from which the property value is to + * be read. Not used. + * @propname: name of the property to be searched. + * @elem_size: size of the individual element + * + * Search for a property and count the number of elements of size + * elem_size in it. Returns number of elements on success, -EINVAL if + * the property does not exist or its length does not match a multiple + * of elem_size and -ENODATA if the property does not have a value. + */ +int emulator_of_property_count_elems_of_size( + const struct device_node *np, + const char *propname, + int elem_size) +{ + u32 index; + + /* + * Get the index for the type of hardware we're running on. + * This is used as a table index. + */ + index = emulator_type_to_index(); + + /* + * Use elem_size to determine which table to search for the + * specified property name + */ + if (elem_size == sizeof(u32)) { + u16 i; + struct dtsi_replacement_u32_array *u32_arrayP; + + if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) { + IPADBG( + "Did not find ipa3_plat_drv_u32_array_table for index %u\n", + index); + return false; + } + + u32_arrayP = + ipa3_plat_drv_u32_array_table[index].p_table; + + for (i = 0; + i < ipa3_plat_drv_u32_array_table[index].num_entries; + i++) { + if (strsame(u32_arrayP[i].key, propname)) { + if (u32_arrayP[i].p_value == NULL) { + IPADBG( + "Found no elements for propname %s index %u\n", + propname, + index); + return -ENODATA; + } + + IPADBG( + "Found %u elements for propname %s index %u\n", + u32_arrayP[i].num_elements, + propname, + index); + + return u32_arrayP[i].num_elements; + } + } + + IPADBG( + "Found no match in table with elem_size %d for propname %s index %u\n", + elem_size, + propname, + index); + + return -EINVAL; + } + + IPAERR( + "Found no tables with element size %u to search for propname %s index %u\n", + elem_size, + propname, + index); + + return -EINVAL; +} + +int emulator_of_property_read_variable_u32_array( + const struct device_node *np, + const char *propname, + u32 *out_values, + size_t sz_min, + size_t sz_max) +{ + return emulator_of_property_read_u32_array( + np, propname, out_values, sz_max); +} + +resource_size_t emulator_resource_size(const struct resource *res) +{ + return res->end - res->start + 1; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h new file mode 100644 index 000000000000..d0cda1c60a42 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#if !defined(_IPA_EMULATION_STUBS_H_) +# define _IPA_EMULATION_STUBS_H_ + +# define outer_flush_range(x, y) +# define __flush_dcache_area(x, y) +# define __cpuc_flush_dcache_area(x, y) __flush_dcache_area(x, y) + +/* Point several API calls to these new EMULATION functions */ +# define of_property_read_bool(np, propname) \ + emulator_of_property_read_bool(NULL, propname) +# define of_property_read_u32(np, propname, out_value) \ + emulator_of_property_read_u32(NULL, propname, out_value) +# define of_property_read_u32_array(np, propname, out_values, sz) \ + emulator_of_property_read_u32_array(NULL, propname, out_values, sz) +# define platform_get_resource_byname(dev, type, name) \ + emulator_platform_get_resource_byname(NULL, type, name) +# define of_property_count_elems_of_size(np, propname, elem_size) \ + emulator_of_property_count_elems_of_size(NULL, propname, elem_size) +# define of_property_read_variable_u32_array( \ + np, propname, out_values, sz_min, sz_max) \ + emulator_of_property_read_variable_u32_array( \ + NULL, propname, out_values, sz_min, sz_max) +# define resource_size(res) \ + emulator_resource_size(res) + +/** + * emulator_of_property_read_bool - Findfrom a property + * @np: device node used to find the property value. (not used) + * @propname: name of the property to be searched. + * + * Search for a property in a device node. + * Returns true if the property exists false otherwise. + */ +bool emulator_of_property_read_bool( + const struct device_node *np, + const char *propname); + +int emulator_of_property_read_u32( + const struct device_node *np, + const char *propname, + u32 *out_value); + +/** + * emulator_of_property_read_u32_array - Find and read an array of 32 + * bit integers from a property. + * + * @np: device node used to find the property value. (not used) + * @propname: name of the property to be searched. + * @out_values: pointer to return value, modified only if return value is 0. + * @sz: number of array elements to read + * + * Search for a property in a device node and read 32-bit value(s) from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_values is modified only if a valid u32 value can be decoded. + */ +int emulator_of_property_read_u32_array( + const struct device_node *np, + const char *propname, + u32 *out_values, + size_t sz); + +/** + * emulator_platform_get_resource_byname - get a resource for a device + * by name + * + * @dev: platform device + * @type: resource type + * @name: resource name + */ +struct resource *emulator_platform_get_resource_byname( + struct platform_device *dev, + unsigned int type, + const char *name); + +/** + * emulator_of_property_count_elems_of_size - Count the number of + * elements in a property + * + * @np: device node used to find the property value. (not used) + * @propname: name of the property to be searched. + * @elem_size: size of the individual element + * + * Search for a property and count the number of elements of size + * elem_size in it. Returns number of elements on success, -EINVAL if + * the property does not exist or its length does not match a multiple + * of elem_size and -ENODATA if the property does not have a value. 
+ */ +int emulator_of_property_count_elems_of_size( + const struct device_node *np, + const char *propname, + int elem_size); + +int emulator_of_property_read_variable_u32_array( + const struct device_node *np, + const char *propname, + u32 *out_values, + size_t sz_min, + size_t sz_max); + +resource_size_t emulator_resource_size( + const struct resource *res); + +static inline bool is_device_dma_coherent(struct device *dev) +{ + return false; +} + +static inline phys_addr_t qcom_smem_virt_to_phys(void *addr) +{ + return 0; +} + +#endif /* #if !defined(_IPA_EMULATION_STUBS_H_) */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c new file mode 100644 index 000000000000..8a7c8d86febe --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c @@ -0,0 +1,1733 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#include "ipa_i.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" + +#define IPA_FLT_STATUS_OF_ADD_FAILED (-1) +#define IPA_FLT_STATUS_OF_DEL_FAILED (-1) +#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1) + +#define IPA_FLT_GET_RULE_TYPE(__entry) \ + ( \ + ((__entry)->rule.hashable) ? \ + (IPA_RULE_HASHABLE):(IPA_RULE_NON_HASHABLE) \ + ) + +/** + * ipa3_generate_flt_hw_rule() - generates the filtering hardware rule + * @ip: the ip address family type + * @entry: filtering entry + * @buf: output buffer, buf == NULL means + * caller wants to know the size of the rule as seen + * by HW so they did not pass a valid buffer, we will use a + * scratch buffer instead. + * With this scheme we are going to + * generate the rule twice, once to know size using scratch + * buffer and second to write the rule to the actual caller + * supplied buffer which is of required size + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip, + struct ipa3_flt_entry *entry, u8 *buf) +{ + struct ipahal_flt_rule_gen_params gen_params; + int res = 0; + + memset(&gen_params, 0, sizeof(gen_params)); + + if (entry->rule.hashable) { + if (entry->rule.attrib.attrib_mask & IPA_FLT_IS_PURE_ACK + && !entry->rule.eq_attrib_type) { + IPAERR_RL("PURE_ACK rule atrb used with hash rule\n"); + WARN_ON_RATELIMIT_IPA(1); + return -EPERM; + } + /* + * tos_eq_present field has two meanings: + * tos equation for IPA ver < 4.5 (as the field name reveals) + * pure_ack equation for IPA ver >= 4.5 + */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 && + entry->rule.eq_attrib_type && + entry->rule.eq_attrib.tos_eq_present) { + IPAERR_RL("PURE_ACK rule eq used with hash rule\n"); + return -EPERM; + } + } + + gen_params.ipt = ip; + if (entry->rt_tbl) + gen_params.rt_tbl_idx = entry->rt_tbl->idx; + else + gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx; + + gen_params.priority = entry->prio; + gen_params.id = entry->rule_id; + gen_params.rule = (const struct ipa_flt_rule *)&entry->rule; + + res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf); + if (res) { + IPAERR_RL("failed to generate flt h/w rule\n"); + return res; + } + + return 0; +} + +static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt) +{ + struct ipa3_flt_tbl *tbl; + int i; + + IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt); + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + + tbl = &ipa3_ctx->flt_tbl[i][ip]; + if 
(tbl->prev_mem[rlt].phys_base) { + IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i); + ipahal_free_dma_mem(&tbl->prev_mem[rlt]); + } + + if (list_empty(&tbl->head_flt_rule_list)) { + if (tbl->curr_mem[rlt].phys_base) { + IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n", + i); + ipahal_free_dma_mem(&tbl->curr_mem[rlt]); + } + } + } +} + +/** + * ipa_prep_flt_tbl_for_cmt() - preparing the flt table for commit + * assign priorities to the rules, calculate their sizes and calculate + * the overall table size + * @ip: the ip address family type + * @tbl: the flt tbl to be prepared + * @pipe_idx: the ep pipe appropriate for the given tbl + * + * Return: 0 on success, negative on failure + */ +static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip, + struct ipa3_flt_tbl *tbl, int pipe_idx) +{ + struct ipa3_flt_entry *entry; + int prio_i; + int max_prio; + u32 hdr_width; + + tbl->sz[IPA_RULE_HASHABLE] = 0; + tbl->sz[IPA_RULE_NON_HASHABLE] = 0; + + max_prio = ipahal_get_rule_max_priority(); + + prio_i = max_prio; + list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + + if (entry->rule.max_prio) { + entry->prio = max_prio; + } else { + if (ipahal_rule_decrease_priority(&prio_i)) { + IPAERR("cannot decrease rule priority - %d\n", + prio_i); + return -EPERM; + } + entry->prio = prio_i; + } + + if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) { + IPAERR("failed to calculate HW FLT rule size\n"); + return -EPERM; + } + IPADBG_LOW("pipe %d rule_id(handle) %u hw_len %d priority %u\n", + pipe_idx, entry->rule_id, entry->hw_len, entry->prio); + + if (entry->rule.hashable) + tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len; + else + tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len; + } + + if ((tbl->sz[IPA_RULE_HASHABLE] + + tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) { + IPADBG_LOW("flt tbl pipe %d is with zero total size\n", + pipe_idx); + return 0; + } + + hdr_width = ipahal_get_hw_tbl_hdr_width(); + + /* for the header word */ + if (tbl->sz[IPA_RULE_HASHABLE]) + tbl->sz[IPA_RULE_HASHABLE] += hdr_width; + if (tbl->sz[IPA_RULE_NON_HASHABLE]) + tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width; + + IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx, + tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]); + + return 0; +} + +/** + * ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures + * (rules and tables) to HW format and fill it in the given buffers + * @ip: the ip address family type + * @rlt: the type of the rules to translate (hashable or non-hashable) + * @base: the rules body buffer to be filled + * @hdr: the rules header (addresses/offsets) buffer to be filled + * @body_ofst: the offset of the rules body from the rules header at + * ipa sram + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip, + enum ipa_rule_type rlt, u8 *base, u8 *hdr, u32 body_ofst) +{ + u64 offset; + u8 *body_i; + int res; + struct ipa3_flt_entry *entry; + u8 *tbl_mem_buf; + struct ipa_mem_buffer tbl_mem; + struct ipa3_flt_tbl *tbl; + int i; + int hdr_idx = 0; + + body_i = base; + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + tbl = &ipa3_ctx->flt_tbl[i][ip]; + if (tbl->sz[rlt] == 0) { + hdr_idx++; + continue; + } + if (tbl->in_sys[rlt]) { + /* only body (no header) */ + tbl_mem.size = tbl->sz[rlt] - + ipahal_get_hw_tbl_hdr_width(); + if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) { + IPAERR("fail to alloc sys 
tbl of size %d\n", + tbl_mem.size); + goto err; + } + + if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base, + hdr, hdr_idx, true)) { + IPAERR("fail to wrt sys tbl addr to hdr\n"); + goto hdr_update_fail; + } + + tbl_mem_buf = tbl_mem.base; + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_flt_rule_list, + link) { + if (IPA_FLT_GET_RULE_TYPE(entry) != rlt) + continue; + res = ipa3_generate_flt_hw_rule( + ip, entry, tbl_mem_buf); + if (res) { + IPAERR("failed to gen HW FLT rule\n"); + goto hdr_update_fail; + } + tbl_mem_buf += entry->hw_len; + } + + if (tbl->curr_mem[rlt].phys_base) { + WARN_ON(tbl->prev_mem[rlt].phys_base); + tbl->prev_mem[rlt] = tbl->curr_mem[rlt]; + } + tbl->curr_mem[rlt] = tbl_mem; + } else { + offset = body_i - base + body_ofst; + + /* update the hdr at the right index */ + if (ipahal_fltrt_write_addr_to_hdr(offset, hdr, + hdr_idx, true)) { + IPAERR("fail to wrt lcl tbl ofst to hdr\n"); + goto hdr_update_fail; + } + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_flt_rule_list, + link) { + if (IPA_FLT_GET_RULE_TYPE(entry) != rlt) + continue; + res = ipa3_generate_flt_hw_rule( + ip, entry, body_i); + if (res) { + IPAERR("failed to gen HW FLT rule\n"); + goto err; + } + body_i += entry->hw_len; + } + + /** + * advance body_i to next table alignment as local + * tables are order back-to-back + */ + body_i += ipahal_get_lcl_tbl_addr_alignment(); + body_i = (u8 *)((long)body_i & + ~ipahal_get_lcl_tbl_addr_alignment()); + } + hdr_idx++; + } + + return 0; + +hdr_update_fail: + ipahal_free_dma_mem(&tbl_mem); +err: + return -EPERM; +} + +/** + * ipa_generate_flt_hw_tbl_img() - generates the flt hw tbls. + * headers and bodies are being created into buffers that will be filled into + * the local memory (sram) + * @ip: the ip address family type + * @alloc_params: In and Out parameters for the allocations of the buffers + * 4 buffers: hdr and bdy, each hashable and non-hashable + * + * Return: 0 on success, negative on failure + */ +static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip, + struct ipahal_fltrt_alloc_imgs_params *alloc_params) +{ + u32 hash_bdy_start_ofst, nhash_bdy_start_ofst; + int rc = 0; + + if (ip == IPA_IP_v4) { + nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) - + IPA_MEM_PART(v4_flt_nhash_ofst); + hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_hash_ofst) - + IPA_MEM_PART(v4_flt_hash_ofst); + } else { + nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_nhash_ofst) - + IPA_MEM_PART(v6_flt_nhash_ofst); + hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_hash_ofst) - + IPA_MEM_PART(v6_flt_hash_ofst); + } + + if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) { + IPAERR_RL("fail to allocate FLT HW TBL images. 
IP %d\n", ip); + rc = -ENOMEM; + goto allocate_failed; + } + + if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE, + alloc_params->hash_bdy.base, alloc_params->hash_hdr.base, + hash_bdy_start_ofst)) { + IPAERR_RL("fail to translate hashable flt tbls to hw format\n"); + rc = -EPERM; + goto translate_fail; + } + if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE, + alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base, + nhash_bdy_start_ofst)) { + IPAERR_RL("fail to translate non-hash flt tbls to hw format\n"); + rc = -EPERM; + goto translate_fail; + } + + return rc; + +translate_fail: + if (alloc_params->hash_hdr.size) + ipahal_free_dma_mem(&alloc_params->hash_hdr); + ipahal_free_dma_mem(&alloc_params->nhash_hdr); + if (alloc_params->hash_bdy.size) + ipahal_free_dma_mem(&alloc_params->hash_bdy); + if (alloc_params->nhash_bdy.size) + ipahal_free_dma_mem(&alloc_params->nhash_bdy); +allocate_failed: + return rc; +} + +/** + * ipa_flt_valid_lcl_tbl_size() - validate if the space allocated for flt + * tbl bodies at the sram is enough for the commit + * @ipt: the ip address family type + * @rlt: the rule type (hashable or non-hashable) + * + * Return: true if enough space available or false in other cases + */ +static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt, + enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy) +{ + u16 avail; + + if (!bdy) { + IPAERR("Bad parameters, bdy = NULL\n"); + return false; + } + + if (ipt == IPA_IP_v4) + avail = (rlt == IPA_RULE_HASHABLE) ? + IPA_MEM_PART(apps_v4_flt_hash_size) : + IPA_MEM_PART(apps_v4_flt_nhash_size); + else + avail = (rlt == IPA_RULE_HASHABLE) ? + IPA_MEM_PART(apps_v6_flt_hash_size) : + IPA_MEM_PART(apps_v6_flt_nhash_size); + + if (bdy->size <= avail) + return true; + + IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n", + bdy->size, avail, ipt, rlt); + return false; +} + +/** + * ipa_flt_alloc_cmd_buffers() - alloc descriptors and imm cmds + * payload pointers buffers for headers and bodies of flt structure + * as well as place for flush imm. + * @ipt: the ip address family type + * @entries: the number of entries + * @desc: [OUT] descriptor buffer + * @cmd: [OUT] imm commands payload pointers buffer + * + * Return: 0 on success, negative on failure + */ +static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip, u16 entries, + struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld) +{ + *desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC); + if (*desc == NULL) { + IPAERR("fail to alloc desc blob ip %d\n", ip); + goto fail_desc_alloc; + } + + *cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC); + if (*cmd_pyld == NULL) { + IPAERR("fail to alloc cmd pyld blob ip %d\n", ip); + goto fail_cmd_alloc; + } + + return 0; + +fail_cmd_alloc: + kfree(*desc); +fail_desc_alloc: + return -ENOMEM; +} + +/** + * ipa_flt_skip_pipe_config() - skip ep flt configuration or not? 
+ * will skip according to pre-configuration or modem pipes + * @pipe: the EP pipe index + * + * Return: true if to skip, false otherwize + */ +static bool ipa_flt_skip_pipe_config(int pipe) +{ + struct ipa3_ep_context *ep; + + if (ipa_is_modem_pipe(pipe)) { + IPADBG_LOW("skip %d - modem owned pipe\n", pipe); + return true; + } + + if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) { + IPADBG_LOW("skip %d\n", pipe); + return true; + } + + ep = &ipa3_ctx->ep[pipe]; + + if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD) == pipe + && ipa3_ctx->modem_cfg_emb_pipe_flt) + && ep->client == IPA_CLIENT_APPS_WAN_PROD) { + IPADBG_LOW("skip %d\n", pipe); + return true; + } + + return false; +} + +/** + * __ipa_commit_flt_v3() - commit flt tables to the hw + * commit the headers and the bodies if are local with internal cache flushing. + * The headers (and local bodies) will first be created into dma buffers and + * then written via IC to the SRAM + * @ipt: the ip address family type + * + * Return: 0 on success, negative on failure + */ +int __ipa_commit_flt_v3(enum ipa_ip_type ip) +{ + struct ipahal_fltrt_alloc_imgs_params alloc_params; + int rc = 0; + struct ipa3_desc *desc; + struct ipahal_imm_cmd_register_write reg_write_cmd = {0}; + struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0}; + struct ipahal_imm_cmd_pyld **cmd_pyld; + int num_cmd = 0; + int i; + int hdr_idx; + u32 lcl_hash_hdr, lcl_nhash_hdr; + u32 lcl_hash_bdy, lcl_nhash_bdy; + bool lcl_hash, lcl_nhash; + struct ipahal_reg_fltrt_hash_flush flush; + struct ipahal_reg_valmask valmask; + u32 tbl_hdr_width; + struct ipa3_flt_tbl *tbl; + u16 entries; + + tbl_hdr_width = ipahal_get_hw_tbl_hdr_width(); + memset(&alloc_params, 0, sizeof(alloc_params)); + alloc_params.ipt = ip; + alloc_params.tbls_num = ipa3_ctx->ep_flt_num; + + if (ip == IPA_IP_v4) { + lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_hash_ofst) + + tbl_hdr_width; /* to skip the bitmap */ + lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_nhash_ofst) + + tbl_hdr_width; /* to skip the bitmap */ + lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_flt_hash_ofst); + lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_flt_nhash_ofst); + lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl; + lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl; + } else { + lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_hash_ofst) + + tbl_hdr_width; /* to skip the bitmap */ + lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_nhash_ofst) + + tbl_hdr_width; /* to skip the bitmap */ + lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_flt_hash_ofst); + lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_flt_nhash_ofst); + lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl; + lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl; + } + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + tbl = &ipa3_ctx->flt_tbl[i][ip]; + if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) { + rc = -EPERM; + goto prep_failed; + } + if (!tbl->in_sys[IPA_RULE_HASHABLE] && + tbl->sz[IPA_RULE_HASHABLE]) { + alloc_params.num_lcl_hash_tbls++; + alloc_params.total_sz_lcl_hash_tbls += + tbl->sz[IPA_RULE_HASHABLE]; + alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width; + + } + if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] && + tbl->sz[IPA_RULE_NON_HASHABLE]) { + alloc_params.num_lcl_nhash_tbls++; + alloc_params.total_sz_lcl_nhash_tbls += + tbl->sz[IPA_RULE_NON_HASHABLE]; + 
alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width; + } + } + + if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) { + IPAERR_RL("fail to generate FLT HW TBL image. IP %d\n", ip); + rc = -EFAULT; + goto prep_failed; + } + + if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE, + &alloc_params.hash_bdy)) { + rc = -EFAULT; + goto fail_size_valid; + } + if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE, + &alloc_params.nhash_bdy)) { + rc = -EFAULT; + goto fail_size_valid; + } + + /* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */ + entries = (ipa3_ctx->ep_flt_num) * 2 + 3; + + if (ipa_flt_alloc_cmd_buffers(ip, entries, &desc, &cmd_pyld)) { + rc = -ENOMEM; + goto fail_size_valid; + } + + /* + * SRAM memory not allocated to hash tables. Sending + * command to hash tables(filer/routing) operation not supported. + */ + if (!ipa3_ctx->ipa_fltrt_not_hashable) { + /* flushing ipa internal hashable flt rules cache */ + memset(&flush, 0, sizeof(flush)); + if (ip == IPA_IP_v4) + flush.v4_flt = true; + else + flush.v6_flt = true; + ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask); + reg_write_cmd.skip_pipeline_clear = false; + reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + reg_write_cmd.offset = ipahal_get_reg_ofst( + IPA_FILT_ROUT_HASH_FLUSH); + reg_write_cmd.value = valmask.val; + reg_write_cmd.value_mask = valmask.mask; + cmd_pyld[0] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, ®_write_cmd, + false); + if (!cmd_pyld[0]) { + IPAERR( + "fail construct register_write imm cmd: IP %d\n", ip); + rc = -EFAULT; + goto fail_reg_write_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + ++num_cmd; + } + + hdr_idx = 0; + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) { + IPADBG_LOW("skip %d - not filtering pipe\n", i); + continue; + } + + if (ipa_flt_skip_pipe_config(i)) { + hdr_idx++; + continue; + } + + if (num_cmd + 1 >= entries) { + IPAERR("number of commands is out of range: IP = %d\n", + ip); + rc = -ENOBUFS; + goto fail_imm_cmd_construct; + } + + IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n", + hdr_idx, i); + + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = tbl_hdr_width; + mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base + + hdr_idx * tbl_hdr_width; + mem_cmd.local_addr = lcl_nhash_hdr + + hdr_idx * tbl_hdr_width; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem cmd: IP = %d\n", + ip); + rc = -ENOMEM; + goto fail_imm_cmd_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + ++num_cmd; + + /* + * SRAM memory not allocated to hash tables. Sending command + * to hash tables(filer/routing) operation not supported. 
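+ * When ipa_fltrt_not_hashable is set, only the non-hashable
+ * header copy above is sent for this pipe.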
+ */ + if (!ipa3_ctx->ipa_fltrt_not_hashable) { + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = tbl_hdr_width; + mem_cmd.system_addr = alloc_params.hash_hdr.phys_base + + hdr_idx * tbl_hdr_width; + mem_cmd.local_addr = lcl_hash_hdr + + hdr_idx * tbl_hdr_width; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, + &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR( + "fail construct dma_shared_mem cmd: IP = %d\n", + ip); + rc = -ENOMEM; + goto fail_imm_cmd_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], + cmd_pyld[num_cmd]); + ++num_cmd; + } + ++hdr_idx; + } + + if (lcl_nhash) { + if (num_cmd >= entries) { + IPAERR("number of commands is out of range: IP = %d\n", + ip); + rc = -ENOBUFS; + goto fail_imm_cmd_construct; + } + + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.nhash_bdy.size; + mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base; + mem_cmd.local_addr = lcl_nhash_bdy; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem cmd: IP = %d\n", + ip); + rc = -ENOMEM; + goto fail_imm_cmd_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + ++num_cmd; + } + if (lcl_hash) { + if (num_cmd >= entries) { + IPAERR("number of commands is out of range: IP = %d\n", + ip); + rc = -ENOBUFS; + goto fail_imm_cmd_construct; + } + + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.hash_bdy.size; + mem_cmd.system_addr = alloc_params.hash_bdy.phys_base; + mem_cmd.local_addr = lcl_hash_bdy; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem cmd: IP = %d\n", + ip); + rc = -ENOMEM; + goto fail_imm_cmd_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + ++num_cmd; + } + + if (ipa3_send_cmd(num_cmd, desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_imm_cmd_construct; + } + + IPADBG_LOW("Hashable HEAD\n"); + IPA_DUMP_BUFF(alloc_params.hash_hdr.base, + alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size); + + IPADBG_LOW("Non-Hashable HEAD\n"); + IPA_DUMP_BUFF(alloc_params.nhash_hdr.base, + alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size); + + if (alloc_params.hash_bdy.size) { + IPADBG_LOW("Hashable BODY\n"); + IPA_DUMP_BUFF(alloc_params.hash_bdy.base, + alloc_params.hash_bdy.phys_base, + alloc_params.hash_bdy.size); + } + + if (alloc_params.nhash_bdy.size) { + IPADBG_LOW("Non-Hashable BODY\n"); + IPA_DUMP_BUFF(alloc_params.nhash_bdy.base, + alloc_params.nhash_bdy.phys_base, + alloc_params.nhash_bdy.size); + } + + __ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE); + __ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE); + +fail_imm_cmd_construct: + for (i = 0 ; i < num_cmd ; i++) + ipahal_destroy_imm_cmd(cmd_pyld[i]); +fail_reg_write_construct: + kfree(desc); + kfree(cmd_pyld); +fail_size_valid: + if (alloc_params.hash_hdr.size) + ipahal_free_dma_mem(&alloc_params.hash_hdr); + ipahal_free_dma_mem(&alloc_params.nhash_hdr); + if (alloc_params.hash_bdy.size) + ipahal_free_dma_mem(&alloc_params.hash_bdy); + if (alloc_params.nhash_bdy.size) + 
ipahal_free_dma_mem(&alloc_params.nhash_bdy); +prep_failed: + return rc; +} + +static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule, + struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip) +{ + if (rule->action != IPA_PASS_TO_EXCEPTION) { + if (!rule->eq_attrib_type) { + if (!rule->rt_tbl_hdl) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + + *rt_tbl = ipa3_id_find(rule->rt_tbl_hdl); + if (*rt_tbl == NULL) { + IPAERR_RL("RT tbl not found\n"); + goto error; + } + + if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) { + IPAERR_RL("RT table cookie is invalid\n"); + goto error; + } + } else { + if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ? + IPA_MEM_PART(v4_modem_rt_index_hi) : + IPA_MEM_PART(v6_modem_rt_index_hi))) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + } + } else { + if (rule->rt_tbl_idx > 0) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + if (rule->pdn_idx) { + if (rule->action == IPA_PASS_TO_EXCEPTION || + rule->action == IPA_PASS_TO_ROUTING) { + IPAERR_RL( + "PDN index should be 0 when action is not pass to NAT\n"); + goto error; + } else { + if (rule->pdn_idx >= IPA_MAX_PDN_NUM) { + IPAERR_RL("PDN index %d is too large\n", + rule->pdn_idx); + goto error; + } + } + } + } + + if (rule->rule_id) { + if ((rule->rule_id < ipahal_get_rule_id_hi_bit()) || + (rule->rule_id >= ((ipahal_get_rule_id_hi_bit()<<1)-1))) { + IPAERR_RL("invalid rule_id provided 0x%x\n" + "rule_id with bit 0x%x are auto generated\n", + rule->rule_id, ipahal_get_rule_id_hi_bit()); + goto error; + } + } + + return 0; + +error: + return -EPERM; +} + +static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry, + const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl, + struct ipa3_flt_tbl *tbl, bool user) +{ + int id; + + *entry = kmem_cache_zalloc(ipa3_ctx->flt_rule_cache, GFP_KERNEL); + if (!*entry) + goto error; + INIT_LIST_HEAD(&((*entry)->link)); + (*entry)->rule = *rule; + (*entry)->cookie = IPA_FLT_COOKIE; + (*entry)->rt_tbl = rt_tbl; + (*entry)->tbl = tbl; + if (rule->rule_id) { + id = rule->rule_id; + } else { + id = ipa3_alloc_rule_id(tbl->rule_ids); + if (id < 0) { + IPAERR_RL("failed to allocate rule id\n"); + WARN_ON_RATELIMIT_IPA(1); + goto rule_id_fail; + } + } + (*entry)->rule_id = id; + (*entry)->ipacm_installed = user; + + return 0; + +rule_id_fail: + kmem_cache_free(ipa3_ctx->flt_rule_cache, *entry); +error: + return -EPERM; +} + +static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl, + struct ipa3_flt_entry *entry, u32 *rule_hdl) +{ + int id; + + tbl->rule_cnt++; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt++; + id = ipa3_id_alloc(entry); + if (id < 0) { + IPAERR_RL("failed to add to tree\n"); + WARN_ON_RATELIMIT_IPA(1); + goto ipa_insert_failed; + } + *rule_hdl = id; + entry->id = id; + IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt); + + return 0; +ipa_insert_failed: + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + tbl->rule_cnt--; + return -EPERM; +} + +static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip, + const struct ipa_flt_rule *rule, u8 add_rear, + u32 *rule_hdl, bool user) +{ + struct ipa3_flt_entry *entry; + struct ipa3_rt_tbl *rt_tbl = NULL; + + if (__ipa_validate_flt_rule(rule, &rt_tbl, ip)) + goto error; + + if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, user)) + goto error; + + if (add_rear) { + if (tbl->sticky_rear) + list_add_tail(&entry->link, + tbl->head_flt_rule_list.prev); + else + list_add_tail(&entry->link, &tbl->head_flt_rule_list); + } else 
{ + list_add(&entry->link, &tbl->head_flt_rule_list); + } + + if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl)) + goto ipa_insert_failed; + + return 0; +ipa_insert_failed: + list_del(&entry->link); + /* if rule id was allocated from idr, remove it */ + if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) && + (entry->rule_id >= ipahal_get_low_rule_id())) + idr_remove(entry->tbl->rule_ids, entry->rule_id); + kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); + +error: + return -EPERM; +} + +static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl, + const struct ipa_flt_rule *rule, + u32 *rule_hdl, + enum ipa_ip_type ip, + struct ipa3_flt_entry **add_after_entry) +{ + struct ipa3_flt_entry *entry; + struct ipa3_rt_tbl *rt_tbl = NULL; + + if (!*add_after_entry) + goto error; + + if (rule == NULL || rule_hdl == NULL) { + IPAERR_RL("bad parms rule=%pK rule_hdl=%pK\n", rule, + rule_hdl); + goto error; + } + + if (__ipa_validate_flt_rule(rule, &rt_tbl, ip)) + goto error; + + if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, true)) + goto error; + + list_add(&entry->link, &((*add_after_entry)->link)); + + if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl)) + goto ipa_insert_failed; + + /* + * prepare for next insertion + */ + *add_after_entry = entry; + + return 0; + +ipa_insert_failed: + list_del(&entry->link); + /* if rule id was allocated from idr, remove it */ + if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) && + (entry->rule_id >= ipahal_get_low_rule_id())) + idr_remove(entry->tbl->rule_ids, entry->rule_id); + kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); + +error: + *add_after_entry = NULL; + return -EPERM; +} + +static int __ipa_del_flt_rule(u32 rule_hdl) +{ + struct ipa3_flt_entry *entry; + int id; + + entry = ipa3_id_find(rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_FLT_COOKIE) { + IPAERR_RL("bad params\n"); + return -EINVAL; + } + id = entry->id; + + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + IPADBG("del flt rule rule_cnt=%d rule_id=%d\n", + entry->tbl->rule_cnt, entry->rule_id); + entry->cookie = 0; + /* if rule id was allocated from idr, remove it */ + if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) && + (entry->rule_id >= ipahal_get_low_rule_id())) + idr_remove(entry->tbl->rule_ids, entry->rule_id); + + kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); + + /* remove the handle from the database */ + ipa3_id_remove(id); + + return 0; +} + +static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, + enum ipa_ip_type ip) +{ + struct ipa3_flt_entry *entry; + struct ipa3_rt_tbl *rt_tbl = NULL; + + entry = ipa3_id_find(frule->rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + goto error; + } + + if (entry->cookie != IPA_FLT_COOKIE) { + IPAERR_RL("bad params\n"); + goto error; + } + + if (__ipa_validate_flt_rule(&frule->rule, &rt_tbl, ip)) + goto error; + + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + + entry->rule = frule->rule; + entry->rt_tbl = rt_tbl; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt++; + entry->hw_len = 0; + entry->prio = 0; + + return 0; + +error: + return -EPERM; +} + +static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx) +{ + *ipa_ep_idx = ipa3_get_ep_mapping(ep); + if (*ipa_ep_idx < 0) { + IPAERR_RL("ep not valid ep=%d\n", ep); + return -EINVAL; + } + if (ipa3_ctx->ep[*ipa_ep_idx].valid == 0) + IPADBG("ep not connected ep_idx=%d\n", *ipa_ep_idx); + + if 
(!ipa_is_ep_support_flt(*ipa_ep_idx)) { + IPAERR("ep do not support filtering ep=%d\n", ep); + return -EINVAL; + } + + return 0; +} + +static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, + const struct ipa_flt_rule *rule, u8 add_rear, + u32 *rule_hdl, bool user) +{ + struct ipa3_flt_tbl *tbl; + int ipa_ep_idx; + + if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) { + IPAERR_RL("bad parms rule=%pK rule_hdl=%pK ep=%d\n", rule, + rule_hdl, ep); + + return -EINVAL; + } + + if (__ipa_add_flt_get_ep_idx(ep, &ipa_ep_idx)) + return -EINVAL; + + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip]; + IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep); + + return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user); +} + +/** + * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of filtering rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) +{ + return ipa3_add_flt_rule_usr(rules, false); +} + +/** + * ipa3_add_flt_rule_usr() - Add the specified filtering rules to + * SW and optionally commit to IPA HW + * @rules: [inout] set of filtering rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only) +{ + int i; + int result; + + if (rules == NULL || rules->num_rules == 0 || + rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < rules->num_rules; i++) { + if (!rules->global) { + /* if hashing not supported, all table entry + * are non-hash tables + */ + if (ipa3_ctx->ipa_fltrt_not_hashable) + rules->rules[i].rule.hashable = false; + result = __ipa_add_ep_flt_rule(rules->ip, + rules->ep, + &rules->rules[i].rule, + rules->rules[i].at_rear, + &rules->rules[i].flt_rule_hdl, + user_only); + } else { + result = -1; + } + + if (result) { + IPAERR_RL("failed to add flt rule %d\n", i); + rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->global) { + IPAERR_RL("no support for global filter rules\n"); + result = -EPERM; + goto bail; + } + + if (rules->commit) + if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_add_flt_rule_after() - Add the specified filtering rules to SW after + * the rule which its handle is given and optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules) +{ + int i; + int result; + struct ipa3_flt_tbl *tbl; + int ipa_ep_idx; + struct ipa3_flt_entry *entry; + + if (rules == NULL || rules->num_rules == 0 || + rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + if (rules->ep >= IPA_CLIENT_MAX) { + IPAERR_RL("bad parms ep=%d\n", rules->ep); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + + if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) { + result = -EINVAL; + goto bail; + } + + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip]; + + entry = ipa3_id_find(rules->add_after_hdl); + if (entry == NULL) { + 
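+ /* add_after_hdl does not name an existing rule handle */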
IPAERR_RL("lookup failed\n"); + result = -EINVAL; + goto bail; + } + + if (entry->cookie != IPA_FLT_COOKIE) { + IPAERR_RL("Invalid cookie value = %u flt hdl id = %d\n", + entry->cookie, rules->add_after_hdl); + result = -EINVAL; + goto bail; + } + + if (entry->tbl != tbl) { + IPAERR_RL("given entry does not match the table\n"); + result = -EINVAL; + goto bail; + } + + if (tbl->sticky_rear) + if (&entry->link == tbl->head_flt_rule_list.prev) { + IPAERR_RL("cannot add rule at end of a sticky table"); + result = -EINVAL; + goto bail; + } + + IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n", + rules->ip, rules->ep, rules->add_after_hdl); + + /* + * we add all rules one after the other, if one insertion fails, it cuts + * the chain (all following will receive fail status) following calls to + * __ipa_add_flt_rule_after will fail (entry == NULL) + */ + + for (i = 0; i < rules->num_rules; i++) { + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + rules->rules[i].rule.hashable = false; + result = __ipa_add_flt_rule_after(tbl, + &rules->rules[i].rule, + &rules->rules[i].flt_rule_hdl, + rules->ip, + &entry); + + if (result) { + IPAERR_RL("failed to add flt rule %d\n", i); + rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) { + IPAERR("failed to commit flt rules\n"); + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and + * optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) { + IPAERR_RL("failed to del flt rule %i\n", i); + hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) + if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_mdfy_flt_rule() - Modify the specified filtering rules in SW and + * optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_rules; i++) { + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + hdls->rules[i].rule.hashable = false; + if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) { + IPAERR_RL("failed to mdfy flt rule %i\n", i); + hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED; + } else { + hdls->rules[i].status = 0; + } + } + + if (hdls->commit) + if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + + +/** + * ipa3_commit_flt() - Commit the current 
SW filtering table of specified type + * to IPA HW + * @ip: [in] the family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_commit_flt(enum ipa_ip_type ip) +{ + int result; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + + if (ipa3_ctx->ctrl->ipa3_commit_flt(ip)) { + result = -EPERM; + goto bail; + } + result = 0; + +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_reset_flt() - Reset the current SW filtering table of specified type + * (does not commit to HW) + * @ip: [in] the family of routing tables + * @user_only: [in] indicate rules deleted by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only) +{ + struct ipa3_flt_tbl *tbl; + struct ipa3_flt_entry *entry; + struct ipa3_flt_entry *next; + int i; + int id; + int rule_id; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + + tbl = &ipa3_ctx->flt_tbl[i][ip]; + list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, + link) { + if (ipa3_id_find(entry->id) == NULL) { + WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa3_ctx->lock); + return -EFAULT; + } + + if (!user_only || + entry->ipacm_installed) { + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + /* if rule id was allocated from idr, remove */ + rule_id = entry->rule_id; + id = entry->id; + if ((rule_id < ipahal_get_rule_id_hi_bit()) && + (rule_id >= ipahal_get_low_rule_id())) + idr_remove(entry->tbl->rule_ids, + rule_id); + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->flt_rule_cache, + entry); + + /* remove the handle from the database */ + ipa3_id_remove(id); + } + } + } + + /* commit the change to IPA-HW */ + if (ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4) || + ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6)) { + IPAERR("fail to commit flt-rule\n"); + WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa3_ctx->lock); + return -EPERM; + } + mutex_unlock(&ipa3_ctx->lock); + return 0; +} + +void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx) +{ + struct ipa3_flt_tbl *tbl; + struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx]; + struct ipa_flt_rule rule; + + if (!ipa_is_ep_support_flt(ipa_ep_idx)) { + IPADBG("cannot add flt rules to non filtering pipe num %d\n", + ipa_ep_idx); + return; + } + + memset(&rule, 0, sizeof(rule)); + + mutex_lock(&ipa3_ctx->lock); + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; + rule.action = IPA_PASS_TO_EXCEPTION; + __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true, + &ep->dflt_flt4_rule_hdl, false); + ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4); + tbl->sticky_rear = true; + + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; + rule.action = IPA_PASS_TO_EXCEPTION; + __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true, + &ep->dflt_flt6_rule_hdl, false); + ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6); + tbl->sticky_rear = true; + mutex_unlock(&ipa3_ctx->lock); +} + +void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx) +{ + struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx]; + struct ipa3_flt_tbl *tbl; + + mutex_lock(&ipa3_ctx->lock); + if (ep->dflt_flt4_rule_hdl) { + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; + __ipa_del_flt_rule(ep->dflt_flt4_rule_hdl); + 
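+ /* commit the table to HW without the default IPv4 rule */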
ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4); + /* Reset the sticky flag. */ + tbl->sticky_rear = false; + ep->dflt_flt4_rule_hdl = 0; + } + if (ep->dflt_flt6_rule_hdl) { + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; + __ipa_del_flt_rule(ep->dflt_flt6_rule_hdl); + ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6); + /* Reset the sticky flag. */ + tbl->sticky_rear = false; + ep->dflt_flt6_rule_hdl = 0; + } + mutex_unlock(&ipa3_ctx->lock); +} + +/** + * ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given pipe + * Pipe must be for AP EP (not modem) and support filtering + * updates the the filtering masking values without changing the rt ones. + * + * @pipe_idx: filter pipe index to configure the tuple masking + * @tuple: the tuple members masking + * Returns: 0 on success, negative on failure + * + */ +int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple) +{ + struct ipahal_reg_fltrt_hash_tuple fltrt_tuple; + + if (!tuple) { + IPAERR_RL("bad tuple\n"); + return -EINVAL; + } + + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("bad pipe index!\n"); + return -EINVAL; + } + + if (!ipa_is_ep_support_flt(pipe_idx)) { + IPAERR("pipe %d not filtering pipe\n", pipe_idx); + return -EINVAL; + } + + if (ipa_is_modem_pipe(pipe_idx)) { + IPAERR("modem pipe tuple is not configured by AP\n"); + return -EINVAL; + } + + ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n, + pipe_idx, &fltrt_tuple); + fltrt_tuple.flt = *tuple; + ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n, + pipe_idx, &fltrt_tuple); + + return 0; +} + +/** + * ipa3_flt_read_tbl_from_hw() -Read filtering table from IPA HW + * @pipe_idx: IPA endpoint index + * @ip_type: IPv4 or IPv6 table + * @hashable: hashable or non-hashable table + * @entry: array to fill the table entries + * @num_entry: number of entries in entry array. set by the caller to indicate + * entry array size. Then set by this function as an output parameter to + * indicate the number of entries in the array + * + * This function reads the filtering table from IPA SRAM and prepares an array + * of entries. This function is mainly used for debugging purposes. + * + * If empty table or Modem Apps table, zero entries will be returned. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type, + bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry) +{ + void *ipa_sram_mmio; + u64 hdr_base_ofst; + int tbl_entry_idx; + int i; + int res = 0; + u64 tbl_addr; + bool is_sys; + u8 *rule_addr; + struct ipa_mem_buffer *sys_tbl_mem; + int rule_idx; + struct ipa3_flt_tbl *flt_tbl_ptr; + + IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%pK num_entry=0x%pK\n", + pipe_idx, ip_type, hashable, entry, num_entry); + + /* + * SRAM memory not allocated to hash tables. 
Reading of hash table + * rules operation not supported + */ + if (hashable && ipa3_ctx->ipa_fltrt_not_hashable) { + IPAERR_RL("Reading hashable rules not supported\n"); + *num_entry = 0; + return 0; + } + + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || ip_type >= IPA_IP_MAX || + !entry || !num_entry) { + IPAERR_RL("Invalid params\n"); + return -EFAULT; + } + + if (!ipa_is_ep_support_flt(pipe_idx)) { + IPAERR_RL("pipe %d does not support filtering\n", pipe_idx); + return -EINVAL; + } + + flt_tbl_ptr = &ipa3_ctx->flt_tbl[pipe_idx][ip_type]; + /* map IPA SRAM */ + ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, + ipa3_ctx->smem_restricted_bytes / 4), + ipa3_ctx->smem_sz); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + + memset(entry, 0, sizeof(*entry) * (*num_entry)); + if (hashable) { + if (ip_type == IPA_IP_v4) + hdr_base_ofst = + IPA_MEM_PART(v4_flt_hash_ofst); + else + hdr_base_ofst = + IPA_MEM_PART(v6_flt_hash_ofst); + } else { + if (ip_type == IPA_IP_v4) + hdr_base_ofst = + IPA_MEM_PART(v4_flt_nhash_ofst); + else + hdr_base_ofst = + IPA_MEM_PART(v6_flt_nhash_ofst); + } + + /* calculate the index of the tbl entry */ + tbl_entry_idx = 1; /* skip the bitmap */ + for (i = 0; i < pipe_idx; i++) + if (ipa3_ctx->ep_flt_bitmap & (1 << i)) + tbl_entry_idx++; + + IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n", + hdr_base_ofst, tbl_entry_idx); + + res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst, + tbl_entry_idx, &tbl_addr, &is_sys); + if (res) { + IPAERR("failed to read table address from header structure\n"); + goto bail; + } + IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n", + pipe_idx, tbl_addr, is_sys); + if (!tbl_addr) { + IPAERR("invalid flt tbl addr\n"); + res = -EFAULT; + goto bail; + } + + /* for tables resides in DDR access it from the virtual memory */ + if (is_sys) { + sys_tbl_mem = + &flt_tbl_ptr->curr_mem[hashable ? IPA_RULE_HASHABLE : + IPA_RULE_NON_HASHABLE]; + if (sys_tbl_mem->phys_base && + sys_tbl_mem->phys_base != tbl_addr) { + IPAERR("mismatch addr: parsed=%llx sw=%pad\n", + tbl_addr, &sys_tbl_mem->phys_base); + } + if (sys_tbl_mem->phys_base) + rule_addr = sys_tbl_mem->base; + else + rule_addr = NULL; + } else { + rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr; + } + + IPADBG("First rule addr 0x%pK\n", rule_addr); + + if (!rule_addr) { + /* Modem table in system memory or empty table */ + *num_entry = 0; + goto bail; + } + + rule_idx = 0; + while (rule_idx < *num_entry) { + res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]); + if (res) { + IPAERR("failed parsing flt rule\n"); + goto bail; + } + + IPADBG("rule_size=%d\n", entry[rule_idx].rule_size); + if (!entry[rule_idx].rule_size) + break; + + rule_addr += entry[rule_idx].rule_size; + rule_idx++; + } + *num_entry = rule_idx; +bail: + iounmap(ipa_sram_mmio); + return 0; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c new file mode 100644 index 000000000000..fe3f700308e9 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c @@ -0,0 +1,1328 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include "ipa_i.h" +#include "ipahal/ipahal.h" + +static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64}; +static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64}; + +#define HDR_TYPE_IS_VALID(type) \ + ((type) >= 0 && (type) < IPA_HDR_L2_MAX) + +#define HDR_PROC_TYPE_IS_VALID(type) \ + ((type) >= 0 && (type) < IPA_HDR_PROC_MAX) + +/** + * ipa3_generate_hdr_hw_tbl() - generates the headers table + * @mem: [out] buffer to put the header table + * + * Returns: 0 on success, negative on failure + */ +static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem) +{ + struct ipa3_hdr_entry *entry; + + mem->size = ipa3_ctx->hdr_tbl.end; + + if (mem->size == 0) { + IPAERR("hdr tbl empty\n"); + return -EPERM; + } + IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end); + + mem->base = dma_zalloc_coherent(ipa3_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list, + link) { + if (entry->is_hdr_proc_ctx) + continue; + IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len, + entry->offset_entry->offset); + ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset, + entry->hdr, entry->hdr_len); + } + + return 0; +} + +static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem, + u32 hdr_base_addr) +{ + struct ipa3_hdr_proc_ctx_entry *entry; + int ret; + int ep; + struct ipa_ep_cfg *cfg_ptr; + struct ipa_l2tp_header_remove_procparams *l2p_hdr_rm_ptr; + + list_for_each_entry(entry, + &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list, + link) { + IPADBG_LOW("processing type %d ofst=%d\n", + entry->type, entry->offset_entry->offset); + + if (entry->l2tp_params.is_dst_pipe_valid) { + ep = ipa3_get_ep_mapping(entry->l2tp_params.dst_pipe); + + if (ep >= 0) { + cfg_ptr = &ipa3_ctx->ep[ep].cfg; + l2p_hdr_rm_ptr = + &entry->l2tp_params.hdr_remove_param; + l2p_hdr_rm_ptr->hdr_ofst_pkt_size_valid = + cfg_ptr->hdr.hdr_ofst_pkt_size_valid; + l2p_hdr_rm_ptr->hdr_ofst_pkt_size = + cfg_ptr->hdr.hdr_ofst_pkt_size; + l2p_hdr_rm_ptr->hdr_endianness = + cfg_ptr->hdr_ext.hdr_little_endian ? + 0 : 1; + } + } + + ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base, + entry->offset_entry->offset, + entry->hdr->hdr_len, + entry->hdr->is_hdr_proc_ctx, + entry->hdr->phys_base, + hdr_base_addr, + entry->hdr->offset_entry, + entry->l2tp_params); + if (ret) + return ret; + } + + return 0; +} + +/** + * ipa3_generate_hdr_proc_ctx_hw_tbl() - + * generates the headers processing context table. + * @mem: [out] buffer to put the processing context table + * @aligned_mem: [out] actual processing context table (with alignment). + * Processing context table needs to be 8 Bytes aligned. + * + * Returns: 0 on success, negative on failure + */ +static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr, + struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem) +{ + u32 hdr_base_addr; + + mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? 
: 4; + + /* make sure table is aligned */ + mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE; + + IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end); + + mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + aligned_mem->phys_base = + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base); + aligned_mem->base = mem->base + + (aligned_mem->phys_base - mem->phys_base); + aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE; + memset(aligned_mem->base, 0, aligned_mem->size); + hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) : + hdr_sys_addr; + return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr); +} + +/** + * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW + * + * Returns: 0 on success, negative on failure + */ +int __ipa_commit_hdr_v3_0(void) +{ + struct ipa3_desc desc[2]; + struct ipa_mem_buffer hdr_mem; + struct ipa_mem_buffer ctx_mem; + struct ipa_mem_buffer aligned_ctx_mem; + struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0}; + struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0}; + struct ipahal_imm_cmd_register_write reg_write_cmd = {0}; + struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0}; + struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL; + struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL; + int rc = -EFAULT; + u32 proc_ctx_size; + u32 proc_ctx_ofst; + u32 proc_ctx_size_ddr; + + memset(desc, 0, 2 * sizeof(struct ipa3_desc)); + + if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) { + IPAERR("fail to generate HDR HW TBL\n"); + goto end; + } + + if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem, + &aligned_ctx_mem)) { + IPAERR("fail to generate HDR PROC CTX HW TBL\n"); + goto end; + } + + if (ipa3_ctx->hdr_tbl_lcl) { + if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) { + IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size, + IPA_MEM_PART(apps_hdr_size)); + goto end; + } else { + dma_cmd_hdr.is_read = false; /* write operation */ + dma_cmd_hdr.skip_pipeline_clear = false; + dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR; + dma_cmd_hdr.system_addr = hdr_mem.phys_base; + dma_cmd_hdr.size = hdr_mem.size; + dma_cmd_hdr.local_addr = + ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_hdr_ofst); + hdr_cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, + &dma_cmd_hdr, false); + if (!hdr_cmd_pyld) { + IPAERR("fail construct dma_shared_mem cmd\n"); + goto end; + } + } + } else { + if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) { + IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size, + IPA_MEM_PART(apps_hdr_size_ddr)); + goto end; + } else { + hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base; + hdr_cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_HDR_INIT_SYSTEM, + &hdr_init_cmd, false); + if (!hdr_cmd_pyld) { + IPAERR("fail construct hdr_init_system cmd\n"); + goto end; + } + } + } + ipa3_init_imm_cmd_desc(&desc[0], hdr_cmd_pyld); + IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size); + + proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size); + proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst); + if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) { + if (aligned_ctx_mem.size > proc_ctx_size) { + IPAERR("tbl too big needed %d avail %d\n", + aligned_ctx_mem.size, + proc_ctx_size); + goto end; + } else { + dma_cmd_ctx.is_read = false; /* Write operation */ + dma_cmd_ctx.skip_pipeline_clear = false; + 
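+ /* copy the aligned proc ctx table into the local SRAM partition */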
dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR; + dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base; + dma_cmd_ctx.size = aligned_ctx_mem.size; + dma_cmd_ctx.local_addr = + ipa3_ctx->smem_restricted_bytes + + proc_ctx_ofst; + ctx_cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, + &dma_cmd_ctx, false); + if (!ctx_cmd_pyld) { + IPAERR("fail construct dma_shared_mem cmd\n"); + goto end; + } + } + } else { + proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr); + if (aligned_ctx_mem.size > proc_ctx_size_ddr) { + IPAERR("tbl too big, needed %d avail %d\n", + aligned_ctx_mem.size, + proc_ctx_size_ddr); + goto end; + } else { + reg_write_cmd.skip_pipeline_clear = false; + reg_write_cmd.pipeline_clear_options = + IPAHAL_HPS_CLEAR; + reg_write_cmd.offset = + ipahal_get_reg_ofst( + IPA_SYS_PKT_PROC_CNTXT_BASE); + reg_write_cmd.value = aligned_ctx_mem.phys_base; + reg_write_cmd.value_mask = + ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1); + ctx_cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, + ®_write_cmd, false); + if (!ctx_cmd_pyld) { + IPAERR("fail construct register_write cmd\n"); + goto end; + } + } + } + ipa3_init_imm_cmd_desc(&desc[1], ctx_cmd_pyld); + IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size); + + if (ipa3_send_cmd(2, desc)) + IPAERR("fail to send immediate command\n"); + else + rc = 0; + + if (ipa3_ctx->hdr_tbl_lcl) { + dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base, + hdr_mem.phys_base); + } else { + if (!rc) { + if (ipa3_ctx->hdr_mem.phys_base) + dma_free_coherent(ipa3_ctx->pdev, + ipa3_ctx->hdr_mem.size, + ipa3_ctx->hdr_mem.base, + ipa3_ctx->hdr_mem.phys_base); + ipa3_ctx->hdr_mem = hdr_mem; + } + } + + if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) { + dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base, + ctx_mem.phys_base); + } else { + if (!rc) { + if (ipa3_ctx->hdr_proc_ctx_mem.phys_base) + dma_free_coherent(ipa3_ctx->pdev, + ipa3_ctx->hdr_proc_ctx_mem.size, + ipa3_ctx->hdr_proc_ctx_mem.base, + ipa3_ctx->hdr_proc_ctx_mem.phys_base); + ipa3_ctx->hdr_proc_ctx_mem = ctx_mem; + } + } + +end: + if (ctx_cmd_pyld) + ipahal_destroy_imm_cmd(ctx_cmd_pyld); + + if (hdr_cmd_pyld) + ipahal_destroy_imm_cmd(hdr_cmd_pyld); + + return rc; +} + +static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, + bool add_ref_hdr, bool user_only) +{ + struct ipa3_hdr_entry *hdr_entry; + struct ipa3_hdr_proc_ctx_entry *entry; + struct ipa3_hdr_proc_ctx_offset_entry *offset; + u32 bin; + struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl; + int id; + int needed_len; + int mem_size; + + IPADBG_LOW("Add processing type %d hdr_hdl %d\n", + proc_ctx->type, proc_ctx->hdr_hdl); + + if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) { + IPAERR_RL("invalid processing type %d\n", proc_ctx->type); + return -EINVAL; + } + + hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl); + if (!hdr_entry) { + IPAERR_RL("hdr_hdl is invalid\n"); + return -EINVAL; + } + if (hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie); + WARN_ON_RATELIMIT_IPA(1); + return -EINVAL; + } + IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n", + hdr_entry->name, hdr_entry->is_hdr_proc_ctx); + + entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL); + if (!entry) { + IPAERR("failed to alloc proc_ctx object\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&entry->link); + + entry->type = proc_ctx->type; + entry->hdr = hdr_entry; + entry->l2tp_params = proc_ctx->l2tp_params; + if (add_ref_hdr) 
+ hdr_entry->ref_cnt++; + entry->cookie = IPA_PROC_HDR_COOKIE; + entry->ipacm_installed = user_only; + + needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type); + + if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) { + bin = IPA_HDR_PROC_CTX_BIN0; + } else if (needed_len <= + ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) { + bin = IPA_HDR_PROC_CTX_BIN1; + } else { + IPAERR_RL("unexpected needed len %d\n", needed_len); + WARN_ON_RATELIMIT_IPA(1); + goto bad_len; + } + + mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ? + IPA_MEM_PART(apps_hdr_proc_ctx_size) : + IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr); + if (list_empty(&htbl->head_free_offset_list[bin])) { + if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) { + IPAERR_RL("hdr proc ctx table overflow\n"); + goto bad_len; + } + + offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache, + GFP_KERNEL); + if (!offset) { + IPAERR("failed to alloc offset object\n"); + goto bad_len; + } + INIT_LIST_HEAD(&offset->link); + /* + * for a first item grow, set the bin and offset which are set + * in stone + */ + offset->offset = htbl->end; + offset->bin = bin; + offset->ipacm_installed = user_only; + htbl->end += ipa_hdr_proc_ctx_bin_sz[bin]; + list_add(&offset->link, + &htbl->head_offset_list[bin]); + } else { + /* get the first free slot */ + offset = + list_first_entry(&htbl->head_free_offset_list[bin], + struct ipa3_hdr_proc_ctx_offset_entry, link); + offset->ipacm_installed = user_only; + list_move(&offset->link, &htbl->head_offset_list[bin]); + } + + entry->offset_entry = offset; + list_add(&entry->link, &htbl->head_proc_ctx_entry_list); + htbl->proc_ctx_cnt++; + IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len, + htbl->proc_ctx_cnt, offset->offset); + + id = ipa3_id_alloc(entry); + if (id < 0) { + IPAERR_RL("failed to alloc id\n"); + WARN_ON_RATELIMIT_IPA(1); + goto ipa_insert_failed; + } + entry->id = id; + proc_ctx->proc_ctx_hdl = id; + entry->ref_cnt++; + + return 0; + +ipa_insert_failed: + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + list_del(&entry->link); + htbl->proc_ctx_cnt--; + +bad_len: + if (add_ref_hdr) + hdr_entry->ref_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry); + return -EPERM; +} + + +static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user) +{ + struct ipa3_hdr_entry *entry; + struct ipa_hdr_offset_entry *offset = NULL; + u32 bin; + struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl; + int id; + int mem_size; + + if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) { + IPAERR_RL("bad param\n"); + goto error; + } + + if (!HDR_TYPE_IS_VALID(hdr->type)) { + IPAERR_RL("invalid hdr type %d\n", hdr->type); + goto error; + } + + entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL); + if (!entry) + goto error; + + INIT_LIST_HEAD(&entry->link); + + memcpy(entry->hdr, hdr->hdr, hdr->hdr_len); + entry->hdr_len = hdr->hdr_len; + strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX); + entry->is_partial = hdr->is_partial; + entry->type = hdr->type; + entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid; + entry->eth2_ofst = hdr->eth2_ofst; + entry->cookie = IPA_HDR_COOKIE; + entry->ipacm_installed = user; + + if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0]) + bin = IPA_HDR_BIN0; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1]) + bin = IPA_HDR_BIN1; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2]) + bin = IPA_HDR_BIN2; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3]) + 
bin = IPA_HDR_BIN3; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4]) + bin = IPA_HDR_BIN4; + else { + IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len); + goto bad_hdr_len; + } + + mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) : + IPA_MEM_PART(apps_hdr_size_ddr); + + if (list_empty(&htbl->head_free_offset_list[bin])) { + /* if header does not fit to table, place it in DDR */ + if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) { + entry->is_hdr_proc_ctx = true; + entry->phys_base = dma_map_single(ipa3_ctx->pdev, + entry->hdr, + entry->hdr_len, + DMA_TO_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, + entry->phys_base)) { + IPAERR("dma_map_single failure for entry\n"); + goto fail_dma_mapping; + } + } else { + entry->is_hdr_proc_ctx = false; + offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache, + GFP_KERNEL); + if (!offset) { + IPAERR("failed to alloc hdr offset object\n"); + goto bad_hdr_len; + } + INIT_LIST_HEAD(&offset->link); + /* + * for a first item grow, set the bin and offset which + * are set in stone + */ + offset->offset = htbl->end; + offset->bin = bin; + htbl->end += ipa_hdr_bin_sz[bin]; + list_add(&offset->link, + &htbl->head_offset_list[bin]); + entry->offset_entry = offset; + offset->ipacm_installed = user; + } + } else { + entry->is_hdr_proc_ctx = false; + /* get the first free slot */ + offset = list_first_entry(&htbl->head_free_offset_list[bin], + struct ipa_hdr_offset_entry, link); + list_move(&offset->link, &htbl->head_offset_list[bin]); + entry->offset_entry = offset; + offset->ipacm_installed = user; + } + + list_add(&entry->link, &htbl->head_hdr_entry_list); + htbl->hdr_cnt++; + if (entry->is_hdr_proc_ctx) + IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n", + hdr->hdr_len, + htbl->hdr_cnt, + &entry->phys_base); + else + IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n", + hdr->hdr_len, + htbl->hdr_cnt, + entry->offset_entry->offset); + + id = ipa3_id_alloc(entry); + if (id < 0) { + IPAERR_RL("failed to alloc id\n"); + WARN_ON_RATELIMIT_IPA(1); + goto ipa_insert_failed; + } + entry->id = id; + hdr->hdr_hdl = id; + entry->ref_cnt++; + + if (entry->is_hdr_proc_ctx) { + struct ipa_hdr_proc_ctx_add proc_ctx; + + IPADBG("adding processing context for header %s\n", hdr->name); + proc_ctx.type = IPA_HDR_PROC_NONE; + proc_ctx.hdr_hdl = id; + if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) { + IPAERR("failed to add hdr proc ctx\n"); + goto fail_add_proc_ctx; + } + entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl); + } + + return 0; + +fail_add_proc_ctx: + entry->ref_cnt--; + hdr->hdr_hdl = 0; + ipa3_id_remove(id); +ipa_insert_failed: + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa3_ctx->pdev, entry->phys_base, + entry->hdr_len, DMA_TO_DEVICE); + } else { + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + } + htbl->hdr_cnt--; + list_del(&entry->link); + +fail_dma_mapping: + entry->is_hdr_proc_ctx = false; + +bad_hdr_len: + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->hdr_cache, entry); +error: + return -EPERM; +} + +static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, + bool release_hdr, bool by_user) +{ + struct ipa3_hdr_proc_ctx_entry *entry; + struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl; + + entry = ipa3_id_find(proc_ctx_hdl); + if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + IPADBG("del proc ctx cnt=%d ofst=%d\n", + htbl->proc_ctx_cnt, entry->offset_entry->offset); + + if 
(by_user && entry->user_deleted) { + IPAERR_RL("proc_ctx already deleted by user\n"); + return -EINVAL; + } + + if (by_user) + entry->user_deleted = true; + + if (--entry->ref_cnt) { + IPADBG("proc_ctx_hdl %x ref_cnt %d\n", + proc_ctx_hdl, entry->ref_cnt); + return 0; + } + + if (release_hdr) + __ipa3_del_hdr(entry->hdr->id, false); + + /* move the offset entry to appropriate free list */ + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[entry->offset_entry->bin]); + list_del(&entry->link); + htbl->proc_ctx_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry); + + /* remove the handle from the database */ + ipa3_id_remove(proc_ctx_hdl); + + return 0; +} + +int __ipa3_del_hdr(u32 hdr_hdl, bool by_user) +{ + struct ipa3_hdr_entry *entry; + struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl; + + entry = ipa3_id_find(hdr_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + if (entry->is_hdr_proc_ctx) + IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n", + entry->hdr_len, htbl->hdr_cnt, &entry->phys_base); + else + IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n", + entry->hdr_len, htbl->hdr_cnt, + entry->offset_entry->offset); + + if (by_user && entry->user_deleted) { + IPAERR_RL("proc_ctx already deleted by user\n"); + return -EINVAL; + } + + if (by_user) { + if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) { + IPADBG("Trying to delete hdr %s offset=%u\n", + entry->name, entry->offset_entry->offset); + if (!entry->offset_entry->offset) { + IPAERR_RL( + "User cannot delete default header\n"); + return -EPERM; + } + } + entry->user_deleted = true; + } + + if (--entry->ref_cnt) { + IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt); + return 0; + } + + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa3_ctx->pdev, + entry->phys_base, + entry->hdr_len, + DMA_TO_DEVICE); + __ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false); + } else { + /* move the offset entry to appropriate free list */ + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[entry->offset_entry->bin]); + } + list_del(&entry->link); + htbl->hdr_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->hdr_cache, entry); + + /* remove the handle from the database */ + ipa3_id_remove(hdr_hdl); + + return 0; +} + +/** + * ipa3_add_hdr() - add the specified headers to SW and optionally commit them + * to IPA HW + * @hdrs: [inout] set of headers to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs) +{ + return ipa3_add_hdr_usr(hdrs, false); +} + +/** + * ipa3_add_hdr_usr() - add the specified headers to SW + * and optionally commit them to IPA HW + * @hdrs: [inout] set of headers to add + * @user_only: [in] indicate installed from user + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only) +{ + int i; + int result = -EFAULT; + + if (hdrs == NULL || hdrs->num_hdrs == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + IPADBG("adding %d headers to IPA driver internal data struct\n", + hdrs->num_hdrs); + for (i = 0; i < hdrs->num_hdrs; i++) { + if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) { + IPAERR_RL("failed to add hdr %d\n", i); + hdrs->hdr[i].status = -1; + } else { 
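+ /* a per-entry status of 0 reports a successful add */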
+ hdrs->hdr[i].status = 0; + } + } + + if (hdrs->commit) { + IPADBG("committing all headers to IPA core"); + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_del_hdr_by_user() - Remove the specified headers + * from SW and optionally commit them to IPA HW + * @hdls: [inout] set of headers to delete + * @by_user: Operation requested by user? + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user) +{ + int i; + int result = -EFAULT; + + if (hdls == NULL || hdls->num_hdls == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) { + IPAERR_RL("failed to del hdr %i\n", i); + hdls->hdl[i].status = -1; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) { + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_del_hdr() - Remove the specified headers from SW + * and optionally commit them to IPA HW + * @hdls: [inout] set of headers to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls) +{ + return ipa3_del_hdr_by_user(hdls, false); +} + +/** + * ipa3_add_hdr_proc_ctx() - add the specified headers to SW + * and optionally commit them to IPA HW + * @proc_ctxs: [inout] set of processing context headers to add + * @user_only: [in] indicate installed by user-space module + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) +{ + int i; + int result = -EFAULT; + + if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + IPADBG("adding %d header processing contextes to IPA driver\n", + proc_ctxs->num_proc_ctxs); + for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) { + if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], + true, user_only)) { + IPAERR_RL("failed to add hdr pric ctx %d\n", i); + proc_ctxs->proc_ctx[i].status = -1; + } else { + proc_ctxs->proc_ctx[i].status = 0; + } + } + + if (proc_ctxs->commit) { + IPADBG("committing all headers to IPA core"); + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_del_hdr_proc_ctx_by_user() - + * Remove the specified processing context headers from SW and + * optionally commit them to IPA HW. + * @hdls: [inout] set of processing context headers to delete + * @by_user: Operation requested by user? 
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls, + bool by_user) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_hdls == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) { + IPAERR_RL("failed to del hdr %i\n", i); + hdls->hdl[i].status = -1; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) { + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_del_hdr_proc_ctx() - + * Remove the specified processing context headers from SW and + * optionally commit them to IPA HW. + * @hdls: [inout] set of processing context headers to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls) +{ + return ipa3_del_hdr_proc_ctx_by_user(hdls, false); +} + +/** + * ipa3_commit_hdr() - commit to IPA HW the current header table in SW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_commit_hdr(void) +{ + int result = -EFAULT; + + /* + * issue a commit on the routing module since routing rules point to + * header table entries + */ + if (ipa3_commit_rt(IPA_IP_v4)) + return -EPERM; + if (ipa3_commit_rt(IPA_IP_v6)) + return -EPERM; + + mutex_lock(&ipa3_ctx->lock); + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_reset_hdr() - reset the current header table in SW (does not commit to + * HW) + * + * @user_only: [in] indicate delete rules installed by userspace + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_reset_hdr(bool user_only) +{ + struct ipa3_hdr_entry *entry; + struct ipa3_hdr_entry *next; + struct ipa3_hdr_proc_ctx_entry *ctx_entry; + struct ipa3_hdr_proc_ctx_entry *ctx_next; + struct ipa_hdr_offset_entry *off_entry; + struct ipa_hdr_offset_entry *off_next; + struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry; + struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next; + struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl; + struct ipa3_hdr_proc_ctx_tbl *htbl_proc = &ipa3_ctx->hdr_proc_ctx_tbl; + int i; + + /* + * issue a reset on the routing module since routing rules point to + * header table entries + */ + if (ipa3_reset_rt(IPA_IP_v4, user_only)) + IPAERR_RL("fail to reset v4 rt\n"); + if (ipa3_reset_rt(IPA_IP_v6, user_only)) + IPAERR_RL("fail to reset v6 rt\n"); + + mutex_lock(&ipa3_ctx->lock); + IPADBG("reset hdr\n"); + list_for_each_entry_safe(entry, next, + &ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) { + + /* do not remove the default header */ + if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) { + IPADBG("Trying to remove hdr %s offset=%u\n", + entry->name, entry->offset_entry->offset); + if (!entry->offset_entry->offset) { + if (entry->is_hdr_proc_ctx) { + IPAERR("default header is proc ctx\n"); + mutex_unlock(&ipa3_ctx->lock); + WARN_ON_RATELIMIT_IPA(1); + return -EFAULT; + } + IPADBG("skip default header\n"); + continue; + } + } + + if (ipa3_id_find(entry->id) == NULL) { + mutex_unlock(&ipa3_ctx->lock); + 
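+ /* header list entry is missing from the id database */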
WARN_ON_RATELIMIT_IPA(1); + return -EFAULT; + } + + if (!user_only || entry->ipacm_installed) { + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa3_ctx->pdev, + entry->phys_base, + entry->hdr_len, + DMA_TO_DEVICE); + entry->proc_ctx = NULL; + } else { + /* move the offset entry to free list */ + entry->offset_entry->ipacm_installed = false; + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[ + entry->offset_entry->bin]); + } + list_del(&entry->link); + htbl->hdr_cnt--; + entry->ref_cnt = 0; + entry->cookie = 0; + + /* remove the handle from the database */ + ipa3_id_remove(entry->id); + kmem_cache_free(ipa3_ctx->hdr_cache, entry); + } + } + + /* only clean up offset_list and free_offset_list on global reset */ + if (!user_only) { + for (i = 0; i < IPA_HDR_BIN_MAX; i++) { + list_for_each_entry_safe(off_entry, off_next, + &ipa3_ctx->hdr_tbl.head_offset_list[i], + link) { + /** + * do not remove the default exception + * header which is at offset 0 + */ + if (off_entry->offset == 0) + continue; + list_del(&off_entry->link); + kmem_cache_free(ipa3_ctx->hdr_offset_cache, + off_entry); + } + list_for_each_entry_safe(off_entry, off_next, + &ipa3_ctx->hdr_tbl.head_free_offset_list[i], + link) { + list_del(&off_entry->link); + kmem_cache_free(ipa3_ctx->hdr_offset_cache, + off_entry); + } + } + /* there is one header of size 8 */ + ipa3_ctx->hdr_tbl.end = 8; + ipa3_ctx->hdr_tbl.hdr_cnt = 1; + } + + IPADBG("reset hdr proc ctx\n"); + list_for_each_entry_safe( + ctx_entry, + ctx_next, + &(htbl_proc->head_proc_ctx_entry_list), + link) { + + if (ipa3_id_find(ctx_entry->id) == NULL) { + mutex_unlock(&ipa3_ctx->lock); + WARN_ON_RATELIMIT_IPA(1); + return -EFAULT; + } + + if (!user_only || + ctx_entry->ipacm_installed) { + /* move the offset entry to appropriate free list */ + list_move(&ctx_entry->offset_entry->link, + &htbl_proc->head_free_offset_list[ + ctx_entry->offset_entry->bin]); + list_del(&ctx_entry->link); + htbl_proc->proc_ctx_cnt--; + ctx_entry->ref_cnt = 0; + ctx_entry->cookie = 0; + + /* remove the handle from the database */ + ipa3_id_remove(ctx_entry->id); + kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, + ctx_entry); + } + } + /* only clean up offset_list and free_offset_list on global reset */ + if (!user_only) { + for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { + list_for_each_entry_safe(ctx_off_entry, ctx_off_next, + &(htbl_proc->head_offset_list[i]), link) { + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa3_ctx->hdr_proc_ctx_offset_cache, + ctx_off_entry); + } + list_for_each_entry_safe(ctx_off_entry, ctx_off_next, + &(htbl_proc->head_free_offset_list[i]), link) { + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa3_ctx->hdr_proc_ctx_offset_cache, + ctx_off_entry); + } + } + htbl_proc->end = 0; + htbl_proc->proc_ctx_cnt = 0; + } + + /* commit the change to IPA-HW */ + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + IPAERR("fail to commit hdr\n"); + WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa3_ctx->lock); + return -EFAULT; + } + + mutex_unlock(&ipa3_ctx->lock); + return 0; +} + +static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name) +{ + struct ipa3_hdr_entry *entry; + + if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) { + IPAERR_RL("Header name too long: %s\n", name); + return NULL; + } + + list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list, + link) { + if (!strcmp(name, entry->name)) + return entry; + } + + return NULL; +} + +/** + * ipa3_get_hdr() - Lookup the specified header resource + * @lookup: [inout] 
header to lookup and its handle + * + * lookup the specified header resource and return handle if it exists + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + * Caller should call ipa3_put_hdr later if this function succeeds + */ +int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup) +{ + struct ipa3_hdr_entry *entry; + int result = -1; + + if (lookup == NULL) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + mutex_lock(&ipa3_ctx->lock); + lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + entry = __ipa_find_hdr(lookup->name); + if (entry) { + lookup->hdl = entry->id; + result = 0; + } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * __ipa3_release_hdr() - drop reference to header and cause + * deletion if reference count permits + * @hdr_hdl: [in] handle of header to be released + * + * Returns: 0 on success, negative on failure + */ +int __ipa3_release_hdr(u32 hdr_hdl) +{ + int result = 0; + + if (__ipa3_del_hdr(hdr_hdl, false)) { + IPADBG("fail to del hdr %x\n", hdr_hdl); + result = -EFAULT; + goto bail; + } + + /* commit for put */ + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + IPAERR("fail to commit hdr\n"); + result = -EFAULT; + goto bail; + } + +bail: + return result; +} + +/** + * __ipa3_release_hdr_proc_ctx() - drop reference to processing context + * and cause deletion if reference count permits + * @proc_ctx_hdl: [in] handle of processing context to be released + * + * Returns: 0 on success, negative on failure + */ +int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl) +{ + int result = 0; + + if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) { + IPADBG("fail to del hdr %x\n", proc_ctx_hdl); + result = -EFAULT; + goto bail; + } + + /* commit for put */ + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + IPAERR("fail to commit hdr\n"); + result = -EFAULT; + goto bail; + } + +bail: + return result; +} + +/** + * ipa3_put_hdr() - Release the specified header handle + * @hdr_hdl: [in] the header handle to release + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_put_hdr(u32 hdr_hdl) +{ + struct ipa3_hdr_entry *entry; + int result = -EFAULT; + + mutex_lock(&ipa3_ctx->lock); + + entry = ipa3_id_find(hdr_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + result = -EINVAL; + goto bail; + } + + if (entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("invalid header entry\n"); + result = -EINVAL; + goto bail; + } + + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of + * it + * @copy: [inout] header to lookup and its copy + * + * lookup the specified header resource and return a copy of it (along with its + * attributes) if it exists, this would be called for partial headers + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy) +{ + struct ipa3_hdr_entry *entry; + int result = -EFAULT; + + if (copy == NULL) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + mutex_lock(&ipa3_ctx->lock); + copy->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + entry = __ipa_find_hdr(copy->name); + if (entry) { + memcpy(copy->hdr, entry->hdr, entry->hdr_len); + copy->hdr_len = entry->hdr_len; + copy->type = entry->type; + copy->is_partial = entry->is_partial; + copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid; + copy->eth2_ofst = entry->eth2_ofst; + result = 0; 
+ } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c new file mode 100644 index 000000000000..1e74db7afe07 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c @@ -0,0 +1,2073 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include "ipa_i.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_hw_stats.h" + +#define IPA_CLIENT_BIT_32(client) \ + ((ipa3_get_ep_mapping(client) >= 0 && \ + ipa3_get_ep_mapping(client) < IPA_STATS_MAX_PIPE_BIT) ? \ + (1 << ipa3_get_ep_mapping(client)) : 0) + +int ipa_hw_stats_init(void) +{ + int ret = 0, ep_index; + struct ipa_teth_stats_endpoints *teth_stats_init; + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) + return 0; + + /* initialize stats here */ + ipa3_ctx->hw_stats.enabled = true; + + teth_stats_init = kzalloc(sizeof(*teth_stats_init), GFP_KERNEL); + if (!teth_stats_init) { + IPAERR("mem allocated failed!\n"); + return -ENOMEM; + } + /* enable prod mask */ + teth_stats_init->prod_mask = ( + IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD) | + IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD) | + IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD)); + + if (IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD)) { + ep_index = ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_PROD); + if (ep_index == -1) { + IPAERR("Invalid client.\n"); + kfree(teth_stats_init); + return -EINVAL; + } + teth_stats_init->dst_ep_mask[ep_index] = + (IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_CONS) | + IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS)); + } + + if (IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD)) { + ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD); + if (ep_index == -1) { + IPAERR("Invalid client.\n"); + kfree(teth_stats_init); + return -EINVAL; + } + teth_stats_init->dst_ep_mask[ep_index] = + IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS); + } + + if (IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD)) { + ep_index = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_PROD); + if (ep_index == -1) { + IPAERR("Invalid client.\n"); + kfree(teth_stats_init); + return -EINVAL; + } + teth_stats_init->dst_ep_mask[ep_index] = + IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS); + } + + ret = ipa_init_teth_stats(teth_stats_init); + kfree(teth_stats_init); + return ret; +} + +int ipa_init_quota_stats(u32 pipe_bitmask) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_register_write quota_base = {0}; + struct ipahal_imm_cmd_pyld *quota_base_pyld; + struct ipahal_imm_cmd_register_write quota_mask = {0}; + struct ipahal_imm_cmd_pyld *quota_mask_pyld; + struct ipa3_desc desc[3] = { {0} }; + dma_addr_t dma_address; + int ret; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + /* reset driver's cache */ + memset(&ipa3_ctx->hw_stats.quota, 0, sizeof(ipa3_ctx->hw_stats.quota)); + ipa3_ctx->hw_stats.quota.init.enabled_bitmask = pipe_bitmask; + IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask); + + pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_QUOTA, + &ipa3_ctx->hw_stats.quota.init, false); + if (!pyld) { + IPAERR("failed to generate pyld\n"); + return -EPERM; + } + + if (pyld->len > IPA_MEM_PART(stats_quota_size)) { + IPAERR("SRAM partition too small: %d needed %d\n", + IPA_MEM_PART(stats_quota_size), pyld->len); + ret = -EPERM; + goto destroy_init_pyld; + } + + dma_address = dma_map_single(ipa3_ctx->pdev, + pyld->data, + pyld->len, + 
DMA_TO_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) { + IPAERR("failed to DMA map\n"); + ret = -EPERM; + goto destroy_init_pyld; + } + + /* setting the registers and init the stats pyld are done atomically */ + quota_mask.skip_pipeline_clear = false; + quota_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + quota_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_MASK_n, + ipa3_ctx->ee); + quota_mask.value = pipe_bitmask; + quota_mask.value_mask = ~0; + quota_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + "a_mask, false); + if (!quota_mask_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto unmap; + } + desc[0].opcode = quota_mask_pyld->opcode; + desc[0].pyld = quota_mask_pyld->data; + desc[0].len = quota_mask_pyld->len; + desc[0].type = IPA_IMM_CMD_DESC; + + quota_base.skip_pipeline_clear = false; + quota_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + quota_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_BASE_n, + ipa3_ctx->ee); + quota_base.value = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_quota_ofst); + quota_base.value_mask = ~0; + quota_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + "a_base, false); + if (!quota_base_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto destroy_quota_mask; + } + desc[1].opcode = quota_base_pyld->opcode; + desc[1].pyld = quota_base_pyld->data; + desc[1].len = quota_base_pyld->len; + desc[1].type = IPA_IMM_CMD_DESC; + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + cmd.size = pyld->len; + cmd.system_addr = dma_address; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_quota_ofst); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto destroy_quota_base; + } + desc[2].opcode = cmd_pyld->opcode; + desc[2].pyld = cmd_pyld->data; + desc[2].len = cmd_pyld->len; + desc[2].type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(3, desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + ret = 0; + +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +destroy_quota_base: + ipahal_destroy_imm_cmd(quota_base_pyld); +destroy_quota_mask: + ipahal_destroy_imm_cmd(quota_mask_pyld); +unmap: + dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE); +destroy_init_pyld: + ipahal_destroy_stats_init_pyld(pyld); + return ret; +} + +int ipa_get_quota_stats(struct ipa_quota_stats_all *out) +{ + int i; + int ret; + struct ipahal_stats_get_offset_quota get_offset = { { 0 } }; + struct ipahal_stats_offset offset = { 0 }; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipa_mem_buffer mem; + struct ipa3_desc desc = { 0 }; + struct ipahal_stats_quota_all *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + get_offset.init = ipa3_ctx->hw_stats.quota.init; + ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_QUOTA, &get_offset, + &offset); + if (ret) { + IPAERR("failed to get offset from hal %d\n", ret); + return ret; + } + + IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + + if (offset.size == 0) + return 0; + + mem.size = offset.size; + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, + mem.size, + &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + 
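+ /* without this, ret is still 0 at this point and the allocation
+  * failure would be reported to the caller as success
+  */
+ ret = -ENOMEM;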
IPAERR("fail to alloc DMA memory"); + return ret; + } + + cmd.is_read = true; + cmd.clear_after_read = true; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_quota_ofst) + offset.offset; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto free_dma_mem; + } + desc.opcode = cmd_pyld->opcode; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(1, &desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) { + ret = -ENOMEM; + goto destroy_imm; + } + + ret = ipahal_parse_stats(IPAHAL_HW_STATS_QUOTA, + &ipa3_ctx->hw_stats.quota.init, mem.base, stats); + if (ret) { + IPAERR("failed to parse stats (error %d)\n", ret); + goto free_stats; + } + + /* + * update driver cache. + * the stats were read from hardware with clear_after_read meaning + * hardware stats are 0 now + */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + int ep_idx = ipa3_get_ep_mapping(i); + + if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES) + continue; + + if (ipa3_ctx->ep[ep_idx].client != i) + continue; + + ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_bytes += + stats->stats[ep_idx].num_ipv4_bytes; + ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_pkts += + stats->stats[ep_idx].num_ipv4_pkts; + ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_bytes += + stats->stats[ep_idx].num_ipv6_bytes; + ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_pkts += + stats->stats[ep_idx].num_ipv6_pkts; + } + + /* copy results to out parameter */ + if (out) + *out = ipa3_ctx->hw_stats.quota.stats; + ret = 0; +free_stats: + kfree(stats); +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +free_dma_mem: + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + return ret; + +} + +int ipa_reset_quota_stats(enum ipa_client_type client) +{ + int ret; + struct ipa_quota_stats *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (client >= IPA_CLIENT_MAX) { + IPAERR("invalid client %d\n", client); + return -EINVAL; + } + + /* reading stats will reset them in hardware */ + ret = ipa_get_quota_stats(NULL); + if (ret) { + IPAERR("ipa_get_quota_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + stats = &ipa3_ctx->hw_stats.quota.stats.client[client]; + memset(stats, 0, sizeof(*stats)); + return 0; +} + +int ipa_reset_all_quota_stats(void) +{ + int ret; + struct ipa_quota_stats_all *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + /* reading stats will reset them in hardware */ + ret = ipa_get_quota_stats(NULL); + if (ret) { + IPAERR("ipa_get_quota_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + stats = &ipa3_ctx->hw_stats.quota.stats; + memset(stats, 0, sizeof(*stats)); + return 0; +} + +int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_register_write teth_base = {0}; + struct ipahal_imm_cmd_pyld *teth_base_pyld; + struct ipahal_imm_cmd_register_write teth_mask = { 0 }; + struct ipahal_imm_cmd_pyld *teth_mask_pyld; + struct ipa3_desc 
desc[3] = { {0} }; + dma_addr_t dma_address; + int ret; + int i; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (!in || !in->prod_mask) { + IPAERR("invalid params\n"); + return -EINVAL; + } + + for (i = 0; i < IPA_STATS_MAX_PIPE_BIT; i++) { + if ((in->prod_mask & (1 << i)) && !in->dst_ep_mask[i]) { + IPAERR("prod %d doesn't have cons\n", i); + return -EINVAL; + } + } + IPADBG_LOW("prod_mask=0x%x\n", in->prod_mask); + + /* reset driver's cache */ + memset(&ipa3_ctx->hw_stats.teth.init, 0, + sizeof(ipa3_ctx->hw_stats.teth.init)); + for (i = 0; i < IPA_CLIENT_MAX; i++) { + memset(&ipa3_ctx->hw_stats.teth.prod_stats_sum[i], 0, + sizeof(ipa3_ctx->hw_stats.teth.prod_stats_sum[i])); + memset(&ipa3_ctx->hw_stats.teth.prod_stats[i], 0, + sizeof(ipa3_ctx->hw_stats.teth.prod_stats[i])); + } + ipa3_ctx->hw_stats.teth.init.prod_bitmask = in->prod_mask; + memcpy(ipa3_ctx->hw_stats.teth.init.cons_bitmask, in->dst_ep_mask, + sizeof(ipa3_ctx->hw_stats.teth.init.cons_bitmask)); + + + pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_TETHERING, + &ipa3_ctx->hw_stats.teth.init, false); + if (!pyld) { + IPAERR("failed to generate pyld\n"); + return -EPERM; + } + + if (pyld->len > IPA_MEM_PART(stats_tethering_size)) { + IPAERR("SRAM partition too small: %d needed %d\n", + IPA_MEM_PART(stats_tethering_size), pyld->len); + ret = -EPERM; + goto destroy_init_pyld; + } + + dma_address = dma_map_single(ipa3_ctx->pdev, + pyld->data, + pyld->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) { + IPAERR("failed to DMA map\n"); + ret = -EPERM; + goto destroy_init_pyld; + } + + /* setting the registers and init the stats pyld are done atomically */ + teth_mask.skip_pipeline_clear = false; + teth_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + teth_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_MASK_n, + ipa3_ctx->ee); + teth_mask.value = in->prod_mask; + teth_mask.value_mask = ~0; + teth_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + &teth_mask, false); + if (!teth_mask_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto unmap; + } + desc[0].opcode = teth_mask_pyld->opcode; + desc[0].pyld = teth_mask_pyld->data; + desc[0].len = teth_mask_pyld->len; + desc[0].type = IPA_IMM_CMD_DESC; + + teth_base.skip_pipeline_clear = false; + teth_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + teth_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_BASE_n, + ipa3_ctx->ee); + teth_base.value = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_tethering_ofst); + teth_base.value_mask = ~0; + teth_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + &teth_base, false); + if (!teth_base_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto destroy_teth_mask; + } + desc[1].opcode = teth_base_pyld->opcode; + desc[1].pyld = teth_base_pyld->data; + desc[1].len = teth_base_pyld->len; + desc[1].type = IPA_IMM_CMD_DESC; + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + cmd.size = pyld->len; + cmd.system_addr = dma_address; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_tethering_ofst); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto destroy_teth_base; + } + desc[2].opcode = cmd_pyld->opcode; + desc[2].pyld = cmd_pyld->data; + 
desc[2].len = cmd_pyld->len; + desc[2].type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(3, desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + ret = 0; + +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +destroy_teth_base: + ipahal_destroy_imm_cmd(teth_base_pyld); +destroy_teth_mask: + ipahal_destroy_imm_cmd(teth_mask_pyld); +unmap: + dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE); +destroy_init_pyld: + ipahal_destroy_stats_init_pyld(pyld); + return ret; +} + +int ipa_get_teth_stats(void) +{ + int i, j; + int ret; + struct ipahal_stats_get_offset_tethering get_offset = { { 0 } }; + struct ipahal_stats_offset offset = {0}; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipa_mem_buffer mem; + struct ipa3_desc desc = { 0 }; + struct ipahal_stats_tethering_all *stats_all; + struct ipa_hw_stats_teth *sw_stats = &ipa3_ctx->hw_stats.teth; + struct ipahal_stats_tethering *stats; + struct ipa_quota_stats *quota_stats; + struct ipahal_stats_init_tethering *init = + (struct ipahal_stats_init_tethering *) + &ipa3_ctx->hw_stats.teth.init; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + get_offset.init = ipa3_ctx->hw_stats.teth.init; + ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_TETHERING, &get_offset, + &offset); + if (ret) { + IPAERR("failed to get offset from hal %d\n", ret); + return ret; + } + + IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + + if (offset.size == 0) + return 0; + + mem.size = offset.size; + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, + mem.size, + &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA memory\n"); + return ret; + } + + cmd.is_read = true; + cmd.clear_after_read = true; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_tethering_ofst) + offset.offset; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto free_dma_mem; + } + desc.opcode = cmd_pyld->opcode; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(1, &desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + stats_all = kzalloc(sizeof(*stats_all), GFP_KERNEL); + if (!stats_all) { + IPADBG("failed to alloc memory\n"); + ret = -ENOMEM; + goto destroy_imm; + } + + ret = ipahal_parse_stats(IPAHAL_HW_STATS_TETHERING, + &ipa3_ctx->hw_stats.teth.init, mem.base, stats_all); + if (ret) { + IPAERR("failed to parse stats_all (error %d)\n", ret); + goto free_stats; + } + + /* reset prod_stats cache */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + memset(&ipa3_ctx->hw_stats.teth.prod_stats[i], 0, + sizeof(ipa3_ctx->hw_stats.teth.prod_stats[i])); + } + + /* + * update driver cache. 
+ * the stats were read from hardware with clear_after_read meaning + * hardware stats are 0 now + */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + for (j = 0; j < IPA_CLIENT_MAX; j++) { + int prod_idx = ipa3_get_ep_mapping(i); + int cons_idx = ipa3_get_ep_mapping(j); + + if (prod_idx == -1 || prod_idx >= IPA3_MAX_NUM_PIPES) + continue; + + if (cons_idx == -1 || cons_idx >= IPA3_MAX_NUM_PIPES) + continue; + + /* save hw-query result */ + if ((init->prod_bitmask & (1 << prod_idx)) && + (init->cons_bitmask[prod_idx] + & (1 << cons_idx))) { + IPADBG_LOW("prod %d cons %d\n", + prod_idx, cons_idx); + stats = &stats_all->stats[prod_idx][cons_idx]; + IPADBG_LOW("num_ipv4_bytes %lld\n", + stats->num_ipv4_bytes); + IPADBG_LOW("num_ipv4_pkts %lld\n", + stats->num_ipv4_pkts); + IPADBG_LOW("num_ipv6_pkts %lld\n", + stats->num_ipv6_pkts); + IPADBG_LOW("num_ipv6_bytes %lld\n", + stats->num_ipv6_bytes); + + /* update stats*/ + quota_stats = + &sw_stats->prod_stats[i].client[j]; + quota_stats->num_ipv4_bytes = + stats->num_ipv4_bytes; + quota_stats->num_ipv4_pkts = + stats->num_ipv4_pkts; + quota_stats->num_ipv6_bytes = + stats->num_ipv6_bytes; + quota_stats->num_ipv6_pkts = + stats->num_ipv6_pkts; + + /* Accumulated stats */ + quota_stats = + &sw_stats->prod_stats_sum[i].client[j]; + quota_stats->num_ipv4_bytes += + stats->num_ipv4_bytes; + quota_stats->num_ipv4_pkts += + stats->num_ipv4_pkts; + quota_stats->num_ipv6_bytes += + stats->num_ipv6_bytes; + quota_stats->num_ipv6_pkts += + stats->num_ipv6_pkts; + } + } + } + + ret = 0; +free_stats: + kfree(stats_all); + stats = NULL; +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +free_dma_mem: + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + return ret; + +} + +int ipa_query_teth_stats(enum ipa_client_type prod, + struct ipa_quota_stats_all *out, bool reset) +{ + if (!IPA_CLIENT_IS_PROD(prod) || ipa3_get_ep_mapping(prod) == -1) { + IPAERR("invalid prod %d\n", prod); + return -EINVAL; + } + + /* copy results to out parameter */ + if (reset) + *out = ipa3_ctx->hw_stats.teth.prod_stats[prod]; + else + *out = ipa3_ctx->hw_stats.teth.prod_stats_sum[prod]; + return 0; +} + +int ipa_reset_teth_stats(enum ipa_client_type prod, enum ipa_client_type cons) +{ + int ret; + struct ipa_quota_stats *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (!IPA_CLIENT_IS_PROD(prod) || !IPA_CLIENT_IS_CONS(cons)) { + IPAERR("invalid prod %d or cons %d\n", prod, cons); + return -EINVAL; + } + + /* reading stats will reset them in hardware */ + ret = ipa_get_teth_stats(); + if (ret) { + IPAERR("ipa_get_teth_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + stats = &ipa3_ctx->hw_stats.teth.prod_stats_sum[prod].client[cons]; + memset(stats, 0, sizeof(*stats)); + return 0; +} + +int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod) +{ + int ret; + int i; + struct ipa_quota_stats *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (!IPA_CLIENT_IS_PROD(prod)) { + IPAERR("invalid prod %d\n", prod); + return -EINVAL; + } + + /* reading stats will reset them in hardware */ + ret = ipa_get_teth_stats(); + if (ret) { + IPAERR("ipa_get_teth_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + stats = &ipa3_ctx->hw_stats.teth.prod_stats_sum[prod].client[i]; + memset(stats, 0, sizeof(*stats)); + } + + return 0; +} + +int ipa_reset_all_teth_stats(void) +{ + int i; + int ret; + struct ipa_quota_stats_all *stats; + + if (!ipa3_ctx->hw_stats.enabled) + 
return 0; + + /* reading stats will reset them in hardware */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + if (IPA_CLIENT_IS_PROD(i) && ipa3_get_ep_mapping(i) != -1) { + ret = ipa_get_teth_stats(); + if (ret) { + IPAERR("ipa_get_teth_stats failed %d\n", ret); + return ret; + } + /* a single iteration will reset all hardware stats */ + break; + } + } + + /* reset driver's cache */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + stats = &ipa3_ctx->hw_stats.teth.prod_stats_sum[i]; + memset(stats, 0, sizeof(*stats)); + } + + return 0; +} + +int ipa_flt_rt_stats_add_rule_id(enum ipa_ip_type ip, bool filtering, + u16 rule_id) +{ + int rule_idx, rule_bit; + u32 *bmsk_ptr; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip < 0 || ip >= IPA_IP_MAX) { + IPAERR("wrong ip type %d\n", ip); + return -EINVAL; + } + + rule_idx = rule_id / 32; + rule_bit = rule_id % 32; + + if (rule_idx >= IPAHAL_MAX_RULE_ID_32) { + IPAERR("invalid rule_id %d\n", rule_id); + return -EINVAL; + } + + if (ip == IPA_IP_v4 && filtering) + bmsk_ptr = + ipa3_ctx->hw_stats.flt_rt.flt_v4_init.rule_id_bitmask; + else if (ip == IPA_IP_v4) + bmsk_ptr = + ipa3_ctx->hw_stats.flt_rt.rt_v4_init.rule_id_bitmask; + else if (ip == IPA_IP_v6 && filtering) + bmsk_ptr = + ipa3_ctx->hw_stats.flt_rt.flt_v6_init.rule_id_bitmask; + else + bmsk_ptr = + ipa3_ctx->hw_stats.flt_rt.rt_v6_init.rule_id_bitmask; + + bmsk_ptr[rule_idx] |= (1 << rule_bit); + + return 0; +} + +int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering) +{ + struct ipahal_stats_init_pyld *pyld; + int smem_ofst, smem_size, stats_base, start_id_ofst, end_id_ofst; + int start_id, end_id; + struct ipahal_stats_init_flt_rt *init; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_register_write flt_rt_base = {0}; + struct ipahal_imm_cmd_pyld *flt_rt_base_pyld; + struct ipahal_imm_cmd_register_write flt_rt_start_id = {0}; + struct ipahal_imm_cmd_pyld *flt_rt_start_id_pyld; + struct ipahal_imm_cmd_register_write flt_rt_end_id = { 0 }; + struct ipahal_imm_cmd_pyld *flt_rt_end_id_pyld; + struct ipa3_desc desc[4] = { {0} }; + dma_addr_t dma_address; + int ret; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip == IPA_IP_v4 && filtering) { + init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init; + smem_ofst = IPA_MEM_PART(stats_flt_v4_ofst); + smem_size = IPA_MEM_PART(stats_flt_v4_size); + stats_base = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_BASE); + start_id_ofst = + ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_START_ID); + end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_END_ID); + } else if (ip == IPA_IP_v4) { + init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init; + smem_ofst = IPA_MEM_PART(stats_rt_v4_ofst); + smem_size = IPA_MEM_PART(stats_rt_v4_size); + stats_base = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_BASE); + start_id_ofst = + ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_START_ID); + end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_END_ID); + } else if (ip == IPA_IP_v6 && filtering) { + init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init; + smem_ofst = IPA_MEM_PART(stats_flt_v6_ofst); + smem_size = IPA_MEM_PART(stats_flt_v6_size); + stats_base = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_BASE); + start_id_ofst = + ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_START_ID); + end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_END_ID); + } else { + init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init; + smem_ofst = IPA_MEM_PART(stats_rt_v6_ofst); + smem_size = IPA_MEM_PART(stats_rt_v6_size); + stats_base = 
ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_BASE); + start_id_ofst = + ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_START_ID); + end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_END_ID); + } + + for (start_id = 0; start_id < IPAHAL_MAX_RULE_ID_32; start_id++) { + if (init->rule_id_bitmask[start_id]) + break; + } + + if (start_id == IPAHAL_MAX_RULE_ID_32) { + IPAERR("empty rule ids\n"); + return -EINVAL; + } + + /* every rule_id_bitmask contains 32 rules */ + start_id *= 32; + + for (end_id = IPAHAL_MAX_RULE_ID_32 - 1; end_id >= 0; end_id--) { + if (init->rule_id_bitmask[end_id]) + break; + } + end_id = (end_id + 1) * 32 - 1; + + pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_FNR, init, + false); + if (!pyld) { + IPAERR("failed to generate pyld\n"); + return -EPERM; + } + + if (pyld->len > smem_size) { + IPAERR("SRAM partition too small: %d needed %d\n", + smem_size, pyld->len); + ret = -EPERM; + goto destroy_init_pyld; + } + + dma_address = dma_map_single(ipa3_ctx->pdev, + pyld->data, + pyld->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) { + IPAERR("failed to DMA map\n"); + ret = -EPERM; + goto destroy_init_pyld; + } + + /* setting the registers and init the stats pyld are done atomically */ + flt_rt_start_id.skip_pipeline_clear = false; + flt_rt_start_id.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + flt_rt_start_id.offset = start_id_ofst; + flt_rt_start_id.value = start_id; + flt_rt_start_id.value_mask = 0x3FF; + flt_rt_start_id_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, &flt_rt_start_id, false); + if (!flt_rt_start_id_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto unmap; + } + desc[0].opcode = flt_rt_start_id_pyld->opcode; + desc[0].pyld = flt_rt_start_id_pyld->data; + desc[0].len = flt_rt_start_id_pyld->len; + desc[0].type = IPA_IMM_CMD_DESC; + + flt_rt_end_id.skip_pipeline_clear = false; + flt_rt_end_id.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + flt_rt_end_id.offset = end_id_ofst; + flt_rt_end_id.value = end_id; + flt_rt_end_id.value_mask = 0x3FF; + flt_rt_end_id_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, &flt_rt_end_id, false); + if (!flt_rt_end_id_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto destroy_flt_rt_start_id; + } + desc[1].opcode = flt_rt_end_id_pyld->opcode; + desc[1].pyld = flt_rt_end_id_pyld->data; + desc[1].len = flt_rt_end_id_pyld->len; + desc[1].type = IPA_IMM_CMD_DESC; + + flt_rt_base.skip_pipeline_clear = false; + flt_rt_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + flt_rt_base.offset = stats_base; + flt_rt_base.value = ipa3_ctx->smem_restricted_bytes + + smem_ofst; + flt_rt_base.value_mask = ~0; + flt_rt_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + &flt_rt_base, false); + if (!flt_rt_base_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto destroy_flt_rt_end_id; + } + desc[2].opcode = flt_rt_base_pyld->opcode; + desc[2].pyld = flt_rt_base_pyld->data; + desc[2].len = flt_rt_base_pyld->len; + desc[2].type = IPA_IMM_CMD_DESC; + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + cmd.size = pyld->len; + cmd.system_addr = dma_address; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + smem_ofst; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm 
cmd\n"); + ret = -ENOMEM; + goto destroy_flt_rt_base; + } + desc[3].opcode = cmd_pyld->opcode; + desc[3].pyld = cmd_pyld->data; + desc[3].len = cmd_pyld->len; + desc[3].type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(4, desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + ret = 0; + +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +destroy_flt_rt_base: + ipahal_destroy_imm_cmd(flt_rt_base_pyld); +destroy_flt_rt_end_id: + ipahal_destroy_imm_cmd(flt_rt_end_id_pyld); +destroy_flt_rt_start_id: + ipahal_destroy_imm_cmd(flt_rt_start_id_pyld); +unmap: + dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE); +destroy_init_pyld: + ipahal_destroy_stats_init_pyld(pyld); + return ret; +} + +int ipa_flt_rt_stats_clear_rule_ids(enum ipa_ip_type ip, bool filtering) +{ + struct ipahal_stats_init_flt_rt *init; + int i; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip < 0 || ip >= IPA_IP_MAX) { + IPAERR("wrong ip type %d\n", ip); + return -EINVAL; + } + + if (ip == IPA_IP_v4 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init; + else if (ip == IPA_IP_v4) + init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init; + else if (ip == IPA_IP_v6 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init; + else + init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init; + + for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) + init->rule_id_bitmask[i] = 0; + + return 0; +} + +static int __ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, + u16 rule_id, struct ipa_flt_rt_stats *out) +{ + int ret; + int smem_ofst; + bool clear = false; + struct ipahal_stats_get_offset_flt_rt *get_offset; + struct ipahal_stats_offset offset = { 0 }; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipa_mem_buffer mem; + struct ipa3_desc desc = { 0 }; + struct ipahal_stats_flt_rt stats; + + if (rule_id >= IPAHAL_MAX_RULE_ID_32 * 32) { + IPAERR("invalid rule_id %d\n", rule_id); + return -EINVAL; + } + + if (out == NULL) + clear = true; + + get_offset = kzalloc(sizeof(*get_offset), GFP_KERNEL); + if (!get_offset) { + IPADBG("no mem\n"); + return -ENOMEM; + } + + if (ip == IPA_IP_v4 && filtering) { + get_offset->init = ipa3_ctx->hw_stats.flt_rt.flt_v4_init; + smem_ofst = IPA_MEM_PART(stats_flt_v4_ofst); + } else if (ip == IPA_IP_v4) { + get_offset->init = ipa3_ctx->hw_stats.flt_rt.rt_v4_init; + smem_ofst = IPA_MEM_PART(stats_rt_v4_ofst); + } else if (ip == IPA_IP_v6 && filtering) { + get_offset->init = ipa3_ctx->hw_stats.flt_rt.flt_v6_init; + smem_ofst = IPA_MEM_PART(stats_flt_v6_ofst); + } else { + get_offset->init = ipa3_ctx->hw_stats.flt_rt.rt_v6_init; + smem_ofst = IPA_MEM_PART(stats_rt_v6_ofst); + } + + get_offset->rule_id = rule_id; + + ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_FNR, get_offset, + &offset); + if (ret) { + IPAERR("failed to get offset from hal %d\n", ret); + goto free_offset; + } + + IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + + if (offset.size == 0) { + ret = 0; + goto free_offset; + } + + mem.size = offset.size; + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, + mem.size, + &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA memory\n"); + goto free_offset; + } + + cmd.is_read = true; + cmd.clear_after_read = clear; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + smem_ofst + 
offset.offset; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto free_dma_mem; + } + desc.opcode = cmd_pyld->opcode; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(1, &desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + ret = ipahal_parse_stats(IPAHAL_HW_STATS_FNR, + &get_offset->init, mem.base, &stats); + if (ret) { + IPAERR("failed to parse stats (error %d)\n", ret); + goto destroy_imm; + } + + if (out) { + out->num_pkts = stats.num_packets; + out->num_pkts_hash = stats.num_packets_hash; + } + + ret = 0; + +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +free_dma_mem: + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); +free_offset: + kfree(get_offset); + return ret; + +} + + +int ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id, + struct ipa_flt_rt_stats *out) +{ + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip < 0 || ip >= IPA_IP_MAX) { + IPAERR("wrong ip type %d\n", ip); + return -EINVAL; + } + + return __ipa_get_flt_rt_stats(ip, filtering, rule_id, out); +} + +int ipa_reset_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id) +{ + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip < 0 || ip >= IPA_IP_MAX) { + IPAERR("wrong ip type %d\n", ip); + return -EINVAL; + } + + return __ipa_get_flt_rt_stats(ip, filtering, rule_id, NULL); +} + +int ipa_reset_all_flt_rt_stats(enum ipa_ip_type ip, bool filtering) +{ + struct ipahal_stats_init_flt_rt *init; + int i; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip < 0 || ip >= IPA_IP_MAX) { + IPAERR("wrong ip type %d\n", ip); + return -EINVAL; + } + + if (ip == IPA_IP_v4 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init; + else if (ip == IPA_IP_v4) + init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init; + else if (ip == IPA_IP_v6 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init; + else + init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init; + + for (i = 0; i < IPAHAL_MAX_RULE_ID_32 * 32; i++) { + int idx = i / 32; + int bit = i % 32; + + if (init->rule_id_bitmask[idx] & (1 << bit)) + __ipa_get_flt_rt_stats(ip, filtering, i, NULL); + } + + return 0; +} + +int ipa_init_drop_stats(u32 pipe_bitmask) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_register_write drop_base = {0}; + struct ipahal_imm_cmd_pyld *drop_base_pyld; + struct ipahal_imm_cmd_register_write drop_mask = {0}; + struct ipahal_imm_cmd_pyld *drop_mask_pyld; + struct ipa3_desc desc[3] = { {0} }; + dma_addr_t dma_address; + int ret; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + /* reset driver's cache */ + memset(&ipa3_ctx->hw_stats.drop, 0, sizeof(ipa3_ctx->hw_stats.drop)); + ipa3_ctx->hw_stats.drop.init.enabled_bitmask = pipe_bitmask; + IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask); + + pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_DROP, + &ipa3_ctx->hw_stats.drop.init, false); + if (!pyld) { + IPAERR("failed to generate pyld\n"); + return -EPERM; + } + + if (pyld->len > IPA_MEM_PART(stats_drop_size)) { + IPAERR("SRAM partition too small: %d needed %d\n", + IPA_MEM_PART(stats_drop_size), pyld->len); + ret = -EPERM; + goto destroy_init_pyld; + } + + dma_address = dma_map_single(ipa3_ctx->pdev, + pyld->data, + pyld->len, 
+ DMA_TO_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) { + IPAERR("failed to DMA map\n"); + ret = -EPERM; + goto destroy_init_pyld; + } + + /* setting the registers and init the stats pyld are done atomically */ + drop_mask.skip_pipeline_clear = false; + drop_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + drop_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_MASK_n, + ipa3_ctx->ee); + drop_mask.value = pipe_bitmask; + drop_mask.value_mask = ~0; + drop_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + &drop_mask, false); + if (!drop_mask_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto unmap; + } + desc[0].opcode = drop_mask_pyld->opcode; + desc[0].pyld = drop_mask_pyld->data; + desc[0].len = drop_mask_pyld->len; + desc[0].type = IPA_IMM_CMD_DESC; + + drop_base.skip_pipeline_clear = false; + drop_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + drop_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_BASE_n, + ipa3_ctx->ee); + drop_base.value = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_drop_ofst); + drop_base.value_mask = ~0; + drop_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + &drop_base, false); + if (!drop_base_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto destroy_drop_mask; + } + desc[1].opcode = drop_base_pyld->opcode; + desc[1].pyld = drop_base_pyld->data; + desc[1].len = drop_base_pyld->len; + desc[1].type = IPA_IMM_CMD_DESC; + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + cmd.size = pyld->len; + cmd.system_addr = dma_address; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_drop_ofst); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto destroy_drop_base; + } + desc[2].opcode = cmd_pyld->opcode; + desc[2].pyld = cmd_pyld->data; + desc[2].len = cmd_pyld->len; + desc[2].type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(3, desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + ret = 0; + +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +destroy_drop_base: + ipahal_destroy_imm_cmd(drop_base_pyld); +destroy_drop_mask: + ipahal_destroy_imm_cmd(drop_mask_pyld); +unmap: + dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE); +destroy_init_pyld: + ipahal_destroy_stats_init_pyld(pyld); + return ret; +} + +int ipa_get_drop_stats(struct ipa_drop_stats_all *out) +{ + int i; + int ret; + struct ipahal_stats_get_offset_drop get_offset = { { 0 } }; + struct ipahal_stats_offset offset = { 0 }; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipa_mem_buffer mem; + struct ipa3_desc desc = { 0 }; + struct ipahal_stats_drop_all *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + get_offset.init = ipa3_ctx->hw_stats.drop.init; + ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_DROP, &get_offset, + &offset); + if (ret) { + IPAERR("failed to get offset from hal %d\n", ret); + return ret; + } + + IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + + if (offset.size == 0) + return 0; + + mem.size = offset.size; + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, + mem.size, + &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA 
memory\n"); + return ret; + } + + cmd.is_read = true; + cmd.clear_after_read = true; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_drop_ofst) + offset.offset; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto free_dma_mem; + } + desc.opcode = cmd_pyld->opcode; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(1, &desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) { + ret = -ENOMEM; + goto destroy_imm; + } + + ret = ipahal_parse_stats(IPAHAL_HW_STATS_DROP, + &ipa3_ctx->hw_stats.drop.init, mem.base, stats); + if (ret) { + IPAERR("failed to parse stats (error %d)\n", ret); + goto free_stats; + } + + /* + * update driver cache. + * the stats were read from hardware with clear_after_read meaning + * hardware stats are 0 now + */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + int ep_idx = ipa3_get_ep_mapping(i); + + if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES) + continue; + + if (ipa3_ctx->ep[ep_idx].client != i) + continue; + + ipa3_ctx->hw_stats.drop.stats.client[i].drop_byte_cnt += + stats->stats[ep_idx].drop_byte_cnt; + ipa3_ctx->hw_stats.drop.stats.client[i].drop_packet_cnt += + stats->stats[ep_idx].drop_packet_cnt; + } + + + if (!out) { + ret = 0; + goto free_stats; + } + + /* copy results to out parameter */ + *out = ipa3_ctx->hw_stats.drop.stats; + + ret = 0; +free_stats: + kfree(stats); +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +free_dma_mem: + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + return ret; + +} + +int ipa_reset_drop_stats(enum ipa_client_type client) +{ + int ret; + struct ipa_drop_stats *stats; + + if (client >= IPA_CLIENT_MAX) { + IPAERR("invalid client %d\n", client); + return -EINVAL; + } + + /* reading stats will reset them in hardware */ + ret = ipa_get_drop_stats(NULL); + if (ret) { + IPAERR("ipa_get_drop_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + stats = &ipa3_ctx->hw_stats.drop.stats.client[client]; + memset(stats, 0, sizeof(*stats)); + return 0; +} + +int ipa_reset_all_drop_stats(void) +{ + int ret; + struct ipa_drop_stats_all *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + /* reading stats will reset them in hardware */ + ret = ipa_get_drop_stats(NULL); + if (ret) { + IPAERR("ipa_get_drop_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + stats = &ipa3_ctx->hw_stats.drop.stats; + memset(stats, 0, sizeof(*stats)); + return 0; +} + + +#ifndef CONFIG_DEBUG_FS +int ipa_debugfs_init_stats(struct dentry *parent) { return 0; } +#else +#define IPA_MAX_MSG_LEN 4096 +static char dbg_buff[IPA_MAX_MSG_LEN]; + +static ssize_t ipa_debugfs_reset_quota_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 client = 0; + int ret; + + mutex_lock(&ipa3_ctx->lock); + if (sizeof(dbg_buff) < count + 1) { + ret = -EFAULT; + goto bail; + } + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) { + ret = -EFAULT; + goto bail; + } + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &client)) { + ret = -EFAULT; + goto bail; + } + + 
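+	/* a client value of -1 resets the cached quota stats for all clients */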
if (client == -1) + ipa_reset_all_quota_stats(); + else + ipa_reset_quota_stats(client); + + ret = count; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +static ssize_t ipa_debugfs_print_quota_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipa_quota_stats_all *out; + int i; + int res; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + mutex_lock(&ipa3_ctx->lock); + res = ipa_get_quota_stats(out); + if (res) { + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + return res; + } + for (i = 0; i < IPA_CLIENT_MAX; i++) { + int ep_idx = ipa3_get_ep_mapping(i); + + if (ep_idx == -1) + continue; + + if (IPA_CLIENT_IS_TEST(i)) + continue; + + if (!(ipa3_ctx->hw_stats.quota.init.enabled_bitmask & + (1 << ep_idx))) + continue; + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "%s:\n", + ipa_clients_strings[i]); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv4_bytes=%llu\n", + out->client[i].num_ipv4_bytes); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv6_bytes=%llu\n", + out->client[i].num_ipv6_bytes); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv4_pkts=%u\n", + out->client[i].num_ipv4_pkts); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv6_pkts=%u\n", + out->client[i].num_ipv6_pkts); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "\n"); + + } + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_debugfs_reset_tethering_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 client = 0; + int ret; + + mutex_lock(&ipa3_ctx->lock); + if (sizeof(dbg_buff) < count + 1) { + ret = -EFAULT; + goto bail; + } + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) { + ret = -EFAULT; + goto bail; + } + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &client)) { + ret = -EFAULT; + goto bail; + } + + if (client == -1) + ipa_reset_all_teth_stats(); + else + ipa_reset_all_cons_teth_stats(client); + + ret = count; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +static ssize_t ipa_debugfs_print_tethering_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipa_quota_stats_all *out; + int i, j; + int res; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < IPA_CLIENT_MAX; i++) { + int ep_idx = ipa3_get_ep_mapping(i); + + if (ep_idx == -1) + continue; + + if (!IPA_CLIENT_IS_PROD(i)) + continue; + + if (IPA_CLIENT_IS_TEST(i)) + continue; + + if (!(ipa3_ctx->hw_stats.teth.init.prod_bitmask & + (1 << ep_idx))) + continue; + + res = ipa_get_teth_stats(); + if (res) { + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + return res; + } + + for (j = 0; j < IPA_CLIENT_MAX; j++) { + int cons_idx = ipa3_get_ep_mapping(j); + + if (cons_idx == -1) + continue; + + if (IPA_CLIENT_IS_TEST(j)) + continue; + + if (!(ipa3_ctx->hw_stats.teth.init.cons_bitmask[ep_idx] + & (1 << cons_idx))) + continue; + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "%s->%s:\n", + ipa_clients_strings[i], + ipa_clients_strings[j]); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv4_bytes=%llu\n", + out->client[j].num_ipv4_bytes); + nbytes += 
scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv6_bytes=%llu\n", + out->client[j].num_ipv6_bytes); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv4_pkts=%u\n", + out->client[j].num_ipv4_pkts); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv6_pkts=%u\n", + out->client[j].num_ipv6_pkts); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "\n"); + } + } + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_debugfs_control_flt_rt_stats(enum ipa_ip_type ip, + bool filtering, struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + u16 rule_id = 0; + int ret; + + mutex_lock(&ipa3_ctx->lock); + if (sizeof(dbg_buff) < count + 1) { + ret = -EFAULT; + goto bail; + } + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) { + ret = -EFAULT; + goto bail; + } + + dbg_buff[count] = '\0'; + if (strcmp(dbg_buff, "start\n") == 0) { + ipa_flt_rt_stats_start(ip, filtering); + } else if (strcmp(dbg_buff, "clear\n") == 0) { + ipa_flt_rt_stats_clear_rule_ids(ip, filtering); + } else if (strcmp(dbg_buff, "reset\n") == 0) { + ipa_reset_all_flt_rt_stats(ip, filtering); + } else { + if (kstrtou16(dbg_buff, 0, &rule_id)) { + ret = -EFAULT; + goto bail; + } + ipa_flt_rt_stats_add_rule_id(ip, filtering, rule_id); + } + + ret = count; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +static ssize_t ipa_debugfs_print_flt_rt_stats(enum ipa_ip_type ip, + bool filtering, struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipahal_stats_init_flt_rt *init; + struct ipa_flt_rt_stats out; + int i; + int res; + + if (ip == IPA_IP_v4 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init; + else if (ip == IPA_IP_v4) + init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init; + else if (ip == IPA_IP_v6 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init; + else + init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init; + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < IPAHAL_MAX_RULE_ID_32 * 32; i++) { + int idx = i / 32; + int bit = i % 32; + + if (init->rule_id_bitmask[idx] & (1 << bit)) { + res = ipa_get_flt_rt_stats(ip, filtering, i, &out); + if (res) { + mutex_unlock(&ipa3_ctx->lock); + return res; + } + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "rule_id: %d\n", i); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_pkts: %d\n", + out.num_pkts); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_pkts_hash: %d\n", + out.num_pkts_hash); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "\n"); + } + } + + mutex_unlock(&ipa3_ctx->lock); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_debugfs_reset_drop_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 client = 0; + int ret; + + mutex_lock(&ipa3_ctx->lock); + if (sizeof(dbg_buff) < count + 1) { + ret = -EFAULT; + goto bail; + } + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) { + ret = -EFAULT; + goto bail; + } + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &client)) { + ret = -EFAULT; + goto bail; + } + + if (client == -1) + ipa_reset_all_drop_stats(); + else + ipa_reset_drop_stats(client); + + ret = count; +bail: + 
mutex_unlock(&ipa3_ctx->lock); + return count; +} + +static ssize_t ipa_debugfs_print_drop_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipa_drop_stats_all *out; + int i; + int res; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + mutex_lock(&ipa3_ctx->lock); + res = ipa_get_drop_stats(out); + if (res) { + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + return res; + } + + for (i = 0; i < IPA_CLIENT_MAX; i++) { + int ep_idx = ipa3_get_ep_mapping(i); + + if (ep_idx == -1) + continue; + + if (!IPA_CLIENT_IS_CONS(i)) + continue; + + if (IPA_CLIENT_IS_TEST(i)) + continue; + + if (!(ipa3_ctx->hw_stats.drop.init.enabled_bitmask & + (1 << ep_idx))) + continue; + + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "%s:\n", + ipa_clients_strings[i]); + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "drop_byte_cnt=%u\n", + out->client[i].drop_byte_cnt); + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "drop_packet_cnt=%u\n", + out->client[i].drop_packet_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "\n"); + } + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_debugfs_control_flt_v4_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_control_flt_rt_stats(IPA_IP_v4, true, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_control_flt_v6_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_control_flt_rt_stats(IPA_IP_v6, true, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_control_rt_v4_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_control_flt_rt_stats(IPA_IP_v4, false, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_control_rt_v6_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_control_flt_rt_stats(IPA_IP_v6, false, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_print_flt_v4_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_print_flt_rt_stats(IPA_IP_v4, true, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_print_flt_v6_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_print_flt_rt_stats(IPA_IP_v6, true, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_print_rt_v4_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_print_flt_rt_stats(IPA_IP_v4, false, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_print_rt_v6_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_print_flt_rt_stats(IPA_IP_v6, false, file, ubuf, + count, ppos); +} + +static const struct file_operations ipa3_quota_ops = { + .read = ipa_debugfs_print_quota_stats, + .write = ipa_debugfs_reset_quota_stats, +}; + +static const struct file_operations ipa3_tethering_ops = { + .read = ipa_debugfs_print_tethering_stats, + .write = ipa_debugfs_reset_tethering_stats, +}; + +static const struct file_operations ipa3_flt_v4_ops = { + .read = ipa_debugfs_print_flt_v4_stats, + .write = ipa_debugfs_control_flt_v4_stats, +}; + +static const struct file_operations ipa3_flt_v6_ops = { + .read = 
ipa_debugfs_print_flt_v6_stats, + .write = ipa_debugfs_control_flt_v6_stats, +}; + +static const struct file_operations ipa3_rt_v4_ops = { + .read = ipa_debugfs_print_rt_v4_stats, + .write = ipa_debugfs_control_rt_v4_stats, +}; + +static const struct file_operations ipa3_rt_v6_ops = { + .read = ipa_debugfs_print_rt_v6_stats, + .write = ipa_debugfs_control_rt_v6_stats, +}; + +static const struct file_operations ipa3_drop_ops = { + .read = ipa_debugfs_print_drop_stats, + .write = ipa_debugfs_reset_drop_stats, +}; + + +int ipa_debugfs_init_stats(struct dentry *parent) +{ + const mode_t read_write_mode = 0664; + struct dentry *file; + struct dentry *dent; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + dent = debugfs_create_dir("hw_stats", parent); + if (IS_ERR_OR_NULL(dent)) { + IPAERR("fail to create folder in debug_fs\n"); + return -EFAULT; + } + + file = debugfs_create_file("quota", read_write_mode, dent, NULL, + &ipa3_quota_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "quota"); + goto fail; + } + + file = debugfs_create_file("drop", read_write_mode, dent, NULL, + &ipa3_drop_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "drop"); + goto fail; + } + + file = debugfs_create_file("tethering", read_write_mode, dent, NULL, + &ipa3_tethering_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "tethering"); + goto fail; + } + + file = debugfs_create_file("flt_v4", read_write_mode, dent, NULL, + &ipa3_flt_v4_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "flt_v4"); + goto fail; + } + + file = debugfs_create_file("flt_v6", read_write_mode, dent, NULL, + &ipa3_flt_v6_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "flt_v6"); + goto fail; + } + + file = debugfs_create_file("rt_v4", read_write_mode, dent, NULL, + &ipa3_rt_v4_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "rt_v4"); + goto fail; + } + + file = debugfs_create_file("rt_v6", read_write_mode, dent, NULL, + &ipa3_rt_v6_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "rt_v6"); + goto fail; + } + + return 0; +fail: + debugfs_remove_recursive(dent); + return -EFAULT; +} +#endif diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h new file mode 100644 index 000000000000..4e14bfd25777 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -0,0 +1,2652 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _IPA3_I_H_ +#define _IPA3_I_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_qmi_service.h" +#include "../ipa_api.h" +#include "ipahal/ipahal_reg.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" +#include "ipahal/ipahal_hw_stats.h" +#include "../ipa_common_i.h" +#include "ipa_uc_offload_i.h" +#include "ipa_pm.h" +#include +#include + +#define IPA_DEV_NAME_MAX_LEN 15 +#define DRV_NAME "ipa" + +#define IPA_COOKIE 0x57831603 +#define IPA_RT_RULE_COOKIE 0x57831604 +#define IPA_RT_TBL_COOKIE 0x57831605 +#define IPA_FLT_COOKIE 0x57831606 +#define IPA_HDR_COOKIE 0x57831607 +#define IPA_PROC_HDR_COOKIE 0x57831608 + +#define MTU_BYTE 1500 + +#define IPA_EP_NOT_ALLOCATED (-1) +#define IPA3_MAX_NUM_PIPES 31 +#define IPA_SYS_DESC_FIFO_SZ 0x800 +#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000 +#define IPA_COMMON_EVENT_RING_SIZE 0x7C00 +#define IPA_LAN_RX_HEADER_LENGTH (2) +#define IPA_QMAP_HEADER_LENGTH (4) +#define IPA_DL_CHECKSUM_LENGTH (8) +#define IPA_NUM_DESC_PER_SW_TX (3) +#define IPA_GENERIC_RX_POOL_SZ 192 +#define IPA_UC_FINISH_MAX 6 +#define IPA_UC_WAIT_MIN_SLEEP 1000 +#define IPA_UC_WAII_MAX_SLEEP 1200 +/* + * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but + * IPA users still use sps_iovec size as FIFO element size. + */ +#define IPA_FIFO_ELEMENT_SIZE 8 + +#define IPA_MAX_STATUS_STAT_NUM 30 + +#define IPA_IPC_LOG_PAGES 50 + +#define IPA_MAX_NUM_REQ_CACHE 10 + +#define NAPI_WEIGHT 60 + +#define IPADBG(fmt, args...) \ + do { \ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa3_ctx) { \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define IPADBG_LOW(fmt, args...) \ + do { \ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa3_ctx) \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAERR(fmt, args...) \ + do { \ + pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa3_ctx) { \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define IPAERR_RL(fmt, args...) 
\ + do { \ + pr_err_ratelimited_ipa(DRV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args);\ + if (ipa3_ctx) { \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +/* round addresses for closes page per SMMU requirements */ +#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \ + do { \ + (iova_p) = rounddown((iova), PAGE_SIZE); \ + (pa_p) = rounddown((pa), PAGE_SIZE); \ + (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \ + } while (0) + +#define WLAN_AMPDU_TX_EP 15 +#define WLAN_PROD_TX_EP 19 +#define WLAN1_CONS_RX_EP 14 +#define WLAN2_CONS_RX_EP 16 +#define WLAN3_CONS_RX_EP 17 +#define WLAN4_CONS_RX_EP 18 + +#define IPA_RAM_NAT_OFST 0 +#define IPA_RAM_NAT_SIZE 0 +#define IPA_RAM_IPV6CT_OFST 0 +#define IPA_RAM_IPV6CT_SIZE 0 +#define IPA_MEM_CANARY_VAL 0xdeadbeef + +#define IPA_STATS + +#ifdef IPA_STATS +#define IPA_STATS_INC_CNT(val) (++val) +#define IPA_STATS_DEC_CNT(val) (--val) +#define IPA_STATS_EXCP_CNT(__excp, __base) do { \ + if (__excp < 0 || __excp >= IPAHAL_PKT_STATUS_EXCEPTION_MAX) \ + break; \ + ++__base[__excp]; \ + } while (0) +#else +#define IPA_STATS_INC_CNT(x) do { } while (0) +#define IPA_STATS_DEC_CNT(x) +#define IPA_STATS_EXCP_CNT(__excp, __base) do { } while (0) +#endif + +#define IPA_HDR_BIN0 0 +#define IPA_HDR_BIN1 1 +#define IPA_HDR_BIN2 2 +#define IPA_HDR_BIN3 3 +#define IPA_HDR_BIN4 4 +#define IPA_HDR_BIN_MAX 5 + +#define IPA_HDR_PROC_CTX_BIN0 0 +#define IPA_HDR_PROC_CTX_BIN1 1 +#define IPA_HDR_PROC_CTX_BIN_MAX 2 + +#define IPA_RX_POOL_CEIL 32 +#define IPA_RX_SKB_SIZE 1792 + +#define IPA_A5_MUX_HDR_NAME "ipa_excp_hdr" +#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr" +#define IPA_INVALID_L4_PROTOCOL 0xFF + +#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8 +#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \ + (((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \ + ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1)) + +#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX) +#define IPA_MEM_PART(x_) (ipa3_ctx->ctrl->mem_partition->x_) + +#define IPA_GSI_CHANNEL_STOP_MAX_RETRY 10 +#define IPA_GSI_CHANNEL_STOP_PKT_SIZE 1 + +#define IPA_GSI_CHANNEL_EMPTY_MAX_RETRY 15 +#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC (1000) +#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC (2000) + +#define IPA_SLEEP_CLK_RATE_KHZ (32) + +#define IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120 +#define IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN 96 +#define IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50 +#define IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN 40 +#define SMEM_IPA_FILTER_TABLE 497 + +enum { + SMEM_APPS, + SMEM_MODEM, + SMEM_Q6, + SMEM_DSPS, + SMEM_WCNSS, + SMEM_CDSP, + SMEM_RPM, + SMEM_TZ, + SMEM_SPSS, + SMEM_HYP, + NUM_SMEM_SUBSYSTEMS, +}; + +#define IPA_WDI_RX_RING_RES 0 +#define IPA_WDI_RX_RING_RP_RES 1 +#define IPA_WDI_RX_COMP_RING_RES 2 +#define IPA_WDI_RX_COMP_RING_WP_RES 3 +#define IPA_WDI_TX_RING_RES 4 +#define IPA_WDI_CE_RING_RES 5 +#define IPA_WDI_CE_DB_RES 6 +#define IPA_WDI_TX_DB_RES 7 +#define IPA_WDI_MAX_RES 8 + +/* use QMAP header reserved bit to identify tethered traffic */ +#define IPA_QMAP_TETH_BIT (1 << 30) + +#ifdef CONFIG_ARM64 +/* Outer caches unsupported on ARM64 platforms */ +# define outer_flush_range(x, y) +# define __cpuc_flush_dcache_area __flush_dcache_area +#endif + +#define IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX 0 +#define IPA_SMP2P_OUT_CLK_VOTE_IDX 1 +#define IPA_SMP2P_SMEM_STATE_MASK 3 + + +#define IPA_SUMMING_THRESHOLD (0x10) +#define 
IPA_PIPE_MEM_START_OFST (0x0) +#define IPA_PIPE_MEM_SIZE (0x0) +#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \ + x == IPA_MODE_MOBILE_AP_WAN || \ + x == IPA_MODE_MOBILE_AP_WLAN) +#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL) +#define IPA_A5_MUX_HEADER_LENGTH (8) + +#define IPA_AGGR_MAX_STR_LENGTH (10) + +#define CLEANUP_TAG_PROCESS_TIMEOUT 500 + +#define IPA_AGGR_STR_IN_BYTES(str) \ + (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1) + +#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100 + +#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048 + +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0 +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1 +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2 +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3 + +#define IPA_MHI_GSI_EVENT_RING_ID_START 10 +#define IPA_MHI_GSI_EVENT_RING_ID_END 12 + +#define IPA_SMEM_SIZE (8 * 1024) + +#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000 +#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000 +#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10 + +/* round addresses for closes page per SMMU requirements */ +#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \ + do { \ + (iova_p) = rounddown((iova), PAGE_SIZE); \ + (pa_p) = rounddown((pa), PAGE_SIZE); \ + (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \ + } while (0) + + +/* The relative location in /lib/firmware where the FWs will reside */ +#define IPA_FWS_PATH "ipa/ipa_fws.elf" +/* + * The following paths below are used when building the system for the + * emulation environment. + * + * As new hardware platforms are added into the emulation environment, + * please add the appropriate paths here for their firmwares. + */ +#define IPA_FWS_PATH_4_0 "ipa/4.0/ipa_fws.elf" +#define IPA_FWS_PATH_3_5_1 "ipa/3.5.1/ipa_fws.elf" +#define IPA_FWS_PATH_4_5 "ipa/4.5/ipa_fws.elf" + +#ifdef CONFIG_COMPAT +#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR, \ + compat_uptr_t) +#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR, \ + compat_uptr_t) +#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_RT_RULE, \ + compat_uptr_t) +#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_RT_RULE, \ + compat_uptr_t) +#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_RT_TBL, \ + compat_uptr_t) +#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_COPY_HDR, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_TX_PROPS, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_RX_PROPS, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_EXT_PROPS, \ + compat_uptr_t) +#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_HDR, \ + compat_uptr_t) +#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ALLOC_NAT_MEM, \ + compat_uptr_t) +#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ALLOC_NAT_TABLE, \ + compat_uptr_t) +#define IPA_IOC_ALLOC_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ALLOC_IPV6CT_TABLE, \ + compat_uptr_t) +#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_INIT_NAT, \ + compat_uptr_t) 
+#define IPA_IOC_INIT_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_INIT_IPV6CT_TABLE, \ + compat_uptr_t) +#define IPA_IOC_TABLE_DMA_CMD32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_TABLE_DMA_CMD, \ + compat_uptr_t) +#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_DEL_NAT, \ + compat_uptr_t) +#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_NAT_TABLE, \ + compat_uptr_t) +#define IPA_IOC_DEL_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_IPV6CT_TABLE, \ + compat_uptr_t) +#define IPA_IOC_NAT_MODIFY_PDN32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NAT_MODIFY_PDN, \ + compat_uptr_t) +#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_NAT_OFFSET, \ + compat_uptr_t) +#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_PULL_MSG, \ + compat_uptr_t) +#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_RM_ADD_DEPENDENCY, \ + compat_uptr_t) +#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_RM_DEL_DEPENDENCY, \ + compat_uptr_t) +#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GENERATE_FLT_EQ, \ + compat_uptr_t) +#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_RT_TBL_INDEX, \ + compat_uptr_t) +#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_WRITE_QMAPID, \ + compat_uptr_t) +#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \ + compat_uptr_t) +#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR_PROC_CTX, \ + compat_uptr_t) +#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR_PROC_CTX, \ + compat_uptr_t) +#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_RT_RULE, \ + compat_uptr_t) +#endif /* #ifdef CONFIG_COMPAT */ + +#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311 +#define TZ_MEM_PROTECT_REGION_ID 0x10 + +#define MBOX_TOUT_MS 100 + +struct ipa3_active_client_htable_entry { + struct hlist_node list; + char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN]; + int count; + enum ipa_active_client_log_type type; +}; + +struct ipa3_active_clients_log_ctx { + spinlock_t lock; + char *log_buffer[IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES]; + int log_head; + int log_tail; + bool log_rdy; + struct hlist_head htable[IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE]; +}; + +struct ipa3_client_names { + enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS]; + int length; +}; + +struct ipa_smmu_cb_ctx { + bool valid; + struct device *dev; + struct dma_iommu_mapping *mapping; + struct iommu_domain *iommu; + unsigned long next_addr; + u32 va_start; + u32 va_size; + u32 va_end; +}; + +/** + * struct ipa3_flt_entry - IPA filtering table entry + * @link: entry's link in global filtering enrties list + * @rule: filter rule + * @cookie: cookie used for validity check + * @tbl: filter table + * @rt_tbl: routing table + * @hw_len: entry's size + * @id: rule handle - globally unique + * @prio: rule 10bit priority which defines the order of the rule + * among other rules at the same integrated table + * @rule_id: rule 10bit ID to be returned in packet status + * @ipacm_installed: 
indicate if installed by ipacm + */ +struct ipa3_flt_entry { + struct list_head link; + u32 cookie; + struct ipa_flt_rule rule; + struct ipa3_flt_tbl *tbl; + struct ipa3_rt_tbl *rt_tbl; + u32 hw_len; + int id; + u16 prio; + u16 rule_id; + bool ipacm_installed; +}; + +/** + * struct ipa3_rt_tbl - IPA routing table + * @link: table's link in global routing tables list + * @head_rt_rule_list: head of routing rules list + * @name: routing table name + * @idx: routing table index + * @rule_cnt: number of rules in routing table + * @ref_cnt: reference counter of routing table + * @set: collection of routing tables + * @cookie: cookie used for validity check + * @in_sys: flag indicating if the table is located in system memory + * @sz: the size of the routing table + * @curr_mem: current routing tables block in sys memory + * @prev_mem: previous routing table block in sys memory + * @id: routing table id + * @rule_ids: common idr structure that holds the rule_id for each rule + */ +struct ipa3_rt_tbl { + struct list_head link; + u32 cookie; + struct list_head head_rt_rule_list; + char name[IPA_RESOURCE_NAME_MAX]; + u32 idx; + u32 rule_cnt; + u32 ref_cnt; + struct ipa3_rt_tbl_set *set; + bool in_sys[IPA_RULE_TYPE_MAX]; + u32 sz[IPA_RULE_TYPE_MAX]; + struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX]; + struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX]; + int id; + struct idr *rule_ids; +}; + +/** + * struct ipa3_hdr_entry - IPA header table entry + * @link: entry's link in global header table entries list + * @hdr: the header + * @hdr_len: header length + * @name: name of header table entry + * @type: l2 header type + * @is_partial: flag indicating if header table entry is partial + * @is_hdr_proc_ctx: false - hdr entry resides in hdr table, + * true - hdr entry resides in DDR and pointed to by proc ctx + * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true, + * else 0 + * @proc_ctx: processing context header + * @offset_entry: entry's offset + * @cookie: cookie used for validity check + * @ref_cnt: reference counter of routing table + * @id: header entry id + * @is_eth2_ofst_valid: is eth2_ofst field valid? + * @eth2_ofst: offset to start of Ethernet-II/802.3 header + * @user_deleted: is the header deleted by the user? 
+ * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa3_hdr_entry { + struct list_head link; + u32 cookie; + u8 hdr[IPA_HDR_MAX_SIZE]; + u32 hdr_len; + char name[IPA_RESOURCE_NAME_MAX]; + enum ipa_hdr_l2_type type; + u8 is_partial; + bool is_hdr_proc_ctx; + dma_addr_t phys_base; + struct ipa3_hdr_proc_ctx_entry *proc_ctx; + struct ipa_hdr_offset_entry *offset_entry; + u32 ref_cnt; + int id; + u8 is_eth2_ofst_valid; + u16 eth2_ofst; + bool user_deleted; + bool ipacm_installed; +}; + +/** + * struct ipa3_hdr_tbl - IPA header table + * @head_hdr_entry_list: header entries list + * @head_offset_list: header offset list + * @head_free_offset_list: header free offset list + * @hdr_cnt: number of headers + * @end: the last header index + */ +struct ipa3_hdr_tbl { + struct list_head head_hdr_entry_list; + struct list_head head_offset_list[IPA_HDR_BIN_MAX]; + struct list_head head_free_offset_list[IPA_HDR_BIN_MAX]; + u32 hdr_cnt; + u32 end; +}; + +/** + * struct ipa3_hdr_offset_entry - IPA header offset entry + * @link: entry's link in global processing context header offset entries list + * @offset: the offset + * @bin: bin + * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa3_hdr_proc_ctx_offset_entry { + struct list_head link; + u32 offset; + u32 bin; + bool ipacm_installed; +}; + +/** + * struct ipa3_hdr_proc_ctx_entry - IPA processing context header table entry + * @link: entry's link in global header table entries list + * @type: header processing context type + * @l2tp_params: L2TP parameters + * @offset_entry: entry's offset + * @hdr: the header + * @cookie: cookie used for validity check + * @ref_cnt: reference counter of routing table + * @id: processing context header entry id + * @user_deleted: is the hdr processing context deleted by the user? 
+ * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa3_hdr_proc_ctx_entry { + struct list_head link; + u32 cookie; + enum ipa_hdr_proc_type type; + struct ipa_l2tp_hdr_proc_ctx_params l2tp_params; + struct ipa3_hdr_proc_ctx_offset_entry *offset_entry; + struct ipa3_hdr_entry *hdr; + u32 ref_cnt; + int id; + bool user_deleted; + bool ipacm_installed; +}; + +/** + * struct ipa3_hdr_proc_ctx_tbl - IPA processing context header table + * @head_proc_ctx_entry_list: header entries list + * @head_offset_list: header offset list + * @head_free_offset_list: header free offset list + * @proc_ctx_cnt: number of processing context headers + * @end: the last processing context header index + * @start_offset: offset in words of processing context header table + */ +struct ipa3_hdr_proc_ctx_tbl { + struct list_head head_proc_ctx_entry_list; + struct list_head head_offset_list[IPA_HDR_PROC_CTX_BIN_MAX]; + struct list_head head_free_offset_list[IPA_HDR_PROC_CTX_BIN_MAX]; + u32 proc_ctx_cnt; + u32 end; + u32 start_offset; +}; + +/** + * struct ipa3_flt_tbl - IPA filter table + * @head_flt_rule_list: filter rules list + * @rule_cnt: number of filter rules + * @in_sys: flag indicating if filter table is located in system memory + * @sz: the size of the filter tables + * @end: the last header index + * @curr_mem: current filter tables block in sys memory + * @prev_mem: previous filter table block in sys memory + * @rule_ids: common idr structure that holds the rule_id for each rule + */ +struct ipa3_flt_tbl { + struct list_head head_flt_rule_list; + u32 rule_cnt; + bool in_sys[IPA_RULE_TYPE_MAX]; + u32 sz[IPA_RULE_TYPE_MAX]; + struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX]; + struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX]; + bool sticky_rear; + struct idr *rule_ids; +}; + +/** + * struct ipa3_rt_entry - IPA routing table entry + * @link: entry's link in global routing table entries list + * @rule: routing rule + * @cookie: cookie used for validity check + * @tbl: routing table + * @hdr: header table + * @proc_ctx: processing context table + * @hw_len: the length of the table + * @id: rule handle - globaly unique + * @prio: rule 10bit priority which defines the order of the rule + * among other rules at the integrated same table + * @rule_id: rule 10bit ID to be returned in packet status + * @rule_id_valid: indicate if rule_id_valid valid or not? 
+ * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa3_rt_entry { + struct list_head link; + u32 cookie; + struct ipa_rt_rule rule; + struct ipa3_rt_tbl *tbl; + struct ipa3_hdr_entry *hdr; + struct ipa3_hdr_proc_ctx_entry *proc_ctx; + u32 hw_len; + int id; + u16 prio; + u16 rule_id; + u16 rule_id_valid; + bool ipacm_installed; +}; + +/** + * struct ipa3_rt_tbl_set - collection of routing tables + * @head_rt_tbl_list: collection of routing tables + * @tbl_cnt: number of routing tables + * @rule_ids: idr structure that holds the rule_id for each rule + */ +struct ipa3_rt_tbl_set { + struct list_head head_rt_tbl_list; + u32 tbl_cnt; + struct idr rule_ids; +}; + +/** + * struct ipa3_wlan_stats - Wlan stats for each wlan endpoint + * @rx_pkts_rcvd: Packets sent by wlan driver + * @rx_pkts_status_rcvd: Status packets received from ipa hw + * @rx_hd_processed: Data Descriptors processed by IPA Driver + * @rx_hd_reply: Data Descriptors recycled by wlan driver + * @rx_hd_rcvd: Data Descriptors sent by wlan driver + * @rx_pkt_leak: Packet count that are not recycled + * @rx_dp_fail: Packets failed to transfer to IPA HW + * @tx_pkts_rcvd: SKB Buffers received from ipa hw + * @tx_pkts_sent: SKB Buffers sent to wlan driver + * @tx_pkts_dropped: Dropped packets count + */ +struct ipa3_wlan_stats { + u32 rx_pkts_rcvd; + u32 rx_pkts_status_rcvd; + u32 rx_hd_processed; + u32 rx_hd_reply; + u32 rx_hd_rcvd; + u32 rx_pkt_leak; + u32 rx_dp_fail; + u32 tx_pkts_rcvd; + u32 tx_pkts_sent; + u32 tx_pkts_dropped; +}; + +/** + * struct ipa3_wlan_comm_memb - Wlan comm members + * @wlan_spinlock: protects wlan comm buff list and its size + * @ipa_tx_mul_spinlock: protects tx dp mul transfer + * @wlan_comm_total_cnt: wlan common skb buffers allocated count + * @wlan_comm_free_cnt: wlan common skb buffer free count + * @total_tx_pkts_freed: Recycled Buffer count + * @wlan_comm_desc_list: wlan common skb buffer list + */ +struct ipa3_wlan_comm_memb { + spinlock_t wlan_spinlock; + spinlock_t ipa_tx_mul_spinlock; + u32 wlan_comm_total_cnt; + u32 wlan_comm_free_cnt; + u32 total_tx_pkts_freed; + struct list_head wlan_comm_desc_list; + atomic_t active_clnt_cnt; +}; + +struct ipa_gsi_ep_mem_info { + u16 evt_ring_len; + u64 evt_ring_base_addr; + void *evt_ring_base_vaddr; + u16 chan_ring_len; + u64 chan_ring_base_addr; + void *chan_ring_base_vaddr; +}; + +struct ipa3_status_stats { + struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM]; + unsigned int curr; +}; + +/** + * struct ipa3_ep_context - IPA end point context + * @valid: flag indicating id EP context is valid + * @client: EP client type + * @gsi_chan_hdl: EP's GSI channel handle + * @gsi_evt_ring_hdl: EP's GSI channel event ring handle + * @gsi_mem_info: EP's GSI channel rings info + * @chan_scratch: EP's GSI channel scratch info + * @cfg: EP cionfiguration + * @dst_pipe_index: destination pipe index + * @rt_tbl_idx: routing table index + * @priv: user provided information which will forwarded once the user is + * notified for new data avail + * @client_notify: user provided CB for EP events notification, the event is + * data revived. + * @skip_ep_cfg: boolean field that determines if EP should be configured + * by IPA driver + * @keep_ipa_awake: when true, IPA will not be clock gated + * @disconnect_in_progress: Indicates client disconnect in progress. + * @qmi_request_sent: Indicates whether QMI request to enable clear data path + * request is sent or not. 
+ * @client_lock_unlock: callback function to take mutex lock/unlock for USB + * clients + */ +struct ipa3_ep_context { + int valid; + enum ipa_client_type client; + unsigned long gsi_chan_hdl; + unsigned long gsi_evt_ring_hdl; + struct ipa_gsi_ep_mem_info gsi_mem_info; + union __packed gsi_channel_scratch chan_scratch; + bool bytes_xfered_valid; + u16 bytes_xfered; + dma_addr_t phys_base; + struct ipa_ep_cfg cfg; + struct ipa_ep_cfg_holb holb; + struct ipahal_reg_ep_cfg_status status; + u32 dst_pipe_index; + u32 rt_tbl_idx; + void *priv; + void (*client_notify)(void *priv, enum ipa_dp_evt_type evt, + unsigned long data); + atomic_t avail_fifo_desc; + u32 dflt_flt4_rule_hdl; + u32 dflt_flt6_rule_hdl; + bool skip_ep_cfg; + bool keep_ipa_awake; + struct ipa3_wlan_stats wstats; + u32 uc_offload_state; + u32 gsi_offload_state; + bool disconnect_in_progress; + u32 qmi_request_sent; + u32 eot_in_poll_err; + bool ep_delay_set; + + int (*client_lock_unlock)(bool is_lock); + + /* sys MUST be the last element of this struct */ + struct ipa3_sys_context *sys; +}; + +/** + * ipa_usb_xdci_chan_params - xDCI channel related properties + * + * @ipa_ep_cfg: IPA EP configuration + * @client: type of "client" + * @priv: callback cookie + * @notify: callback + * priv - callback cookie evt - type of event data - data relevant + * to event. May not be valid. See event_type enum for valid + * cases. + * @skip_ep_cfg: boolean field that determines if EP should be + * configured by IPA driver + * @keep_ipa_awake: when true, IPA will not be clock gated + * @evt_ring_params: parameters for the channel's event ring + * @evt_scratch: parameters for the channel's event ring scratch + * @chan_params: parameters for the channel + * @chan_scratch: parameters for the channel's scratch + * + */ +struct ipa_request_gsi_channel_params { + struct ipa_ep_cfg ipa_ep_cfg; + enum ipa_client_type client; + void *priv; + ipa_notify_cb notify; + bool skip_ep_cfg; + bool keep_ipa_awake; + struct gsi_evt_ring_props evt_ring_params; + union __packed gsi_evt_scratch evt_scratch; + struct gsi_chan_props chan_params; + union __packed gsi_channel_scratch chan_scratch; +}; + +enum ipa3_sys_pipe_policy { + IPA_POLICY_INTR_MODE, + IPA_POLICY_NOINTR_MODE, + IPA_POLICY_INTR_POLL_MODE, +}; + +struct ipa3_repl_ctx { + struct ipa3_rx_pkt_wrapper **cache; + atomic_t head_idx; + atomic_t tail_idx; + u32 capacity; + atomic_t pending; +}; + +/** + * struct ipa3_sys_context - IPA GPI pipes context + * @head_desc_list: header descriptors list + * @len: the size of the above list + * @spinlock: protects the list and its size + * @ep: IPA EP context + * + * IPA context specific to the GPI pipes a.k.a LAN IN/OUT and WAN + */ +struct ipa3_sys_context { + u32 len; + atomic_t curr_polling_state; + struct delayed_work switch_to_intr_work; + enum ipa3_sys_pipe_policy policy; + bool use_comm_evt_ring; + bool nop_pending; + int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys); + struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags); + void (*free_skb)(struct sk_buff *skb); + void (*free_rx_wrapper)(struct ipa3_rx_pkt_wrapper *rk_pkt); + u32 rx_buff_sz; + u32 rx_pool_sz; + struct sk_buff *prev_skb; + unsigned int len_rem; + unsigned int len_pad; + unsigned int len_partial; + bool drop_packet; + struct work_struct work; + struct delayed_work replenish_rx_work; + struct work_struct repl_work; + void (*repl_hdlr)(struct ipa3_sys_context *sys); + struct ipa3_repl_ctx repl; + u32 pkt_sent; + struct napi_struct *napi_obj; + + /* ordering is important 
- mutable fields go above */ + struct ipa3_ep_context *ep; + struct list_head head_desc_list; + struct list_head rcycl_list; + spinlock_t spinlock; + struct hrtimer db_timer; + struct workqueue_struct *wq; + struct workqueue_struct *repl_wq; + struct ipa3_status_stats *status_stat; + u32 pm_hdl; + /* ordering is important - other immutable fields go below */ +}; + +/** + * enum ipa3_desc_type - IPA decriptors type + * + * IPA decriptors type, IPA supports DD and ICD but no CD + */ +enum ipa3_desc_type { + IPA_DATA_DESC, + IPA_DATA_DESC_SKB, + IPA_DATA_DESC_SKB_PAGED, + IPA_IMM_CMD_DESC, +}; + +/** + * struct ipa3_tx_pkt_wrapper - IPA Tx packet wrapper + * @type: specify if this packet is for the skb or immediate command + * @mem: memory buffer used by this Tx packet + * @work: work struct for current Tx packet + * @link: linked to the wrappers on that pipe + * @callback: IPA client provided callback + * @user1: cookie1 for above callback + * @user2: cookie2 for above callback + * @sys: corresponding IPA sys context + * @cnt: 1 for single transfers, + * >1 and <0xFFFF for first of a "multiple" transfer, + * 0xFFFF for last desc, 0 for rest of "multiple' transfer + * @bounce: va of bounce buffer + * @unmap_dma: in case this is true, the buffer will not be dma unmapped + * + * This struct can wrap both data packet and immediate command packet. + */ +struct ipa3_tx_pkt_wrapper { + enum ipa3_desc_type type; + struct ipa_mem_buffer mem; + struct work_struct work; + struct list_head link; + void (*callback)(void *user1, int user2); + void *user1; + int user2; + struct ipa3_sys_context *sys; + u32 cnt; + void *bounce; + bool no_unmap_dma; +}; + +/** + * struct ipa3_dma_xfer_wrapper - IPADMA transfer descr wrapper + * @phys_addr_src: physical address of the source data to copy + * @phys_addr_dest: physical address to store the copied data + * @len: len in bytes to copy + * @link: linked to the wrappers list on the proper(sync/async) cons pipe + * @xfer_done: completion object for sync_memcpy completion + * @callback: IPADMA client provided completion callback + * @user1: cookie1 for above callback + * + * This struct can wrap both sync and async memcpy transfers descriptors. 
+ */ +struct ipa3_dma_xfer_wrapper { + u64 phys_addr_src; + u64 phys_addr_dest; + u16 len; + struct list_head link; + struct completion xfer_done; + void (*callback)(void *user1); + void *user1; +}; + +/** + * struct ipa3_desc - IPA descriptor + * @type: skb or immediate command or plain old data + * @pyld: points to skb + * @frag: points to paged fragment + * or kmalloc'ed immediate command parameters/plain old data + * @dma_address: dma mapped address of pyld + * @dma_address_valid: valid field for dma_address + * @is_tag_status: flag for IP_PACKET_TAG_STATUS imd cmd + * @len: length of the pyld + * @opcode: for immediate commands + * @callback: IPA client provided completion callback + * @user1: cookie1 for above callback + * @user2: cookie2 for above callback + * @xfer_done: completion object for sync completion + * @skip_db_ring: specifies whether GSI doorbell should not be rang + */ +struct ipa3_desc { + enum ipa3_desc_type type; + void *pyld; + skb_frag_t *frag; + dma_addr_t dma_address; + bool dma_address_valid; + bool is_tag_status; + u16 len; + u16 opcode; + void (*callback)(void *user1, int user2); + void *user1; + int user2; + struct completion xfer_done; + bool skip_db_ring; +}; + +/** + * struct ipa3_rx_pkt_wrapper - IPA Rx packet wrapper + * @skb: skb + * @dma_address: DMA address of this Rx packet + * @link: linked to the Rx packets on that pipe + * @len: how many bytes are copied into skb's flat buffer + */ +struct ipa3_rx_pkt_wrapper { + struct list_head link; + struct ipa_rx_data data; + u32 len; + struct work_struct work; + struct ipa3_sys_context *sys; +}; + +/** + * struct ipa_pdn_entry - IPA PDN config table entry + * @public_ip: the PDN's public ip + * @src_metadata: the PDN's metadata to be replaced for source NAT + * @dst_metadata: the PDN's metadata to be replaced for destination NAT + * @resrvd: reserved field + */ +struct ipa_pdn_entry { + u32 public_ip; + u32 src_metadata; + u32 dst_metadata; + u32 resrvd; +}; + +/** + * struct ipa3_nat_ipv6ct_tmp_mem - NAT/IPv6CT temporary memory + * + * In case NAT/IPv6CT table are destroyed the HW is provided with the + * temporary memory + * + * @vaddr: the address of the temporary memory + * @dma_handle: the handle of the temporary memory + */ +struct ipa3_nat_ipv6ct_tmp_mem { + void *vaddr; + dma_addr_t dma_handle; +}; + +/** + * struct ipa3_nat_ipv6ct_common_mem - IPA NAT/IPv6CT memory device + * @class: pointer to the struct class + * @dev: the dev_t of the device + * @cdev: cdev of the device + * @dev_num: device number + * @vaddr: the virtual address in the system memory + * @dma_handle: the system memory DMA handle + * @phys_mem_size: the physical size in the shared memory + * @smem_offset: the offset in the shared memory + * @size: memory size + * @is_mapped: flag indicating if memory is mapped + * @is_sys_mem: flag indicating if memory is sys memory + * @is_mem_allocated: flag indicating if the memory is allocated + * @is_hw_init: flag indicating if the corresponding HW is initialized + * @is_dev_init: flag indicating if device is initialized + * @lock: memory mutex + * @base_address: table virtual address + * @base_table_addr: base table address + * @expansion_table_addr: expansion table address + * @table_entries: num of entries in the base table + * @expn_table_entries: num of entries in the expansion table + * @tmp_mem: temporary memory used to always provide HW with a legal memory + * @name: the device name + */ +struct ipa3_nat_ipv6ct_common_mem { + struct class *class; + struct device *dev; + struct cdev 
cdev; + dev_t dev_num; + + /* system memory */ + void *vaddr; + dma_addr_t dma_handle; + + /* shared memory */ + u32 phys_mem_size; + u32 smem_offset; + + size_t size; + bool is_mapped; + bool is_sys_mem; + bool is_mem_allocated; + bool is_hw_init; + bool is_dev_init; + struct mutex lock; + void *base_address; + char *base_table_addr; + char *expansion_table_addr; + u32 table_entries; + u32 expn_table_entries; + struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem; + char name[IPA_DEV_NAME_MAX_LEN]; +}; + +/** + * struct ipa3_nat_mem - IPA NAT memory description + * @dev: the memory device structure + * @index_table_addr: index table address + * @index_table_expansion_addr: index expansion table address + * @public_ip_addr: ip address of nat table + * @pdn_mem: pdn config table SW cache memory structure + */ +struct ipa3_nat_mem { + struct ipa3_nat_ipv6ct_common_mem dev; + char *index_table_addr; + char *index_table_expansion_addr; + u32 public_ip_addr; + struct ipa_mem_buffer pdn_mem; +}; + +/** + * struct ipa3_ipv6ct_mem - IPA IPv6 connection tracking memory description + * @dev: the memory device structure + */ +struct ipa3_ipv6ct_mem { + struct ipa3_nat_ipv6ct_common_mem dev; +}; + +/** + * enum ipa3_hw_mode - IPA hardware mode + * @IPA_HW_Normal: Regular IPA hardware + * @IPA_HW_Virtual: IPA hardware supporting virtual memory allocation + * @IPA_HW_PCIE: IPA hardware supporting memory allocation over PCIE Bridge + * @IPA_HW_Emulation: IPA emulation hardware + */ +enum ipa3_hw_mode { + IPA_HW_MODE_NORMAL = 0, + IPA_HW_MODE_VIRTUAL = 1, + IPA_HW_MODE_PCIE = 2, + IPA_HW_MODE_EMULATION = 3, +}; + +enum ipa3_config_this_ep { + IPA_CONFIGURE_THIS_EP, + IPA_DO_NOT_CONFIGURE_THIS_EP, +}; + +struct ipa3_stats { + u32 tx_sw_pkts; + u32 tx_hw_pkts; + u32 rx_pkts; + u32 rx_excp_pkts[IPAHAL_PKT_STATUS_EXCEPTION_MAX]; + u32 rx_repl_repost; + u32 tx_pkts_compl; + u32 rx_q_len; + u32 msg_w[IPA_EVENT_MAX_NUM]; + u32 msg_r[IPA_EVENT_MAX_NUM]; + u32 stat_compl; + u32 aggr_close; + u32 wan_aggr_close; + u32 wan_rx_empty; + u32 wan_repl_rx_empty; + u32 lan_rx_empty; + u32 lan_repl_rx_empty; + u32 flow_enable; + u32 flow_disable; + u32 tx_non_linear; +}; + +struct ipa3_active_clients { + struct mutex mutex; + atomic_t cnt; + int bus_vote_idx; +}; + +struct ipa3_wakelock_ref_cnt { + spinlock_t spinlock; + int cnt; +}; + +struct ipa3_tag_completion { + struct completion comp; + atomic_t cnt; +}; + +struct ipa3_controller; + +/** + * struct ipa3_uc_hdlrs - IPA uC callback functions + * @ipa_uc_loaded_hdlr: Function handler when uC is loaded + * @ipa_uc_event_hdlr: Event handler function + * @ipa3_uc_response_hdlr: Response handler function + * @ipa_uc_event_log_info_hdlr: Log event handler function + */ +struct ipa3_uc_hdlrs { + void (*ipa_uc_loaded_hdlr)(void); + + void (*ipa_uc_event_hdlr) + (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio); + + int (*ipa3_uc_response_hdlr) + (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio, + u32 *uc_status); + + void (*ipa_uc_event_log_info_hdlr) + (struct IpaHwEventLogInfoData_t *uc_event_top_mmio); +}; + +/** + * enum ipa3_hw_flags - flags which defines the behavior of HW + * + * @IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE: Halt system in case of assert + * failure. + * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR: Channel error would be reported + * in the event ring only. No event to CPU. 
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP: No need to report event + * IPA_HW_2_CPU_EVENT_MHI_WAKE_UP_REQUEST + * @IPA_HW_FLAG_WORK_OVER_DDR: Perform all transaction to external addresses by + * QMB (avoid memcpy) + * @IPA_HW_FLAG_NO_REPORT_OOB: If set do not report that the device is OOB in + * IN Channel + * @IPA_HW_FLAG_NO_REPORT_DB_MODE: If set, do not report that the device is + * entering a mode where it expects a doorbell to be rung for OUT Channel + * @IPA_HW_FLAG_NO_START_OOB_TIMER + */ +enum ipa3_hw_flags { + IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE = 0x01, + IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR = 0x02, + IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP = 0x04, + IPA_HW_FLAG_WORK_OVER_DDR = 0x08, + IPA_HW_FLAG_NO_REPORT_OOB = 0x10, + IPA_HW_FLAG_NO_REPORT_DB_MODE = 0x20, + IPA_HW_FLAG_NO_START_OOB_TIMER = 0x40 +}; + +/** + * struct ipa3_uc_ctx - IPA uC context + * @uc_inited: Indicates if uC interface has been initialized + * @uc_loaded: Indicates if uC has loaded + * @uc_failed: Indicates if uC has failed / returned an error + * @uc_lock: uC interface lock to allow only one uC interaction at a time + * @uc_spinlock: same as uc_lock but for irq contexts + * @uc_completation: Completion mechanism to wait for uC commands + * @uc_sram_mmio: Pointer to uC mapped memory + * @pending_cmd: The last command sent waiting to be ACKed + * @uc_status: The last status provided by the uC + * @uc_error_type: error type from uC error event + * @uc_error_timestamp: tag timer sampled after uC crashed + */ +struct ipa3_uc_ctx { + bool uc_inited; + bool uc_loaded; + bool uc_failed; + struct mutex uc_lock; + spinlock_t uc_spinlock; + struct completion uc_completion; + struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio; + struct IpaHwEventLogInfoData_t *uc_event_top_mmio; + u32 uc_event_top_ofst; + u32 pending_cmd; + u32 uc_status; + u32 uc_error_type; + u32 uc_error_timestamp; + phys_addr_t rdy_ring_base_pa; + phys_addr_t rdy_ring_rp_pa; + u32 rdy_ring_size; + phys_addr_t rdy_comp_ring_base_pa; + phys_addr_t rdy_comp_ring_wp_pa; + u32 rdy_comp_ring_size; + u32 *rdy_ring_rp_va; + u32 *rdy_comp_ring_wp_va; +}; + +/** + * struct ipa3_uc_wdi_ctx + * @wdi_uc_top_ofst: + * @wdi_uc_top_mmio: + * @wdi_uc_stats_ofst: + * @wdi_uc_stats_mmio: + */ +struct ipa3_uc_wdi_ctx { + /* WDI specific fields */ + u32 wdi_uc_stats_ofst; + struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio; + void *priv; + ipa_uc_ready_cb uc_ready_cb; + /* for AP+STA stats update */ +#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN + ipa_wdi_meter_notifier_cb stats_notify; +#endif +}; + +/** + * struct ipa3_wdi2_ctx - IPA wdi2 context + */ +struct ipa3_wdi2_ctx { + phys_addr_t rdy_ring_base_pa; + phys_addr_t rdy_ring_rp_pa; + u32 rdy_ring_size; + phys_addr_t rdy_comp_ring_base_pa; + phys_addr_t rdy_comp_ring_wp_pa; + u32 rdy_comp_ring_size; + u32 *rdy_ring_rp_va; + u32 *rdy_comp_ring_wp_va; +}; +/** + * struct ipa3_transport_pm - transport power management related members + * @transport_pm_mutex: Mutex to protect the transport_pm functionality. 
+ */ +struct ipa3_transport_pm { + atomic_t dec_clients; + atomic_t eot_activity; + struct mutex transport_pm_mutex; +}; + +/** + * struct ipa3cm_client_info - the client-info indicated from IPACM + * @ipacm_client_enum: the enum to indicate tether-client + * @ipacm_client_uplink: the bool to indicate pipe for uplink + */ +struct ipa3cm_client_info { + enum ipacm_client_enum client_enum; + bool uplink; +}; + +struct ipa3_smp2p_info { + u32 out_base_id; + u32 in_base_id; + bool ipa_clk_on; + bool res_sent; + unsigned int smem_bit; + struct qcom_smem_state *smem_state; +}; + +/** + * struct ipa3_ready_cb_info - A list of all the registrations + * for an indication of IPA driver readiness + * + * @link: linked list link + * @ready_cb: callback + * @user_data: User data + * + */ +struct ipa3_ready_cb_info { + struct list_head link; + ipa_ready_cb ready_cb; + void *user_data; +}; + +struct ipa_dma_task_info { + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_pyld *cmd_pyld; +}; + +struct ipa_quota_stats { + u64 num_ipv4_bytes; + u64 num_ipv6_bytes; + u32 num_ipv4_pkts; + u32 num_ipv6_pkts; +}; + +struct ipa_quota_stats_all { + struct ipa_quota_stats client[IPA_CLIENT_MAX]; +}; + +struct ipa_drop_stats { + u32 drop_packet_cnt; + u32 drop_byte_cnt; +}; + +struct ipa_drop_stats_all { + struct ipa_drop_stats client[IPA_CLIENT_MAX]; +}; + +struct ipa_hw_stats_quota { + struct ipahal_stats_init_quota init; + struct ipa_quota_stats_all stats; +}; + +struct ipa_hw_stats_teth { + struct ipahal_stats_init_tethering init; + struct ipa_quota_stats_all prod_stats_sum[IPA_CLIENT_MAX]; + struct ipa_quota_stats_all prod_stats[IPA_CLIENT_MAX]; +}; + +struct ipa_hw_stats_flt_rt { + struct ipahal_stats_init_flt_rt flt_v4_init; + struct ipahal_stats_init_flt_rt flt_v6_init; + struct ipahal_stats_init_flt_rt rt_v4_init; + struct ipahal_stats_init_flt_rt rt_v6_init; +}; + +struct ipa_hw_stats_drop { + struct ipahal_stats_init_drop init; + struct ipa_drop_stats_all stats; +}; + +struct ipa_hw_stats { + bool enabled; + struct ipa_hw_stats_quota quota; + struct ipa_hw_stats_teth teth; + struct ipa_hw_stats_flt_rt flt_rt; + struct ipa_hw_stats_drop drop; +}; + +struct ipa_cne_evt { + struct ipa_wan_msg wan_msg; + struct ipa_msg_meta msg_meta; +}; + +enum ipa_smmu_cb_type { + IPA_SMMU_CB_AP, + IPA_SMMU_CB_WLAN, + IPA_SMMU_CB_UC, + IPA_SMMU_CB_MAX +}; + +/** + * struct ipa3_char_device_context - IPA character device + * @class: pointer to the struct class + * @dev_num: device number + * @dev: the dev_t of the device + * @cdev: cdev of the device + */ +struct ipa3_char_device_context { + struct class *class; + dev_t dev_num; + struct device *dev; + struct cdev cdev; +}; + +/** + * struct ipa3_context - IPA context + * @cdev: cdev context + * @ep: list of all end points + * @skip_ep_cfg_shadow: state to update filter table correctly across + power-save + * @ep_flt_bitmap: End-points supporting filtering bitmap + * @ep_flt_num: End-points supporting filtering number + * @resume_on_connect: resume ep on ipa connect + * @flt_tbl: list of all IPA filter tables + * @flt_rule_ids: idr structure that holds the rule_id for each rule + * @mode: IPA operating mode + * @mmio: iomem + * @ipa_wrapper_base: IPA wrapper base address + * @ipa_wrapper_size: size of the memory pointed to by ipa_wrapper_base + * @hdr_tbl: IPA header table + * @hdr_proc_ctx_tbl: IPA processing context table + * @rt_tbl_set: list of routing tables each of which is a list of rules + * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped + * 
@flt_rule_cache: filter rule cache + * @rt_rule_cache: routing rule cache + * @hdr_cache: header cache + * @hdr_offset_cache: header offset cache + * @hdr_proc_ctx_cache: processing context cache + * @hdr_proc_ctx_offset_cache: processing context offset cache + * @rt_tbl_cache: routing table cache + * @tx_pkt_wrapper_cache: Tx packets cache + * @rx_pkt_wrapper_cache: Rx packets cache + * @rt_idx_bitmap: routing table index bitmap + * @lock: this does NOT protect the linked lists within ipa3_sys_context + * @smem_sz: shared memory size available for SW use starting + * from non-restricted bytes + * @smem_restricted_bytes: the bytes that SW should not use in the shared mem + * @nat_mem: NAT memory + * @ipv6ct_mem: IPv6CT memory + * @excp_hdr_hdl: exception header handle + * @dflt_v4_rt_rule_hdl: default v4 routing rule handle + * @dflt_v6_rt_rule_hdl: default v6 routing rule handle + * @aggregation_type: aggregation type used on USB client endpoint + * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint + * @aggregation_time_limit: aggregation time limit used on USB client endpoint + * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system + * @hdr_proc_ctx_tbl_lcl: where proc_ctx tbl resides true-local, false-system + * @hdr_mem: header memory + * @hdr_proc_ctx_mem: processing context memory + * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system + * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system + * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system + * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system + * @power_mgmt_wq: workqueue for power management + * @transport_power_mgmt_wq: workqueue transport related power management + * @tag_process_before_gating: indicates whether to start tag process before + * gating IPA clocks + * @transport_pm: transport power management related information + * @disconnect_lock: protects LAN_CONS packet receive notification CB + * @ipa3_active_clients: structure for reference counting connected IPA clients + * @ipa_hw_type: type of IPA HW type (e.g. IPA 1.0, IPA 1.1 etc') + * @ipa3_hw_mode: mode of IPA HW mode (e.g. Normal, Virtual or over PCIe) + * @use_ipa_teth_bridge: use tethering bridge driver + * @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules + * @logbuf: ipc log buffer for high priority messages + * @logbuf_low: ipc log buffer for low priority messages + * @ipa_wdi2: using wdi-2.0 + * @ipa_fltrt_not_hashable: filter/route rules not hashable + * @use_64_bit_dma_mask: using 64bits dma mask + * @ipa_bus_hdl: msm driver handle for the data path bus + * @ctrl: holds the core specific operations based on + * core version (vtable like) + * @pkt_init_imm_opcode: opcode for IP_PACKET_INIT imm cmd + * @enable_clock_scaling: clock scaling is enabled ? + * @curr_ipa_clk_rate: IPA current clock rate + * @wcstats: wlan common buffer stats + * @uc_ctx: uC interface context + * @uc_wdi_ctx: WDI specific fields for uC interface + * @ipa_num_pipes: The number of pipes used by IPA HW + * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided + * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA + * @apply_rg10_wa: Indicates whether to use register group 10 workaround + * @gsi_ch20_wa: Indicates whether to apply GSI physical channel 20 workaround + * @w_lock: Indicates the wakeup source. 
+ * @wakelock_ref_cnt: Indicates the number of times wakelock is acquired + * @ipa_initialization_complete: Indicates that IPA is fully initialized + * @ipa_ready_cb_list: A list of all the clients who require a CB when IPA + * driver is ready/initialized. + * @init_completion_obj: Completion object to be used in case IPA driver hasn't + * @mhi_evid_limits: MHI event rings start and end ids + * finished initializing. Example of use - IOCTLs to /dev/ipa + * IPA context - holds all relevant info about IPA driver and its state + */ +struct ipa3_context { + struct ipa3_char_device_context cdev; + struct ipa3_ep_context ep[IPA3_MAX_NUM_PIPES]; + bool skip_ep_cfg_shadow[IPA3_MAX_NUM_PIPES]; + u32 ep_flt_bitmap; + u32 ep_flt_num; + bool resume_on_connect[IPA_CLIENT_MAX]; + struct ipa3_flt_tbl flt_tbl[IPA3_MAX_NUM_PIPES][IPA_IP_MAX]; + struct idr flt_rule_ids[IPA_IP_MAX]; + void __iomem *mmio; + u32 ipa_wrapper_base; + u32 ipa_wrapper_size; + struct ipa3_hdr_tbl hdr_tbl; + struct ipa3_hdr_proc_ctx_tbl hdr_proc_ctx_tbl; + struct ipa3_rt_tbl_set rt_tbl_set[IPA_IP_MAX]; + struct ipa3_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX]; + struct kmem_cache *flt_rule_cache; + struct kmem_cache *rt_rule_cache; + struct kmem_cache *hdr_cache; + struct kmem_cache *hdr_offset_cache; + struct kmem_cache *hdr_proc_ctx_cache; + struct kmem_cache *hdr_proc_ctx_offset_cache; + struct kmem_cache *rt_tbl_cache; + struct kmem_cache *tx_pkt_wrapper_cache; + struct kmem_cache *rx_pkt_wrapper_cache; + unsigned long rt_idx_bitmap[IPA_IP_MAX]; + struct mutex lock; + u16 smem_sz; + u16 smem_restricted_bytes; + u16 smem_reqd_sz; + struct ipa3_nat_mem nat_mem; + struct ipa3_ipv6ct_mem ipv6ct_mem; + u32 excp_hdr_hdl; + u32 dflt_v4_rt_rule_hdl; + u32 dflt_v6_rt_rule_hdl; + uint aggregation_type; + uint aggregation_byte_limit; + uint aggregation_time_limit; + bool hdr_tbl_lcl; + bool hdr_proc_ctx_tbl_lcl; + struct ipa_mem_buffer hdr_mem; + struct ipa_mem_buffer hdr_proc_ctx_mem; + bool ip4_rt_tbl_hash_lcl; + bool ip4_rt_tbl_nhash_lcl; + bool ip6_rt_tbl_hash_lcl; + bool ip6_rt_tbl_nhash_lcl; + bool ip4_flt_tbl_hash_lcl; + bool ip4_flt_tbl_nhash_lcl; + bool ip6_flt_tbl_hash_lcl; + bool ip6_flt_tbl_nhash_lcl; + struct ipa3_active_clients ipa3_active_clients; + struct ipa3_active_clients_log_ctx ipa3_active_clients_logging; + struct workqueue_struct *power_mgmt_wq; + struct workqueue_struct *transport_power_mgmt_wq; + bool tag_process_before_gating; + struct ipa3_transport_pm transport_pm; + unsigned long gsi_evt_comm_hdl; + u32 gsi_evt_comm_ring_rem; + u32 clnt_hdl_cmd; + u32 clnt_hdl_data_in; + u32 clnt_hdl_data_out; + spinlock_t disconnect_lock; + u8 a5_pipe_index; + struct list_head intf_list; + struct list_head msg_list; + struct list_head pull_msg_list; + struct mutex msg_lock; + struct list_head msg_wlan_client_list; + struct mutex msg_wlan_client_lock; + wait_queue_head_t msg_waitq; + enum ipa_hw_type ipa_hw_type; + enum ipa3_hw_mode ipa3_hw_mode; + bool ipa_config_is_mhi; + bool use_ipa_teth_bridge; + bool modem_cfg_emb_pipe_flt; + bool ipa_wdi2; + bool ipa_wdi2_over_gsi; + bool ipa_fltrt_not_hashable; + bool use_64_bit_dma_mask; + /* featurize if memory footprint becomes a concern */ + struct ipa3_stats stats; + void *smem_pipe_mem; + void *logbuf; + void *logbuf_low; + u32 ipa_bus_hdl; + struct ipa3_controller *ctrl; + struct idr ipa_idr; + struct platform_device *master_pdev; + struct device *pdev; + struct device *uc_pdev; + spinlock_t idr_lock; + u32 enable_clock_scaling; + u32 enable_napi_chain; + u32 curr_ipa_clk_rate; + bool 
q6_proxy_clk_vote_valid; + struct mutex q6_proxy_clk_vote_mutex; + u32 q6_proxy_clk_vote_cnt; + u32 ipa_num_pipes; + dma_addr_t pkt_init_imm[IPA3_MAX_NUM_PIPES]; + u32 pkt_init_imm_opcode; + + struct ipa3_wlan_comm_memb wc_memb; + + struct ipa3_uc_ctx uc_ctx; + + struct ipa3_uc_wdi_ctx uc_wdi_ctx; + struct ipa3_uc_ntn_ctx uc_ntn_ctx; + u32 wan_rx_ring_size; + u32 lan_rx_ring_size; + bool skip_uc_pipe_reset; + unsigned long gsi_dev_hdl; + u32 ee; + bool apply_rg10_wa; + bool gsi_ch20_wa; + bool s1_bypass_arr[IPA_SMMU_CB_MAX]; + u32 wdi_map_cnt; + struct wakeup_source w_lock; + struct ipa3_wakelock_ref_cnt wakelock_ref_cnt; + /* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */ + bool ipa_client_apps_wan_cons_agg_gro; + /* M-release support to know client pipes */ + struct ipa3cm_client_info ipacm_client[IPA3_MAX_NUM_PIPES]; + bool tethered_flow_control; + bool ipa_initialization_complete; + struct list_head ipa_ready_cb_list; + struct completion init_completion_obj; + struct completion uc_loaded_completion_obj; + struct ipa3_smp2p_info smp2p_info; + u32 mhi_evid_limits[2]; /* start and end values */ + u32 ipa_tz_unlock_reg_num; + struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg; + struct ipa_dma_task_info dma_task_info; + struct ipa_hw_stats hw_stats; + struct ipa_cne_evt ipa_cne_evt_req_cache[IPA_MAX_NUM_REQ_CACHE]; + int num_ipa_cne_evt_req; + struct mutex ipa_cne_evt_lock; + bool use_ipa_pm; + bool vlan_mode_iface[IPA_VLAN_IF_MAX]; + bool wdi_over_pcie; + struct ipa3_wdi2_ctx wdi2_ctx; + struct mbox_client mbox_client; + struct mbox_chan *mbox; +}; + +struct ipa3_plat_drv_res { + bool use_ipa_teth_bridge; + u32 ipa_mem_base; + u32 ipa_mem_size; + u32 transport_mem_base; + u32 transport_mem_size; + u32 emulator_intcntrlr_mem_base; + u32 emulator_intcntrlr_mem_size; + u32 emulator_irq; + u32 ipa_irq; + u32 transport_irq; + u32 ipa_pipe_mem_start_ofst; + u32 ipa_pipe_mem_size; + enum ipa_hw_type ipa_hw_type; + enum ipa3_hw_mode ipa3_hw_mode; + u32 ee; + bool modem_cfg_emb_pipe_flt; + bool ipa_wdi2; + bool ipa_wdi2_over_gsi; + bool ipa_fltrt_not_hashable; + bool use_64_bit_dma_mask; + bool use_bw_vote; + u32 wan_rx_ring_size; + u32 lan_rx_ring_size; + bool skip_uc_pipe_reset; + bool apply_rg10_wa; + bool gsi_ch20_wa; + bool tethered_flow_control; + u32 mhi_evid_limits[2]; /* start and end values */ + bool ipa_mhi_dynamic_config; + u32 ipa_tz_unlock_reg_num; + struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg; + bool use_ipa_pm; + struct ipa_pm_init_params pm_init; + bool wdi_over_pcie; +}; + +/** + * struct ipa3_mem_partition - represents IPA RAM Map as read from DTS + * Order and type of members should not be changed without a suitable change + * to DTS file or the code that reads it. 
+ * + * IPA SRAM memory layout: + * +-------------------------+ + * | UC MEM | + * +-------------------------+ + * | UC INFO | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V4 FLT HDR HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V4 FLT HDR NON-HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V6 FLT HDR HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V6 FLT HDR NON-HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V4 RT HDR HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V4 RT HDR NON-HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V6 RT HDR HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V6 RT HDR NON-HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | MODEM HDR | + * +-------------------------+ + * | APPS HDR (IPA4.5) | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | MODEM PROC CTX | + * +-------------------------+ + * | APPS PROC CTX | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY (IPA4.5) | + * +-------------------------+ + * | CANARY (IPA4.5) | + * +-------------------------+ + * | NAT TABLE (IPA4.5) | + * +-------------------------+ + * | NAT IDX TABLE (IPA4.5) | + * +-------------------------+ + * | NAT EXP TABLE (IPA4.5) | + * +-------------------------+ + * | CANARY (IPA4.5) | + * +-------------------------+ + * | CANARY (IPA4.5) | + * +-------------------------+ + * | PDN CONFIG | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | QUOTA STATS | + * +-------------------------+ + * | TETH STATS | + * +-------------------------+ + * | FnR STATS | + * +-------------------------+ + * | DROP STATS | + * +-------------------------+ + * | CANARY (IPA4.5) | + * +-------------------------+ + * | CANARY (IPA4.5) | + * +-------------------------+ + * | MODEM MEM | + * +-------------------------+ + * | Dummy (IPA4.5) | + * +-------------------------+ + * | CANARY (IPA4.5) | + * +-------------------------+ + * | UC DESC RAM (IPA3.5) | + * +-------------------------+ + */ +struct ipa3_mem_partition { + u32 ofst_start; + u32 v4_flt_hash_ofst; + u32 v4_flt_hash_size; + u32 v4_flt_hash_size_ddr; + u32 v4_flt_nhash_ofst; + u32 v4_flt_nhash_size; + u32 v4_flt_nhash_size_ddr; + u32 v6_flt_hash_ofst; + u32 v6_flt_hash_size; + u32 v6_flt_hash_size_ddr; + u32 v6_flt_nhash_ofst; + u32 v6_flt_nhash_size; + u32 v6_flt_nhash_size_ddr; + u32 v4_rt_num_index; + u32 v4_modem_rt_index_lo; + u32 v4_modem_rt_index_hi; + u32 v4_apps_rt_index_lo; + u32 v4_apps_rt_index_hi; + u32 v4_rt_hash_ofst; + u32 v4_rt_hash_size; + u32 
v4_rt_hash_size_ddr; + u32 v4_rt_nhash_ofst; + u32 v4_rt_nhash_size; + u32 v4_rt_nhash_size_ddr; + u32 v6_rt_num_index; + u32 v6_modem_rt_index_lo; + u32 v6_modem_rt_index_hi; + u32 v6_apps_rt_index_lo; + u32 v6_apps_rt_index_hi; + u32 v6_rt_hash_ofst; + u32 v6_rt_hash_size; + u32 v6_rt_hash_size_ddr; + u32 v6_rt_nhash_ofst; + u32 v6_rt_nhash_size; + u32 v6_rt_nhash_size_ddr; + u32 modem_hdr_ofst; + u32 modem_hdr_size; + u32 apps_hdr_ofst; + u32 apps_hdr_size; + u32 apps_hdr_size_ddr; + u32 modem_hdr_proc_ctx_ofst; + u32 modem_hdr_proc_ctx_size; + u32 apps_hdr_proc_ctx_ofst; + u32 apps_hdr_proc_ctx_size; + u32 apps_hdr_proc_ctx_size_ddr; + u32 nat_tbl_ofst; + u32 nat_tbl_size; + u32 nat_index_tbl_ofst; + u32 nat_index_tbl_size; + u32 nat_exp_tbl_ofst; + u32 nat_exp_tbl_size; + u32 modem_comp_decomp_ofst; + u32 modem_comp_decomp_size; + u32 modem_ofst; + u32 modem_size; + u32 apps_v4_flt_hash_ofst; + u32 apps_v4_flt_hash_size; + u32 apps_v4_flt_nhash_ofst; + u32 apps_v4_flt_nhash_size; + u32 apps_v6_flt_hash_ofst; + u32 apps_v6_flt_hash_size; + u32 apps_v6_flt_nhash_ofst; + u32 apps_v6_flt_nhash_size; + u32 uc_info_ofst; + u32 uc_info_size; + u32 end_ofst; + u32 apps_v4_rt_hash_ofst; + u32 apps_v4_rt_hash_size; + u32 apps_v4_rt_nhash_ofst; + u32 apps_v4_rt_nhash_size; + u32 apps_v6_rt_hash_ofst; + u32 apps_v6_rt_hash_size; + u32 apps_v6_rt_nhash_ofst; + u32 apps_v6_rt_nhash_size; + u32 uc_descriptor_ram_ofst; + u32 uc_descriptor_ram_size; + u32 pdn_config_ofst; + u32 pdn_config_size; + u32 stats_quota_ofst; + u32 stats_quota_size; + u32 stats_tethering_ofst; + u32 stats_tethering_size; + u32 stats_fnr_ofst; + u32 stats_fnr_size; + + /* Irrelevant starting IPA4.5 */ + u32 stats_flt_v4_ofst; + u32 stats_flt_v4_size; + u32 stats_flt_v6_ofst; + u32 stats_flt_v6_size; + u32 stats_rt_v4_ofst; + u32 stats_rt_v4_size; + u32 stats_rt_v6_ofst; + u32 stats_rt_v6_size; + + u32 stats_drop_ofst; + u32 stats_drop_size; +}; + +struct ipa3_controller { + struct ipa3_mem_partition *mem_partition; + u32 ipa_clk_rate_turbo; + u32 ipa_clk_rate_nominal; + u32 ipa_clk_rate_svs; + u32 ipa_clk_rate_svs2; + u32 clock_scaling_bw_threshold_turbo; + u32 clock_scaling_bw_threshold_nominal; + u32 clock_scaling_bw_threshold_svs; + u32 ipa_reg_base_ofst; + u32 max_holb_tmr_val; + void (*ipa_sram_read_settings)(void); + int (*ipa_init_sram)(void); + int (*ipa_init_hdr)(void); + int (*ipa_init_rt4)(void); + int (*ipa_init_rt6)(void); + int (*ipa_init_flt4)(void); + int (*ipa_init_flt6)(void); + int (*ipa3_read_ep_reg)(char *buff, int max_len, int pipe); + int (*ipa3_commit_flt)(enum ipa_ip_type ip); + int (*ipa3_commit_rt)(enum ipa_ip_type ip); + int (*ipa3_commit_hdr)(void); + void (*ipa3_enable_clks)(void); + void (*ipa3_disable_clks)(void); + struct msm_bus_scale_pdata *msm_bus_data_ptr; +}; + +extern struct ipa3_context *ipa3_ctx; + +/* public APIs */ +/* Generic GSI channels functions */ +int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params, + struct ipa_req_chan_out_params *out_params); + +int ipa3_release_gsi_channel(u32 clnt_hdl); + +int ipa3_start_gsi_channel(u32 clnt_hdl); + +int ipa3_stop_gsi_channel(u32 clnt_hdl); + +int ipa3_reset_gsi_channel(u32 clnt_hdl); + +int ipa3_reset_gsi_event_ring(u32 clnt_hdl); + +/* Specific xDCI channels functions */ +int ipa3_set_usb_max_packet_size( + enum ipa_usb_max_usb_packet_size usb_max_packet_size); + +int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid); + +int ipa3_xdci_connect(u32 clnt_hdl); + +int ipa3_xdci_disconnect(u32 clnt_hdl, 
bool should_force_clear, u32 qmi_req_id); + +void ipa3_xdci_ep_delay_rm(u32 clnt_hdl); +void ipa3_register_lock_unlock_callback(int (*client_cb)(bool), u32 ipa_ep_idx); +void ipa3_deregister_lock_unlock_callback(u32 ipa_ep_idx); +int ipa3_set_reset_client_prod_pipe_delay(bool set_reset, + enum ipa_client_type client); +int ipa3_set_reset_client_cons_pipe_sus_holb(bool set_reset, + enum ipa_client_type client); + +int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + bool should_force_clear, u32 qmi_req_id, bool is_dpl); + +int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl); + +/* + * Remove ep delay + */ +int ipa3_clear_endpoint_delay(u32 clnt_hdl); + +/* + * Configuration + */ +int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg); + +int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg); + +int ipa3_cfg_ep_conn_track(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ep_conn_track); + +int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg); + +int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg); + +int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg); + +int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg); + +int ipa3_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ipa_ep_cfg); + +int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg); + +int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +void ipa3_cal_ep_holb_scale_base_val(u32 tmr_val, + struct ipa_ep_cfg_holb *ep_holb); + +int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg); + +int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg); + +int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); + +/* + * Header removal / addition + */ +int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs); + +int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool by_user); + +int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls); + +int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user); + +int ipa3_commit_hdr(void); + +int ipa3_reset_hdr(bool user_only); + +int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup); + +int ipa3_put_hdr(u32 hdr_hdl); + +int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy); + +/* + * Header Processing Context + */ +int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); + +int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); + +int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls, + bool by_user); + +/* + * Routing + */ +int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); + +int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, + bool user_only); + +int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules); + +int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules); + +int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); + +int ipa3_commit_rt(enum ipa_ip_type ip); + +int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only); + +int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); + +int ipa3_put_rt_tbl(u32 rt_tbl_hdl); + +int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in); + +int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); + +/* + * Filtering + */ +int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); + 
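The add/del/mdfy prototypes in these routing and filtering groups take the uapi ioctl payloads directly; those structs end in a flexible rule array, so callers size the allocation as the fixed header plus num_rules entries. A hedged sketch of that idiom for ipa3_add_flt_rule() follows; my_install_flt_rules() is a hypothetical helper, the field names (commit, ip, ep, global, num_rules, rules[]) are recalled from the uapi msm_ipa.h and should be checked against it, and IPA_CLIENT_USB_PROD is only a placeholder client. The surrounding file is assumed to already pull in linux/slab.h and the IPA headers.

static int my_install_flt_rules(u8 num_rules)
{
	struct ipa_ioc_add_flt_rule *req;
	int ret;

	/* fixed header plus a variable-length array of rule entries */
	req = kzalloc(sizeof(*req) +
		      num_rules * sizeof(struct ipa_flt_rule_add), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->commit = 1;			/* write the table to HW in this call */
	req->ip = IPA_IP_v4;
	req->ep = IPA_CLIENT_USB_PROD;		/* placeholder filtering end-point */
	req->global = 0;
	req->num_rules = num_rules;
	/* fill req->rules[i].rule and req->rules[i].at_rear here */

	ret = ipa3_add_flt_rule(req);
	kfree(req);
	return ret;
}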
+int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, + bool user_only); + +int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules); + +int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); + +int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); + +int ipa3_commit_flt(enum ipa_ip_type ip); + +int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only); + +/* + * NAT + */ +int ipa3_nat_ipv6ct_init_devices(void); +void ipa3_nat_ipv6ct_destroy_devices(void); + +int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem); +int ipa3_allocate_nat_table( + struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc); +int ipa3_allocate_ipv6ct_table( + struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc); + +int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init); +int ipa3_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init); + +int ipa3_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma); +int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma); + +int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del); +int ipa3_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del); +int ipa3_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del); + +int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn); + +/* + * Messaging + */ +int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback); +int ipa3_resend_wlan_msg(void); +int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback); +int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta); + +/* + * Interface + */ +int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx); +int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext); +int ipa3_deregister_intf(const char *name); + +/* + * Aggregation + */ +int ipa3_set_aggr_mode(enum ipa_aggr_mode mode); + +int ipa3_set_qcncm_ndp_sig(char sig[3]); + +int ipa3_set_single_ndp_per_mbim(bool enable); + +/* + * Data path + */ +int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata); + +/* + * To transfer multiple data packets + * While passing the data descriptor list, the anchor node + * should be of type struct ipa_tx_data_desc not list_head + */ +int ipa3_tx_dp_mul(enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc); + +void ipa3_free_skb(struct ipa_rx_data *data); + +/* + * System pipes + */ +int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl); + +int ipa3_teardown_sys_pipe(u32 clnt_hdl); + +int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_transport_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status); + +int ipa3_sys_teardown(u32 clnt_hdl); + +int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl); + +int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out); +int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out); + +int ipa3_disconnect_wdi_pipe(u32 clnt_hdl); +int ipa3_enable_wdi_pipe(u32 clnt_hdl); +int ipa3_enable_gsi_wdi_pipe(u32 clnt_hdl); +int ipa3_disable_wdi_pipe(u32 clnt_hdl); +int ipa3_disable_gsi_wdi_pipe(u32 clnt_hdl); +int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl); +int ipa3_resume_wdi_pipe(u32 clnt_hdl); +int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl); +int ipa3_suspend_wdi_pipe(u32 clnt_hdl); +int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats); +u16 
ipa3_get_smem_restr_bytes(void); +int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes); +int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp); +int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl, + struct ipa_ntn_conn_in_params *params); +int ipa3_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv); +void ipa3_ntn_uc_dereg_rdyCB(void); +int ipa3_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out, + ipa_wdi_meter_notifier_cb wdi_notify); +int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); +int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); +int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); + +/* + * To retrieve doorbell physical address of + * wlan pipes + */ +int ipa3_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out); + +/* + * To register uC ready callback if uC not ready + * and also check uC readiness + * if uC not ready only, register callback + */ +int ipa3_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param); +/* + * To de-register uC ready callback + */ +int ipa3_uc_dereg_rdyCB(void); + +int ipa_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en, + phys_addr_t pa, struct sg_table *sgt, size_t len, bool device, + unsigned long *iova); + +/* + * Tethering bridge (Rmnet / MBIM) + */ +int ipa3_teth_bridge_init(struct teth_bridge_init_params *params); + +int ipa3_teth_bridge_disconnect(enum ipa_client_type client); + +int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params); + +/* + * Tethering client info + */ +void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink); + +enum ipacm_client_enum ipa3_get_client(int pipe_idx); + +bool ipa3_get_client_uplink(int pipe_idx); + +int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats); + +int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota); +/* + * IPADMA + */ +int ipa3_dma_init(void); + +int ipa3_dma_enable(void); + +int ipa3_dma_disable(void); + +int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len); + +int ipa3_dma_async_memcpy(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param); + +int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); + +void ipa3_dma_destroy(void); + +/* + * MHI + */ + +int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params); + +int ipa3_connect_mhi_pipe( + struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl); + +int ipa3_disconnect_mhi_pipe(u32 clnt_hdl); + +bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client); + +int ipa3_mhi_reset_channel_internal(enum ipa_client_type client); + +int ipa3_mhi_start_channel_internal(enum ipa_client_type client); + +bool ipa3_has_open_aggr_frame(enum ipa_client_type client); + +int ipa3_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index); + +int ipa3_mhi_destroy_channel(enum ipa_client_type client); + +/* + * mux id + */ +int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in); + +/* + * interrupts + */ +int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data); + +int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt); + +/* + * Miscellaneous + */ +int ipa3_get_ep_mapping(enum ipa_client_type client); + +bool ipa3_is_ready(void); + 
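ipa3_is_ready() above backs the driver's readiness contract: a client either finds IPA already initialized or queues itself on ipa_ready_cb_list (see struct ipa3_ready_cb_info earlier) and finishes its own setup from the callback once probing completes. A minimal client-side sketch, not part of this patch, assuming the public ipa_register_ipa_ready_cb() declared in include/linux/ipa.h and hypothetical my_client_* helpers:

static void my_client_ipa_ready_cb(void *user_data)
{
	struct my_client_ctx *ctx = user_data;	/* hypothetical client context */

	my_client_finish_init(ctx);		/* hypothetical second-stage init */
}

static int my_client_init(struct my_client_ctx *ctx)
{
	if (ipa3_is_ready())
		return my_client_finish_init(ctx);

	/* not ready yet: queue a callback on ipa_ready_cb_list and return */
	return ipa_register_ipa_ready_cb(my_client_ipa_ready_cb, ctx);
}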
+void ipa3_proxy_clk_vote(void); +void ipa3_proxy_clk_unvote(void); + +bool ipa3_is_client_handle_valid(u32 clnt_hdl); + +enum ipa_client_type ipa3_get_client_mapping(int pipe_idx); +enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx); + +void ipa_init_ep_flt_bitmap(void); + +bool ipa_is_ep_support_flt(int pipe_idx); + +enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx); + +bool ipa3_get_modem_cfg_emb_pipe_flt(void); + +u8 ipa3_get_qmb_master_sel(enum ipa_client_type client); + +int ipa3_get_smmu_params(struct ipa_smmu_in_params *in, + struct ipa_smmu_out_params *out); + +/* internal functions */ + +int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, + struct ipa_api_controller *api_ctrl); + +bool ipa_is_modem_pipe(int pipe_idx); + +int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc, + bool in_atomic); +int ipa3_send(struct ipa3_sys_context *sys, + u32 num_desc, + struct ipa3_desc *desc, + bool in_atomic); +int ipa3_get_ep_mapping(enum ipa_client_type client); +int ipa_get_ep_group(enum ipa_client_type client); + +int ipa3_generate_hw_rule(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + u8 **buf, + u16 *en_rule); +int ipa3_init_hw(void); +struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name); +int ipa3_set_single_ndp_per_mbim(bool enable); +void ipa3_debugfs_init(void); +void ipa3_debugfs_remove(void); + +void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size); +#ifdef IPA_DEBUG +#define IPA_DUMP_BUFF(base, phy_base, size) \ + ipa3_dump_buff_internal(base, phy_base, size) +#else +#define IPA_DUMP_BUFF(base, phy_base, size) +#endif +int ipa3_init_mem_partition(enum ipa_hw_type ipa_hw_type); +int ipa3_controller_static_bind(struct ipa3_controller *controller, + enum ipa_hw_type ipa_hw_type); +int ipa3_cfg_route(struct ipahal_reg_route *route); +int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout); +int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr); +int ipa3_cfg_filter(u32 disable); +int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary); +struct ipa3_context *ipa3_get_ctx(void); +void ipa3_enable_clks(void); +void ipa3_disable_clks(void); +void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id); +int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info + *id); +void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id); +void ipa3_dec_client_disable_clks_no_block( + struct ipa_active_client_logging_info *id); +void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id, + bool int_ctx); +void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id, + bool int_ctx); +int ipa3_active_clients_log_print_buffer(char *buf, int size); +int ipa3_active_clients_log_print_table(char *buf, int size); +void ipa3_active_clients_log_clear(void); +int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev); +void ipa3_interrupts_destroy(u32 ipa_irq, struct device *ipa_dev); +int __ipa3_del_rt_rule(u32 rule_hdl); +int __ipa3_del_hdr(u32 hdr_hdl, bool by_user); +int __ipa3_release_hdr(u32 hdr_hdl); +int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl); +int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe); +int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe); +int _ipa_read_ipahal_regs(void); +void _ipa_enable_clks_v3_0(void); +void _ipa_disable_clks_v3_0(void); +struct device *ipa3_get_dma_dev(void); +void ipa3_suspend_active_aggr_wa(u32 clnt_hdl); +void 
ipa3_suspend_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data); + +ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos); +int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count); +int ipa3_query_intf(struct ipa_ioc_query_intf *lookup); +int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx); +int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx); +int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext); + +void wwan_cleanup(void); + +int ipa3_teth_bridge_driver_init(void); +void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data); + +int _ipa_init_sram_v3(void); +int _ipa_init_hdr_v3_0(void); +int _ipa_init_rt4_v3(void); +int _ipa_init_rt6_v3(void); +int _ipa_init_flt4_v3(void); +int _ipa_init_flt6_v3(void); + +int __ipa_commit_flt_v3(enum ipa_ip_type ip); +int __ipa_commit_rt_v3(enum ipa_ip_type ip); + +int __ipa_commit_hdr_v3_0(void); +void ipa3_skb_recycle(struct sk_buff *skb); +void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx); +void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx); + +int ipa3_enable_data_path(u32 clnt_hdl); +int ipa3_disable_data_path(u32 clnt_hdl); +int ipa3_disable_gsi_data_path(u32 clnt_hdl); +int ipa3_alloc_rule_id(struct idr *rule_ids); +int ipa3_id_alloc(void *ptr); +void *ipa3_id_find(u32 id); +void ipa3_id_remove(u32 id); +int ipa3_enable_force_clear(u32 request_id, bool throttle_source, + u32 source_pipe_bitmask); +int ipa3_disable_force_clear(u32 request_id); + +int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps); + +int ipa3_cfg_ep_status(u32 clnt_hdl, + const struct ipahal_reg_ep_cfg_status *ipa_ep_cfg); + +int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name name); +int ipa3_suspend_resource_sync(enum ipa_rm_resource_name name); +int ipa3_resume_resource(enum ipa_rm_resource_name name); +bool ipa3_should_pipe_be_suspended(enum ipa_client_type client); +int ipa3_tag_aggr_force_close(int pipe_num); + +void ipa3_active_clients_unlock(void); +int ipa3_wdi_init(void); +int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id); +int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id); +int ipa3_tag_process(struct ipa3_desc *desc, int num_descs, + unsigned long timeout); + +void ipa3_q6_pre_shutdown_cleanup(void); +void ipa3_q6_post_shutdown_cleanup(void); +int ipa3_init_q6_smem(void); + +int ipa3_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req); +int ipa3_mhi_query_ch_info(enum ipa_client_type client, + struct gsi_chan_info *ch_info); + +int ipa3_uc_interface_init(void); +int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client); +int ipa3_uc_state_check(void); +int ipa3_uc_loaded_check(void); +void ipa3_uc_load_notify(void); +int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status, + bool polling_mode, unsigned long timeout_jiffies); +void ipa3_uc_register_handlers(enum ipa3_hw_features feature, + struct ipa3_uc_hdlrs *hdlrs); +int ipa3_uc_notify_clk_state(bool enabled); +int ipa3_dma_setup(void); +void ipa3_dma_shutdown(void); +void ipa3_dma_async_memcpy_notify_cb(void *priv, + enum ipa_dp_evt_type evt, unsigned long data); + +int ipa3_uc_update_hw_flags(u32 flags); + +int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void)); +void ipa3_uc_mhi_cleanup(void); +int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd); +int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr, + u32 
host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx, + u32 first_evt_idx); +int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle, + int contexArrayIndex, int channelDirection); +int ipa3_uc_mhi_reset_channel(int channelHandle); +int ipa3_uc_mhi_suspend_channel(int channelHandle); +int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected); +int ipa3_uc_mhi_stop_event_update_channel(int channelHandle); +int ipa3_uc_mhi_print_stats(char *dbg_buff, int size); +int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); +int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n); +void ipa3_tag_destroy_imm(void *user1, int user2); +const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info + (enum ipa_client_type client); +void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val); + +/* Hardware stats */ + +#define IPA_STATS_MAX_PIPE_BIT 32 + +struct ipa_teth_stats_endpoints { + u32 prod_mask; + u32 dst_ep_mask[IPA_STATS_MAX_PIPE_BIT]; +}; + +struct ipa_flt_rt_stats { + u32 num_pkts; + u32 num_pkts_hash; +}; + +int ipa_hw_stats_init(void); + +int ipa_debugfs_init_stats(struct dentry *parent); + +int ipa_init_quota_stats(u32 pipe_bitmask); + +int ipa_get_quota_stats(struct ipa_quota_stats_all *out); + +int ipa_reset_quota_stats(enum ipa_client_type client); + +int ipa_reset_all_quota_stats(void); + +int ipa_init_drop_stats(u32 pipe_bitmask); + +int ipa_get_drop_stats(struct ipa_drop_stats_all *out); + +int ipa_reset_drop_stats(enum ipa_client_type client); + +int ipa_reset_all_drop_stats(void); + +int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in); + +int ipa_get_teth_stats(void); + +int ipa_query_teth_stats(enum ipa_client_type prod, + struct ipa_quota_stats_all *out, bool reset); + +int ipa_reset_teth_stats(enum ipa_client_type prod, enum ipa_client_type cons); + +int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod); + +int ipa_reset_all_teth_stats(void); + +int ipa_flt_rt_stats_add_rule_id(enum ipa_ip_type ip, bool filtering, + u16 rule_id); + +int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering); + +int ipa_flt_rt_stats_clear_rule_ids(enum ipa_ip_type ip, bool filtering); + +int ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id, + struct ipa_flt_rt_stats *out); + +int ipa_reset_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id); + +int ipa_reset_all_flt_rt_stats(enum ipa_ip_type ip, bool filtering); + +u32 ipa3_get_num_pipes(void); +struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type); +struct iommu_domain *ipa3_get_smmu_domain(void); +struct iommu_domain *ipa3_get_uc_smmu_domain(void); +struct iommu_domain *ipa3_get_wlan_smmu_domain(void); +struct iommu_domain *ipa3_get_smmu_domain_by_type + (enum ipa_smmu_cb_type cb_type); +int ipa3_iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); +int ipa3_ap_suspend(struct device *dev); +int ipa3_ap_resume(struct device *dev); +int ipa3_init_interrupts(void); +struct iommu_domain *ipa3_get_smmu_domain(void); +int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); +int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); +int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple); +int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple); +void ipa3_set_resorce_groups_min_max_limits(void); +void ipa3_suspend_apps_pipes(bool suspend); +int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, + enum 
ipa_ip_type ip_type, + bool hashable, + struct ipahal_flt_rule_entry entry[], + int *num_entry); +int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, + enum ipa_ip_type ip_type, + bool hashable, + struct ipahal_rt_rule_entry entry[], + int *num_entry); +int ipa3_restore_suspend_handler(void); +int ipa3_inject_dma_task_for_gsi(void); +int ipa3_uc_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr); +void ipa3_inc_acquire_wakelock(void); +void ipa3_dec_release_wakelock(void); +int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base, + enum gsi_ver); +int emulator_load_fws( + const struct firmware *firmware, + u32 transport_mem_base, + u32 transport_mem_size, + enum gsi_ver); +int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data); +const char *ipa_hw_error_str(enum ipa3_hw_errors err_type); +int ipa_gsi_ch20_wa(void); +int ipa3_rx_poll(u32 clnt_hdl, int budget); +void ipa3_recycle_wan_skb(struct sk_buff *skb); +int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map, + enum ipa_smmu_cb_type cb_type); +int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt, + enum ipa_smmu_cb_type cb_type); +void ipa3_reset_freeze_vote(void); +int ipa3_ntn_init(void); +int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats); +struct dentry *ipa_debugfs_get_root(void); +bool ipa3_is_msm_device(void); +struct device *ipa3_get_pdev(void); +void ipa3_enable_dcd(void); +void ipa3_disable_prefetch(enum ipa_client_type client); +int ipa3_alloc_common_event_ring(void); +int ipa3_allocate_dma_task_for_gsi(void); +void ipa3_free_dma_task_for_gsi(void); +int ipa3_set_clock_plan_from_pm(int idx); +void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys); +int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs); +void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc, + struct ipahal_imm_cmd_pyld *cmd_pyld); +int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res); +uint ipa3_get_emulation_type(void); +int ipa3_get_transport_info( + phys_addr_t *phys_addr_ptr, + unsigned long *size_ptr); +irq_handler_t ipa3_get_isr(void); +void ipa_pc_qmp_enable(void); +#endif /* _IPA3_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c new file mode 100644 index 000000000000..683207a5b8e7 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c @@ -0,0 +1,629 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include "ipa_i.h" + +#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq" +#define DIS_SUSPEND_INTERRUPT_TIMEOUT 5 +#define IPA_IRQ_NUM_MAX 32 + +struct ipa3_interrupt_info { + ipa_irq_handler_t handler; + enum ipa_irq_type interrupt; + void *private_data; + bool deferred_flag; +}; + +struct ipa3_interrupt_work_wrap { + struct work_struct interrupt_work; + ipa_irq_handler_t handler; + enum ipa_irq_type interrupt; + void *private_data; + void *interrupt_data; +}; + +static struct ipa3_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX]; +static struct workqueue_struct *ipa_interrupt_wq; +static u32 ipa_ee; + +static void ipa3_tx_suspend_interrupt_wa(void); +static void ipa3_enable_tx_suspend_wa(struct work_struct *work); +static DECLARE_DELAYED_WORK(dwork_en_suspend_int, + ipa3_enable_tx_suspend_wa); +static spinlock_t suspend_wa_lock; +static void ipa3_process_interrupts(bool isr_context); + +static int ipa3_irq_mapping[IPA_IRQ_MAX] = { + [IPA_BAD_SNOC_ACCESS_IRQ] = 0, + [IPA_UC_IRQ_0] = 2, + [IPA_UC_IRQ_1] = 3, + [IPA_UC_IRQ_2] = 4, + [IPA_UC_IRQ_3] = 5, + [IPA_UC_IN_Q_NOT_EMPTY_IRQ] = 6, + [IPA_UC_RX_CMD_Q_NOT_FULL_IRQ] = 7, + [IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ] = 8, + [IPA_RX_ERR_IRQ] = 9, + [IPA_DEAGGR_ERR_IRQ] = 10, + [IPA_TX_ERR_IRQ] = 11, + [IPA_STEP_MODE_IRQ] = 12, + [IPA_PROC_ERR_IRQ] = 13, + [IPA_TX_SUSPEND_IRQ] = 14, + [IPA_TX_HOLB_DROP_IRQ] = 15, + [IPA_BAM_GSI_IDLE_IRQ] = 16, + [IPA_PIPE_YELLOW_MARKER_BELOW_IRQ] = 17, + [IPA_PIPE_RED_MARKER_BELOW_IRQ] = 18, + [IPA_PIPE_YELLOW_MARKER_ABOVE_IRQ] = 19, + [IPA_PIPE_RED_MARKER_ABOVE_IRQ] = 20, + [IPA_UCP_IRQ] = 21, + [IPA_DCMP_IRQ] = 22, + [IPA_GSI_EE_IRQ] = 23, + [IPA_GSI_IPA_IF_TLV_RCVD_IRQ] = 24, + [IPA_GSI_UC_IRQ] = 25, + [IPA_TLV_LEN_MIN_DSM_IRQ] = 26, +}; + +static void ipa3_interrupt_defer(struct work_struct *work); +static DECLARE_WORK(ipa3_interrupt_defer_work, ipa3_interrupt_defer); + +static void ipa3_deferred_interrupt_work(struct work_struct *work) +{ + struct ipa3_interrupt_work_wrap *work_data = + container_of(work, + struct ipa3_interrupt_work_wrap, + interrupt_work); + IPADBG("call handler from workq for interrupt %d...\n", + work_data->interrupt); + work_data->handler(work_data->interrupt, work_data->private_data, + work_data->interrupt_data); + kfree(work_data->interrupt_data); + kfree(work_data); +} + +static bool ipa3_is_valid_ep(u32 ep_suspend_data) +{ + u32 bmsk = 1; + u32 i = 0; + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if ((ep_suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) + return true; + bmsk = bmsk << 1; + } + return false; +} + +static int ipa3_handle_interrupt(int irq_num, bool isr_context) +{ + struct ipa3_interrupt_info interrupt_info; + struct ipa3_interrupt_work_wrap *work_data; + u32 suspend_data; + void *interrupt_data = NULL; + struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL; + int res; + + interrupt_info = ipa_interrupt_to_cb[irq_num]; + if (interrupt_info.handler == NULL) { + IPAERR("A callback function wasn't set for interrupt num %d\n", + irq_num); + return -EINVAL; + } + + switch (interrupt_info.interrupt) { + case IPA_TX_SUSPEND_IRQ: + IPADBG_LOW("processing TX_SUSPEND interrupt\n"); + ipa3_tx_suspend_interrupt_wa(); + suspend_data = ipahal_read_reg_n(IPA_SUSPEND_IRQ_INFO_EE_n, + ipa_ee); + IPADBG_LOW("get interrupt %d\n", suspend_data); + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) { + /* Clearing L2 interrupts status */ + ipahal_write_reg_n(IPA_SUSPEND_IRQ_CLR_EE_n, + ipa_ee, suspend_data); + } + if (!ipa3_is_valid_ep(suspend_data)) + return 
0; + + suspend_interrupt_data = + kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC); + if (!suspend_interrupt_data) { + IPAERR("failed allocating suspend_interrupt_data\n"); + return -ENOMEM; + } + suspend_interrupt_data->endpoints = suspend_data; + interrupt_data = suspend_interrupt_data; + break; + case IPA_UC_IRQ_0: + if (ipa3_ctx->apply_rg10_wa) { + /* + * Early detect of uC crash. If RG10 workaround is + * enable uC crash will not be detected as before + * processing uC event the interrupt is cleared using + * uC register write which times out as it crashed + * already. + */ + if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_ERROR) + ipa3_ctx->uc_ctx.uc_failed = true; + } + break; + default: + break; + } + + /* Force defer processing if in ISR context. */ + if (interrupt_info.deferred_flag || isr_context) { + IPADBG_LOW("Defer handling interrupt %d\n", + interrupt_info.interrupt); + work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap), + GFP_ATOMIC); + if (!work_data) { + IPAERR("failed allocating ipa3_interrupt_work_wrap\n"); + res = -ENOMEM; + goto fail_alloc_work; + } + INIT_WORK(&work_data->interrupt_work, + ipa3_deferred_interrupt_work); + work_data->handler = interrupt_info.handler; + work_data->interrupt = interrupt_info.interrupt; + work_data->private_data = interrupt_info.private_data; + work_data->interrupt_data = interrupt_data; + queue_work(ipa_interrupt_wq, &work_data->interrupt_work); + + } else { + IPADBG_LOW("Handle interrupt %d\n", interrupt_info.interrupt); + interrupt_info.handler(interrupt_info.interrupt, + interrupt_info.private_data, + interrupt_data); + kfree(interrupt_data); + } + + return 0; + +fail_alloc_work: + kfree(interrupt_data); + return res; +} + +static void ipa3_enable_tx_suspend_wa(struct work_struct *work) +{ + u32 en; + u32 suspend_bmask; + int irq_num; + + IPADBG_LOW("Enter\n"); + + irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ]; + + if (irq_num == -1) { + WARN_ON(1); + return; + } + + /* make sure ipa hw is clocked on*/ + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + suspend_bmask = 1 << irq_num; + /*enable TX_SUSPEND_IRQ*/ + en |= suspend_bmask; + IPADBG("enable TX_SUSPEND_IRQ, IPA_IRQ_EN_EE reg, write val = %u\n" + , en); + ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, en); + ipa3_process_interrupts(false); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + IPADBG_LOW("Exit\n"); +} + +static void ipa3_tx_suspend_interrupt_wa(void) +{ + u32 val; + u32 suspend_bmask; + int irq_num; + int wa_delay; + + IPADBG_LOW("Enter\n"); + irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ]; + + if (irq_num == -1) { + WARN_ON(1); + return; + } + + /*disable TX_SUSPEND_IRQ*/ + val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + suspend_bmask = 1 << irq_num; + val &= ~suspend_bmask; + IPADBG("Disabling TX_SUSPEND_IRQ, write val: %u to IPA_IRQ_EN_EE reg\n", + val); + ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val); + + IPADBG_LOW(" processing suspend interrupt work-around, delayed work\n"); + + wa_delay = DIS_SUSPEND_INTERRUPT_TIMEOUT; + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + wa_delay *= 400; + } + + IPADBG_LOW("Delay period %d msec\n", wa_delay); + + queue_delayed_work(ipa_interrupt_wq, &dwork_en_suspend_int, + msecs_to_jiffies(wa_delay)); + + IPADBG_LOW("Exit\n"); +} + +static inline bool is_uc_irq(int irq_num) +{ + if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 && + ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3) 
+ return true; + else + return false; +} + +static void ipa3_process_interrupts(bool isr_context) +{ + u32 reg; + u32 bmsk; + u32 i = 0; + u32 en; + unsigned long flags; + bool uc_irq; + + IPADBG_LOW("Enter isr_context=%d\n", isr_context); + + spin_lock_irqsave(&suspend_wa_lock, flags); + en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee); + while (en & reg) { + IPADBG_LOW("en=0x%x reg=0x%x\n", en, reg); + bmsk = 1; + for (i = 0; i < IPA_IRQ_NUM_MAX; i++) { + IPADBG_LOW("Check irq number %d\n", i); + if (en & reg & bmsk) { + IPADBG_LOW("Irq number %d asserted\n", i); + uc_irq = is_uc_irq(i); + + /* + * Clear uC interrupt before processing to avoid + * clearing unhandled interrupts + */ + if (uc_irq) + ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n, + ipa_ee, bmsk); + + /* + * handle the interrupt with spin_lock + * unlocked to avoid calling client in atomic + * context. mutual exclusion still preserved + * as the read/clr is done with spin_lock + * locked. + */ + spin_unlock_irqrestore(&suspend_wa_lock, flags); + ipa3_handle_interrupt(i, isr_context); + spin_lock_irqsave(&suspend_wa_lock, flags); + + /* + * Clear non uC interrupt after processing + * to avoid clearing interrupt data + */ + if (!uc_irq) + ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n, + ipa_ee, bmsk); + } + bmsk = bmsk << 1; + } + /* + * In case uC failed interrupt cannot be cleared. + * Device will crash as part of handling uC event handler. + */ + if (ipa3_ctx->apply_rg10_wa && ipa3_ctx->uc_ctx.uc_failed) + break; + + reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee); + /* since the suspend interrupt HW bug we must + * read again the EN register, otherwise the while is endless + */ + en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + } + + spin_unlock_irqrestore(&suspend_wa_lock, flags); + IPADBG_LOW("Exit\n"); +} + +static void ipa3_interrupt_defer(struct work_struct *work) +{ + IPADBG("processing interrupts in wq\n"); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipa3_process_interrupts(false); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG("Done\n"); +} + +static irqreturn_t ipa3_isr(int irq, void *ctxt) +{ + struct ipa_active_client_logging_info log_info; + + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); + IPADBG_LOW("Enter\n"); + /* defer interrupt handling in case IPA is not clocked on */ + if (ipa3_inc_client_enable_clks_no_block(&log_info)) { + IPADBG("defer interrupt processing\n"); + queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work); + return IRQ_HANDLED; + } + + ipa3_process_interrupts(true); + IPADBG_LOW("Exit\n"); + + ipa3_dec_client_disable_clks_no_block(&log_info); + return IRQ_HANDLED; +} + +irq_handler_t ipa3_get_isr(void) +{ + return ipa3_isr; +} + +/** + * ipa3_add_interrupt_handler() - Adds handler to an interrupt type + * @interrupt: Interrupt type + * @handler: The handler to be added + * @deferred_flag: whether the handler processing should be deferred in + * a workqueue + * @private_data: the client's private data + * + * Adds handler to an interrupt type and enable the specific bit + * in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled + */ +int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data) +{ + u32 val; + u32 bmsk; + int irq_num; + int client_idx, ep_idx; + + IPADBG("interrupt_enum(%d)\n", interrupt); + if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ || + interrupt >= IPA_IRQ_MAX) { + IPAERR("invalid interrupt number %d\n", interrupt); + return -EINVAL; + } + 
+ irq_num = ipa3_irq_mapping[interrupt]; + if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) { + IPAERR("interrupt %d not supported\n", interrupt); + WARN_ON(1); + return -EFAULT; + } + IPADBG("ipa_interrupt_to_cb irq_num(%d)\n", irq_num); + + ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag; + ipa_interrupt_to_cb[irq_num].handler = handler; + ipa_interrupt_to_cb[irq_num].private_data = private_data; + ipa_interrupt_to_cb[irq_num].interrupt = interrupt; + + val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + IPADBG("read IPA_IRQ_EN_EE_n register. reg = %d\n", val); + bmsk = 1 << irq_num; + val |= bmsk; + ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val); + IPADBG("wrote IPA_IRQ_EN_EE_n register. reg = %d\n", val); + + /* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt*/ + if ((interrupt == IPA_TX_SUSPEND_IRQ) && + (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) { + val = ~0; + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) + if (IPA_CLIENT_IS_Q6_CONS(client_idx) || + IPA_CLIENT_IS_Q6_PROD(client_idx)) { + ep_idx = ipa3_get_ep_mapping(client_idx); + IPADBG("modem ep_idx(%d) client_idx = %d\n", + ep_idx, client_idx); + if (ep_idx == -1) + IPADBG("Invalid IPA client\n"); + else + val &= ~(1 << ep_idx); + } + + ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, val); + IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", val); + } + return 0; +} + +/** + * ipa3_remove_interrupt_handler() - Removes handler to an interrupt type + * @interrupt: Interrupt type + * + * Removes the handler and disable the specific bit in IRQ_EN register + */ +int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt) +{ + u32 val; + u32 bmsk; + int irq_num; + + if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ || + interrupt >= IPA_IRQ_MAX) { + IPAERR("invalid interrupt number %d\n", interrupt); + return -EINVAL; + } + + irq_num = ipa3_irq_mapping[interrupt]; + if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) { + IPAERR("interrupt %d not supported\n", interrupt); + WARN_ON(1); + return -EFAULT; + } + + kfree(ipa_interrupt_to_cb[irq_num].private_data); + ipa_interrupt_to_cb[irq_num].deferred_flag = false; + ipa_interrupt_to_cb[irq_num].handler = NULL; + ipa_interrupt_to_cb[irq_num].private_data = NULL; + ipa_interrupt_to_cb[irq_num].interrupt = -1; + + /* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */ + if ((interrupt == IPA_TX_SUSPEND_IRQ) && + (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) { + ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0); + IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0); + } + + val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + bmsk = 1 << irq_num; + val &= ~bmsk; + ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val); + + return 0; +} + +/** + * ipa3_interrupts_init() - Initialize the IPA interrupts framework + * @ipa_irq: The interrupt number to allocate + * @ee: Execution environment + * @ipa_dev: The basic device structure representing the IPA driver + * + * - Initialize the ipa_interrupt_to_cb array + * - Clear interrupts status + * - Register the ipa interrupt handler - ipa3_isr + * - Enable apps processor wakeup by IPA interrupts + */ +int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev) +{ + int idx; + int res = 0; + + ipa_ee = ee; + for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) { + ipa_interrupt_to_cb[idx].deferred_flag = false; + ipa_interrupt_to_cb[idx].handler = NULL; + ipa_interrupt_to_cb[idx].private_data = NULL; + ipa_interrupt_to_cb[idx].interrupt = -1; + } + + ipa_interrupt_wq = create_singlethread_workqueue( + INTERRUPT_WORKQUEUE_NAME); 
+ if (!ipa_interrupt_wq) { + IPAERR("workqueue creation failed\n"); + return -ENOMEM; + } + + /* + * NOTE: + * + * We'll only register an isr on non-emulator (ie. real UE) + * systems. + * + * On the emulator, emulator_soft_irq_isr() will be calling + * ipa3_isr, so hence, no isr registration here, and instead, + * we'll pass the address of ipa3_isr to the gsi layer where + * emulator interrupts are handled... + */ + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) { + res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr, + IRQF_TRIGGER_RISING, "ipa", ipa_dev); + if (res) { + IPAERR( + "fail to register IPA IRQ handler irq=%d\n", + ipa_irq); + destroy_workqueue(ipa_interrupt_wq); + ipa_interrupt_wq = NULL; + return -ENODEV; + } + IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq); + + res = enable_irq_wake(ipa_irq); + if (res) + IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n", + ipa_irq, res); + else + IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq); + } + spin_lock_init(&suspend_wa_lock); + return 0; +} + +/** + * ipa3_interrupts_destroy() - Destroy the IPA interrupts framework + * @ipa_irq: The interrupt number to allocate + * @ee: Execution environment + * @ipa_dev: The basic device structure representing the IPA driver + * + * - Disable apps processor wakeup by IPA interrupts + * - Unregister the ipa interrupt handler - ipa3_isr + * - Destroy the interrupt workqueue + */ +void ipa3_interrupts_destroy(u32 ipa_irq, struct device *ipa_dev) +{ + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) { + disable_irq_wake(ipa_irq); + free_irq(ipa_irq, ipa_dev); + } + destroy_workqueue(ipa_interrupt_wq); + ipa_interrupt_wq = NULL; +} + +/** + * ipa3_suspend_active_aggr_wa() - Emulate suspend IRQ + * @clnt_hndl: suspended client handle, IRQ is emulated for this pipe + * + * Emulate suspend IRQ to unsuspend client which was suspended with an open + * aggregation frame in order to bypass HW bug of IRQ not generated when + * endpoint is suspended during an open aggregation. 
+ */ +void ipa3_suspend_active_aggr_wa(u32 clnt_hdl) +{ + struct ipa3_interrupt_info interrupt_info; + struct ipa3_interrupt_work_wrap *work_data; + struct ipa_tx_suspend_irq_data *suspend_interrupt_data; + int irq_num; + int aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + + if (aggr_active_bitmap & (1 << clnt_hdl)) { + /* force close aggregation */ + ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl)); + + /* simulate suspend IRQ */ + irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ]; + interrupt_info = ipa_interrupt_to_cb[irq_num]; + if (interrupt_info.handler == NULL) { + IPAERR("no CB function for IPA_TX_SUSPEND_IRQ\n"); + return; + } + suspend_interrupt_data = kzalloc( + sizeof(*suspend_interrupt_data), + GFP_ATOMIC); + if (!suspend_interrupt_data) { + IPAERR("failed allocating suspend_interrupt_data\n"); + return; + } + suspend_interrupt_data->endpoints = 1 << clnt_hdl; + + work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap), + GFP_ATOMIC); + if (!work_data) { + IPAERR("failed allocating ipa3_interrupt_work_wrap\n"); + goto fail_alloc_work; + } + INIT_WORK(&work_data->interrupt_work, + ipa3_deferred_interrupt_work); + work_data->handler = interrupt_info.handler; + work_data->interrupt = IPA_TX_SUSPEND_IRQ; + work_data->private_data = interrupt_info.private_data; + work_data->interrupt_data = (void *)suspend_interrupt_data; + queue_work(ipa_interrupt_wq, &work_data->interrupt_work); + return; +fail_alloc_work: + kfree(suspend_interrupt_data); + } +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c new file mode 100644 index 000000000000..8c3a1c2d8e86 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c @@ -0,0 +1,807 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include "ipa_i.h" +#include + +struct ipa3_intf { + char name[IPA_RESOURCE_NAME_MAX]; + struct list_head link; + u32 num_tx_props; + u32 num_rx_props; + u32 num_ext_props; + struct ipa_ioc_tx_intf_prop *tx; + struct ipa_ioc_rx_intf_prop *rx; + struct ipa_ioc_ext_intf_prop *ext; + enum ipa_client_type excp_pipe; +}; + +struct ipa3_push_msg { + struct ipa_msg_meta meta; + ipa_msg_free_fn callback; + void *buff; + struct list_head link; +}; + +struct ipa3_pull_msg { + struct ipa_msg_meta meta; + ipa_msg_pull_fn callback; + struct list_head link; +}; + +/** + * ipa3_register_intf() - register "logical" interface + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * + * Register an interface and its tx and rx properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx) +{ + return ipa3_register_intf_ext(name, tx, rx, NULL); +} + +/** + * ipa3_register_intf_ext() - register "logical" interface which has only + * extended properties + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * @ext: [in] EXT properties of the interface + * + * Register an interface and its tx, rx and ext properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext) +{ + struct ipa3_intf *intf; + u32 len; + + if (name == NULL || (tx == NULL && rx == NULL && ext == NULL)) { + IPAERR_RL("invalid params name=%pK tx=%pK rx=%pK ext=%pK\n", + name, tx, rx, ext); + return -EINVAL; + } + + if (tx && tx->num_props > IPA_NUM_PROPS_MAX) { + IPAERR_RL("invalid tx num_props=%d max=%d\n", tx->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + if (rx && rx->num_props > IPA_NUM_PROPS_MAX) { + IPAERR_RL("invalid rx num_props=%d max=%d\n", rx->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + if (ext && ext->num_props > IPA_NUM_PROPS_MAX) { + IPAERR_RL("invalid ext num_props=%d max=%d\n", ext->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + len = sizeof(struct ipa3_intf); + intf = kzalloc(len, GFP_KERNEL); + if (intf == NULL) + return -ENOMEM; + + strlcpy(intf->name, name, IPA_RESOURCE_NAME_MAX); + + if (tx) { + intf->num_tx_props = tx->num_props; + len = tx->num_props * sizeof(struct ipa_ioc_tx_intf_prop); + intf->tx = kmemdup(tx->prop, len, GFP_KERNEL); + if (intf->tx == NULL) { + kfree(intf); + return -ENOMEM; + } + } + + if (rx) { + intf->num_rx_props = rx->num_props; + len = rx->num_props * sizeof(struct ipa_ioc_rx_intf_prop); + intf->rx = kmemdup(rx->prop, len, GFP_KERNEL); + if (intf->rx == NULL) { + kfree(intf->tx); + kfree(intf); + return -ENOMEM; + } + memcpy(intf->rx, rx->prop, len); + } + + if (ext) { + intf->num_ext_props = ext->num_props; + len = ext->num_props * sizeof(struct ipa_ioc_ext_intf_prop); + intf->ext = kmemdup(ext->prop, len, GFP_KERNEL); + if (intf->ext == NULL) { + kfree(intf->rx); + kfree(intf->tx); + kfree(intf); + return -ENOMEM; + } + memcpy(intf->ext, ext->prop, len); + } + + if (ext && ext->excp_pipe_valid) + intf->excp_pipe = 
ext->excp_pipe; + else + intf->excp_pipe = IPA_CLIENT_APPS_LAN_CONS; + + mutex_lock(&ipa3_ctx->lock); + list_add_tail(&intf->link, &ipa3_ctx->intf_list); + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +/** + * ipa3_deregister_intf() - de-register previously registered logical interface + * @name: [in] interface name + * + * De-register a previously registered interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_deregister_intf(const char *name) +{ + struct ipa3_intf *entry; + struct ipa3_intf *next; + int result = -EINVAL; + + if ((name == NULL) || + (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX)) { + IPAERR_RL("invalid param name=%s\n", name); + return result; + } + + mutex_lock(&ipa3_ctx->lock); + list_for_each_entry_safe(entry, next, &ipa3_ctx->intf_list, link) { + if (!strcmp(entry->name, name)) { + list_del(&entry->link); + kfree(entry->ext); + kfree(entry->rx); + kfree(entry->tx); + kfree(entry); + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_query_intf() - query logical interface properties + * @lookup: [inout] interface name and number of properties + * + * Obtain the handle and number of tx and rx properties for the named + * interface, used as part of querying the tx and rx properties for + * configuration of various rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_query_intf(struct ipa_ioc_query_intf *lookup) +{ + struct ipa3_intf *entry; + int result = -EINVAL; + + if (lookup == NULL) { + IPAERR_RL("invalid param lookup=%pK\n", lookup); + return result; + } + + lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) == + IPA_RESOURCE_NAME_MAX) { + IPAERR_RL("Interface name too long. (%s)\n", lookup->name); + return result; + } + + mutex_lock(&ipa3_ctx->lock); + list_for_each_entry(entry, &ipa3_ctx->intf_list, link) { + if (!strcmp(entry->name, lookup->name)) { + lookup->num_tx_props = entry->num_tx_props; + lookup->num_rx_props = entry->num_rx_props; + lookup->num_ext_props = entry->num_ext_props; + lookup->excp_pipe = entry->excp_pipe; + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_query_intf_tx_props() - query TX props of an interface + * @tx: [inout] interface tx attributes + * + * Obtain the tx properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx) +{ + struct ipa3_intf *entry; + int result = -EINVAL; + + if (tx == NULL) { + IPAERR_RL("null args: tx\n"); + return result; + } + + tx->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) { + IPAERR_RL("Interface name too long. 
(%s)\n", tx->name); + return result; + } + + mutex_lock(&ipa3_ctx->lock); + list_for_each_entry(entry, &ipa3_ctx->intf_list, link) { + if (!strcmp(entry->name, tx->name)) { + /* add the entry check */ + if (entry->num_tx_props != tx->num_tx_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_tx_props, + tx->num_tx_props); + mutex_unlock(&ipa3_ctx->lock); + return result; + } + memcpy(tx->tx, entry->tx, entry->num_tx_props * + sizeof(struct ipa_ioc_tx_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_query_intf_rx_props() - qeury RX props of an interface + * @rx: [inout] interface rx attributes + * + * Obtain the rx properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx) +{ + struct ipa3_intf *entry; + int result = -EINVAL; + + if (rx == NULL) { + IPAERR_RL("null args: rx\n"); + return result; + } + + rx->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) { + IPAERR_RL("Interface name too long. (%s)\n", rx->name); + return result; + } + + mutex_lock(&ipa3_ctx->lock); + list_for_each_entry(entry, &ipa3_ctx->intf_list, link) { + if (!strcmp(entry->name, rx->name)) { + /* add the entry check */ + if (entry->num_rx_props != rx->num_rx_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_rx_props, + rx->num_rx_props); + mutex_unlock(&ipa3_ctx->lock); + return result; + } + memcpy(rx->rx, entry->rx, entry->num_rx_props * + sizeof(struct ipa_ioc_rx_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_query_intf_ext_props() - qeury EXT props of an interface + * @ext: [inout] interface ext attributes + * + * Obtain the ext properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext) +{ + struct ipa3_intf *entry; + int result = -EINVAL; + + if (ext == NULL) { + IPAERR_RL("invalid param ext=%pK\n", ext); + return result; + } + + mutex_lock(&ipa3_ctx->lock); + list_for_each_entry(entry, &ipa3_ctx->intf_list, link) { + if (!strcmp(entry->name, ext->name)) { + /* add the entry check */ + if (entry->num_ext_props != ext->num_ext_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_ext_props, + ext->num_ext_props); + mutex_unlock(&ipa3_ctx->lock); + return result; + } + memcpy(ext->ext, entry->ext, entry->num_ext_props * + sizeof(struct ipa_ioc_ext_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +static void ipa3_send_msg_free(void *buff, u32 len, u32 type) +{ + kfree(buff); +} + +static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff) +{ + struct ipa3_push_msg *msg_dup; + struct ipa_wlan_msg_ex *event_ex_cur_con = NULL; + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa_wlan_msg *event_ex_cur_discon = NULL; + void *data_dup = NULL; + struct ipa3_push_msg *entry; + struct ipa3_push_msg *next; + int cnt = 0, total = 0, max = 0; + uint8_t mac[IPA_MAC_ADDR_SIZE]; + uint8_t mac2[IPA_MAC_ADDR_SIZE]; + + if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) { + /* debug print */ + event_ex_cur_con = buff; + for (cnt = 0; cnt < event_ex_cur_con->num_of_attribs; cnt++) { + if 
(event_ex_cur_con->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + IPADBG("%02x:%02x:%02x:%02x:%02x:%02x,(%d)\n", + event_ex_cur_con->attribs[cnt].u.mac_addr[0], + event_ex_cur_con->attribs[cnt].u.mac_addr[1], + event_ex_cur_con->attribs[cnt].u.mac_addr[2], + event_ex_cur_con->attribs[cnt].u.mac_addr[3], + event_ex_cur_con->attribs[cnt].u.mac_addr[4], + event_ex_cur_con->attribs[cnt].u.mac_addr[5], + meta->msg_type); + } + } + + mutex_lock(&ipa3_ctx->msg_wlan_client_lock); + msg_dup = kzalloc(sizeof(*msg_dup), GFP_KERNEL); + if (msg_dup == NULL) { + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg_dup->meta = *meta; + if (meta->msg_len > 0 && buff) { + data_dup = kmemdup(buff, meta->msg_len, GFP_KERNEL); + if (data_dup == NULL) { + kfree(msg_dup); + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + memcpy(data_dup, buff, meta->msg_len); + msg_dup->buff = data_dup; + msg_dup->callback = ipa3_send_msg_free; + } else { + IPAERR("msg_len %d\n", meta->msg_len); + kfree(msg_dup); + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + list_add_tail(&msg_dup->link, &ipa3_ctx->msg_wlan_client_list); + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + } + + /* remove the cache */ + if (meta->msg_type == WLAN_CLIENT_DISCONNECT) { + /* debug print */ + event_ex_cur_discon = buff; + IPADBG("Mac %pM, msg %d\n", + event_ex_cur_discon->mac_addr, + meta->msg_type); + memcpy(mac2, + event_ex_cur_discon->mac_addr, + sizeof(mac2)); + + mutex_lock(&ipa3_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, + &ipa3_ctx->msg_wlan_client_list, + link) { + event_ex_list = entry->buff; + max = event_ex_list->num_of_attribs; + for (cnt = 0; cnt < max; cnt++) { + memcpy(mac, + event_ex_list->attribs[cnt].u.mac_addr, + sizeof(mac)); + if (event_ex_list->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + pr_debug("%pM\n", mac); + + /* compare to delete one*/ + if (memcmp(mac2, mac, + sizeof(mac)) == 0) { + IPADBG("clean %d\n", total); + list_del(&entry->link); + kfree(entry); + break; + } + } + } + total++; + } + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + } + return 0; +} + +/** + * ipa3_send_msg() - Send "message" from kernel client to IPA driver + * @meta: [in] message meta-data + * @buff: [in] the payload for message + * @callback: [in] free callback + * + * Client supplies the message meta-data and payload which IPA driver buffers + * till read by user-space. After read from user space IPA driver invokes the + * callback supplied to free the message payload. Client must not touch/free + * the message payload after calling this API. 
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback) +{ + struct ipa3_push_msg *msg; + void *data = NULL; + + if (meta == NULL || (buff == NULL && callback != NULL) || + (buff != NULL && callback == NULL)) { + IPAERR_RL("invalid param meta=%pK buff=%pK, callback=%pK\n", + meta, buff, callback); + return -EINVAL; + } + + if (meta->msg_type >= IPA_EVENT_MAX_NUM) { + IPAERR_RL("unsupported message type %d\n", meta->msg_type); + return -EINVAL; + } + + msg = kzalloc(sizeof(struct ipa3_push_msg), GFP_KERNEL); + if (msg == NULL) + return -ENOMEM; + + msg->meta = *meta; + if (meta->msg_len > 0 && buff) { + data = kmemdup(buff, meta->msg_len, GFP_KERNEL); + if (data == NULL) { + kfree(msg); + return -ENOMEM; + } + msg->buff = data; + msg->callback = ipa3_send_msg_free; + } + + mutex_lock(&ipa3_ctx->msg_lock); + list_add_tail(&msg->link, &ipa3_ctx->msg_list); + /* support for softap client event cache */ + if (wlan_msg_process(meta, buff)) + IPAERR_RL("wlan_msg_process failed\n"); + + /* unlock only after process */ + mutex_unlock(&ipa3_ctx->msg_lock); + IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]); + + wake_up(&ipa3_ctx->msg_waitq); + if (buff) + callback(buff, meta->msg_len, meta->msg_type); + + return 0; +} + +/** + * ipa3_resend_wlan_msg() - Resend cached "message" to IPACM + * + * resend wlan client connect events to user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_resend_wlan_msg(void) +{ + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa3_push_msg *entry; + struct ipa3_push_msg *next; + int cnt = 0, total = 0; + struct ipa3_push_msg *msg; + void *data = NULL; + + IPADBG("\n"); + + mutex_lock(&ipa3_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, &ipa3_ctx->msg_wlan_client_list, + link) { + + event_ex_list = entry->buff; + for (cnt = 0; cnt < event_ex_list->num_of_attribs; cnt++) { + if (event_ex_list->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + IPADBG("%d-Mac %pM\n", total, + event_ex_list->attribs[cnt].u.mac_addr); + } + } + + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (msg == NULL) { + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg->meta = entry->meta; + data = kmemdup(entry->buff, entry->meta.msg_len, GFP_KERNEL); + if (data == NULL) { + kfree(msg); + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg->buff = data; + msg->callback = ipa3_send_msg_free; + mutex_lock(&ipa3_ctx->msg_lock); + list_add_tail(&msg->link, &ipa3_ctx->msg_list); + mutex_unlock(&ipa3_ctx->msg_lock); + wake_up(&ipa3_ctx->msg_waitq); + + total++; + } + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return 0; +} + +/** + * ipa3_register_pull_msg() - register pull message type + * @meta: [in] message meta-data + * @callback: [in] pull callback + * + * Register message callback by kernel client with IPA driver for IPA driver to + * pull message on-demand. 
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback) +{ + struct ipa3_pull_msg *msg; + + if (meta == NULL || callback == NULL) { + IPAERR_RL("invalid param meta=%pK callback=%pK\n", + meta, callback); + return -EINVAL; + } + + msg = kzalloc(sizeof(struct ipa3_pull_msg), GFP_KERNEL); + if (msg == NULL) + return -ENOMEM; + + msg->meta = *meta; + msg->callback = callback; + + mutex_lock(&ipa3_ctx->msg_lock); + list_add_tail(&msg->link, &ipa3_ctx->pull_msg_list); + mutex_unlock(&ipa3_ctx->msg_lock); + + return 0; +} + +/** + * ipa3_deregister_pull_msg() - De-register pull message type + * @meta: [in] message meta-data + * + * De-register "message" by kernel client from IPA driver + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta) +{ + struct ipa3_pull_msg *entry; + struct ipa3_pull_msg *next; + int result = -EINVAL; + + if (meta == NULL) { + IPAERR_RL("null arg: meta\n"); + return result; + } + + mutex_lock(&ipa3_ctx->msg_lock); + list_for_each_entry_safe(entry, next, &ipa3_ctx->pull_msg_list, link) { + if (entry->meta.msg_len == meta->msg_len && + entry->meta.msg_type == meta->msg_type) { + list_del(&entry->link); + kfree(entry); + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->msg_lock); + return result; +} + +/** + * ipa3_read() - read message from IPA device + * @filp: [in] file pointer + * @buf: [out] buffer to read into + * @count: [in] size of above buffer + * @f_pos: [inout] file position + * + * User-space should continually read from /dev/ipa; reads will block when there + * are no messages to read. Upon return, user-space should read the ipa_msg_meta + * from the start of the buffer to know what type of message was read and its + * length in the remainder of the buffer.
Buffer supplied must be big enough to + * hold the message meta-data and the largest defined message type + * + * Returns: how many bytes copied to buffer + * + * Note: Should not be called from atomic context + */ +ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos) +{ + char __user *start; + struct ipa3_push_msg *msg = NULL; + int ret; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + int locked; + + start = buf; + + add_wait_queue(&ipa3_ctx->msg_waitq, &wait); + while (1) { + mutex_lock(&ipa3_ctx->msg_lock); + locked = 1; + + if (!list_empty(&ipa3_ctx->msg_list)) { + msg = list_first_entry(&ipa3_ctx->msg_list, + struct ipa3_push_msg, link); + list_del(&msg->link); + } + + IPADBG_LOW("msg=%pK\n", msg); + + if (msg) { + locked = 0; + mutex_unlock(&ipa3_ctx->msg_lock); + if (copy_to_user(buf, &msg->meta, + sizeof(struct ipa_msg_meta))) { + ret = -EFAULT; + kfree(msg); + msg = NULL; + break; + } + buf += sizeof(struct ipa_msg_meta); + count -= sizeof(struct ipa_msg_meta); + if (msg->buff) { + if (copy_to_user(buf, msg->buff, + msg->meta.msg_len)) { + ret = -EFAULT; + kfree(msg); + msg = NULL; + break; + } + buf += msg->meta.msg_len; + count -= msg->meta.msg_len; + msg->callback(msg->buff, msg->meta.msg_len, + msg->meta.msg_type); + } + IPA_STATS_INC_CNT( + ipa3_ctx->stats.msg_r[msg->meta.msg_type]); + kfree(msg); + } + + ret = -EAGAIN; + if (filp->f_flags & O_NONBLOCK) + break; + + ret = -EINTR; + if (signal_pending(current)) + break; + + if (start != buf) + break; + + locked = 0; + mutex_unlock(&ipa3_ctx->msg_lock); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } + + remove_wait_queue(&ipa3_ctx->msg_waitq, &wait); + if (start != buf && ret != -EFAULT) + ret = buf - start; + + if (locked) + mutex_unlock(&ipa3_ctx->msg_lock); + + return ret; +} + +/** + * ipa3_pull_msg() - pull the specified message from client + * @meta: [in] message meta-data + * @buf: [out] buffer to read into + * @count: [in] size of above buffer + * + * Populate the supplied buffer with the pull message which is fetched + * from client, the message must have previously been registered with + * the IPA driver + * + * Returns: how many bytes copied to buffer + * + * Note: Should not be called from atomic context + */ +int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count) +{ + struct ipa3_pull_msg *entry; + int result = -EINVAL; + + if (meta == NULL || buff == NULL || !count) { + IPAERR_RL("invalid param name=%pK buff=%pK count=%zu\n", + meta, buff, count); + return result; + } + + mutex_lock(&ipa3_ctx->msg_lock); + list_for_each_entry(entry, &ipa3_ctx->pull_msg_list, link) { + if (entry->meta.msg_len == meta->msg_len && + entry->meta.msg_type == meta->msg_type) { + result = entry->callback(buff, count, meta->msg_type); + break; + } + } + mutex_unlock(&ipa3_ctx->msg_lock); + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c new file mode 100644 index 000000000000..31a325505690 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c @@ -0,0 +1,748 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_common_i.h" +#include "ipa_i.h" +#include "ipa_qmi_service.h" + +#define IPA_MHI_DRV_NAME "ipa_mhi" + + +#define IPA_MHI_DBG(fmt, args...) 
\ + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_MHI_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +#define IPA_MHI_ERR(fmt, args...) \ + do { \ + pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +#define IPA_MHI_FUNC_ENTRY() \ + IPA_MHI_DBG("ENTRY\n") +#define IPA_MHI_FUNC_EXIT() \ + IPA_MHI_DBG("EXIT\n") + +#define IPA_MHI_MAX_UL_CHANNELS 1 +#define IPA_MHI_MAX_DL_CHANNELS 1 + +/* bit #40 in address should be asserted for MHI transfers over pcie */ +#define IPA_MHI_HOST_ADDR_COND(addr) \ + ((params->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr)) + +enum ipa3_mhi_polling_mode { + IPA_MHI_POLLING_MODE_DB_MODE, + IPA_MHI_POLLING_MODE_POLL_MODE, +}; + +bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client) +{ + int res; + int ipa_ep_idx; + struct ipa3_ep_context *ep; + + IPA_MHI_FUNC_ENTRY(); + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPA_MHI_ERR("Invalid client.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + IPA_MHI_DBG_LOW("Stopping GSI channel %ld\n", ep->gsi_chan_hdl); + res = gsi_stop_channel(ep->gsi_chan_hdl); + if (res != 0 && + res != -GSI_STATUS_AGAIN && + res != -GSI_STATUS_TIMED_OUT) { + IPA_MHI_ERR("GSI stop channel failed %d\n", + res); + WARN_ON(1); + return false; + } + + if (res == 0) { + IPA_MHI_DBG_LOW("GSI channel %ld STOP\n", + ep->gsi_chan_hdl); + return true; + } + + return false; +} + +static int ipa3_mhi_reset_gsi_channel(enum ipa_client_type client) +{ + int res; + int clnt_hdl; + + IPA_MHI_FUNC_ENTRY(); + + clnt_hdl = ipa3_get_ep_mapping(client); + if (clnt_hdl < 0) + return -EFAULT; + + res = ipa3_reset_gsi_channel(clnt_hdl); + if (res) { + IPA_MHI_ERR("ipa3_reset_gsi_channel failed %d\n", res); + return -EFAULT; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +int ipa3_mhi_reset_channel_internal(enum ipa_client_type client) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + res = ipa3_mhi_reset_gsi_channel(client); + if (res) { + IPAERR("ipa3_mhi_reset_gsi_channel failed\n"); + ipa_assert(); + return res; + } + + res = ipa3_disable_data_path(ipa3_get_ep_mapping(client)); + if (res) { + IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res); + return res; + } + IPA_MHI_FUNC_EXIT(); + + return 0; +} + +int ipa3_mhi_start_channel_internal(enum ipa_client_type client) +{ + int res; + int ipa_ep_idx; + + IPA_MHI_FUNC_ENTRY(); + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx < 0) { + IPA_MHI_ERR("Invalid client %d\n", client); + return -EINVAL; + } + res = ipa3_enable_data_path(ipa_ep_idx); + if (res) { + IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res); + return res; + } + IPA_MHI_FUNC_EXIT(); + + return 0; +} + +static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client, + struct ipa_mhi_ch_ctx *ch_ctx_host, int ring_size) +{ + switch (ch_ctx_host->pollcfg) { + case 0: + /*set default polling configuration according to MHI spec*/ + if (IPA_CLIENT_IS_PROD(client)) 
+ return 7; + else + return (ring_size/2)/8; + break; + default: + return ch_ctx_host->pollcfg; + } +} + +static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, + int ipa_ep_idx, struct start_gsi_channel *params) +{ + int res = 0; + struct gsi_evt_ring_props ev_props; + struct ipa_mhi_msi_info *msi; + struct gsi_chan_props ch_props; + union __packed gsi_channel_scratch ch_scratch; + struct ipa3_ep_context *ep; + const struct ipa_gsi_ep_config *ep_cfg; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + bool burst_mode_enabled = false; + + IPA_MHI_FUNC_ENTRY(); + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + msi = params->msi; + ep_cfg = ipa3_get_gsi_ep_info(client); + if (!ep_cfg) { + IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n"); + return -EPERM; + } + + /* allocate event ring only for the first time pipe is connected */ + if (params->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) { + memset(&ev_props, 0, sizeof(ev_props)); + ev_props.intf = GSI_EVT_CHTYPE_MHI_EV; + ev_props.intr = GSI_INTR_MSI; + ev_props.re_size = GSI_EVT_RING_RE_SIZE_16B; + ev_props.ring_len = params->ev_ctx_host->rlen; + ev_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND( + params->ev_ctx_host->rbase); + ev_props.int_modt = params->ev_ctx_host->intmodt * + IPA_SLEEP_CLK_RATE_KHZ; + ev_props.int_modc = params->ev_ctx_host->intmodc; + ev_props.intvec = ((msi->data & ~msi->mask) | + (params->ev_ctx_host->msivec & msi->mask)); + ev_props.msi_addr = IPA_MHI_HOST_ADDR_COND( + (((u64)msi->addr_hi << 32) | msi->addr_low)); + ev_props.rp_update_addr = IPA_MHI_HOST_ADDR_COND( + params->event_context_addr + + offsetof(struct ipa_mhi_ev_ctx, rp)); + ev_props.exclusive = true; + ev_props.err_cb = params->ev_err_cb; + ev_props.user_data = params->channel; + ev_props.evchid_valid = true; + ev_props.evchid = params->evchid; + IPA_MHI_DBG("allocating event ring ep:%u evchid:%u\n", + ipa_ep_idx, ev_props.evchid); + res = gsi_alloc_evt_ring(&ev_props, ipa3_ctx->gsi_dev_hdl, + &ep->gsi_evt_ring_hdl); + if (res) { + IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res); + goto fail_alloc_evt; + } + IPA_MHI_DBG("client %d, caching event ring hdl %lu\n", + client, + ep->gsi_evt_ring_hdl); + *params->cached_gsi_evt_ring_hdl = + ep->gsi_evt_ring_hdl; + + } else { + IPA_MHI_DBG("event ring already exists: evt_ring_hdl=%lu\n", + *params->cached_gsi_evt_ring_hdl); + ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl; + } + + if (params->ev_ctx_host->wp == params->ev_ctx_host->rbase) { + IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n", + params->ev_ctx_host->wp); + goto fail_alloc_ch; + } + + IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n", + ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp); + res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl, + params->ev_ctx_host->wp); + if (res) { + IPA_MHI_ERR("fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n", + res, ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp); + goto fail_alloc_ch; + } + + memset(&ch_props, 0, sizeof(ch_props)); + ch_props.prot = GSI_CHAN_PROT_MHI; + ch_props.dir = IPA_CLIENT_IS_PROD(client) ? 
+ GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI; + ch_props.ch_id = ep_cfg->ipa_gsi_chan_num; + ch_props.evt_ring_hdl = *params->cached_gsi_evt_ring_hdl; + ch_props.re_size = GSI_CHAN_RE_SIZE_16B; + ch_props.ring_len = params->ch_ctx_host->rlen; + ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND( + params->ch_ctx_host->rbase); + + if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT || + params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) { + burst_mode_enabled = true; + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 && + !burst_mode_enabled) + ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE; + else + ch_props.use_db_eng = GSI_CHAN_DB_MODE; + + ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG; + ch_props.low_weight = 1; + ch_props.prefetch_mode = ep_cfg->prefetch_mode; + ch_props.empty_lvl_threshold = ep_cfg->prefetch_threshold; + ch_props.err_cb = params->ch_err_cb; + ch_props.chan_user_data = params->channel; + res = gsi_alloc_channel(&ch_props, ipa3_ctx->gsi_dev_hdl, + &ep->gsi_chan_hdl); + if (res) { + IPA_MHI_ERR("gsi_alloc_channel failed %d\n", + res); + goto fail_alloc_ch; + } + + memset(&ch_scratch, 0, sizeof(ch_scratch)); + ch_scratch.mhi.mhi_host_wp_addr = IPA_MHI_HOST_ADDR_COND( + params->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, wp)); + ch_scratch.mhi.assert_bit40 = params->assert_bit40; + + /* + * Update scratch for MCS smart prefetch: + * Starting IPA4.5, smart prefetch implemented by H/W. + * At IPA 4.0/4.1/4.2, we do not use MCS smart prefetch + * so keep the fields zero. + */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + ch_scratch.mhi.max_outstanding_tre = + ep_cfg->ipa_if_tlv * ch_props.re_size; + ch_scratch.mhi.outstanding_threshold = + min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size; + } + ch_scratch.mhi.oob_mod_threshold = 4; + + if (burst_mode_enabled) { + ch_scratch.mhi.burst_mode_enabled = burst_mode_enabled; + ch_scratch.mhi.polling_configuration = + ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host, + (ch_props.ring_len / ch_props.re_size)); + ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE; + } else { + ch_scratch.mhi.burst_mode_enabled = false; + } + res = gsi_write_channel_scratch(ep->gsi_chan_hdl, + ch_scratch); + if (res) { + IPA_MHI_ERR("gsi_write_channel_scratch failed %d\n", + res); + goto fail_ch_scratch; + } + + *params->mhi = ch_scratch.mhi; + + if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ep->ep_delay_set = true; + res = ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl); + if (res) + IPA_MHI_ERR("client (ep: %d) failed result=%d\n", + ipa_ep_idx, res); + else + IPA_MHI_DBG("client (ep: %d) success\n", ipa_ep_idx); + } else { + ep->ep_delay_set = false; + } + + IPA_MHI_DBG("Starting channel\n"); + res = gsi_start_channel(ep->gsi_chan_hdl); + if (res) { + IPA_MHI_ERR("gsi_start_channel failed %d\n", res); + goto fail_ch_start; + } + + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_ch_start: +fail_ch_scratch: + gsi_dealloc_channel(ep->gsi_chan_hdl); +fail_alloc_ch: + gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); + ep->gsi_evt_ring_hdl = ~0; +fail_alloc_evt: + return res; +} + +int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params) +{ + int res; + struct gsi_device_scratch gsi_scratch; + const struct ipa_gsi_ep_config *gsi_ep_info; + + IPA_MHI_FUNC_ENTRY(); + + if (!params) { + IPA_MHI_ERR("null args\n"); + return -EINVAL; + } + + if ((IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) > + 
((ipa3_ctx->mhi_evid_limits[1] - + ipa3_ctx->mhi_evid_limits[0]) + 1)) { + IPAERR("Not enough event rings for MHI\n"); + ipa_assert(); + return -EINVAL; + } + + /* Initialize IPA MHI engine */ + gsi_ep_info = ipa3_get_gsi_ep_info(IPA_CLIENT_MHI_PROD); + if (!gsi_ep_info) { + IPAERR("MHI PROD has no ep allocated\n"); + ipa_assert(); + } + memset(&gsi_scratch, 0, sizeof(gsi_scratch)); + gsi_scratch.mhi_base_chan_idx_valid = true; + gsi_scratch.mhi_base_chan_idx = gsi_ep_info->ipa_gsi_chan_num + + params->gsi.first_ch_idx; + res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl, + &gsi_scratch); + if (res) { + IPA_MHI_ERR("failed to write device scratch %d\n", res); + goto fail_init_engine; + } + + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_init_engine: + return res; +} + +/** + * ipa3_connect_mhi_pipe() - Connect pipe to IPA and start corresponding + * MHI channel + * @in: connect parameters + * @clnt_hdl: [out] client handle for this pipe + * + * This function is called by IPA MHI client driver on MHI channel start. + * This function is called after MHI engine was started. + * + * Return codes: 0 : success + * negative : error + */ +int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl) +{ + struct ipa3_ep_context *ep; + int ipa_ep_idx; + int res; + enum ipa_client_type client; + + IPA_MHI_FUNC_ENTRY(); + + if (!in || !clnt_hdl) { + IPA_MHI_ERR("NULL args\n"); + return -EINVAL; + } + + in->start.gsi.evchid += ipa3_ctx->mhi_evid_limits[0]; + + client = in->sys->client; + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPA_MHI_ERR("Invalid client.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (ep->valid == 1) { + IPA_MHI_ERR("EP already allocated.\n"); + return -EPERM; + } + + memset(ep, 0, offsetof(struct ipa3_ep_context, sys)); + ep->valid = 1; + ep->skip_ep_cfg = in->sys->skip_ep_cfg; + ep->client = client; + ep->client_notify = in->sys->notify; + ep->priv = in->sys->priv; + ep->keep_ipa_awake = in->sys->keep_ipa_awake; + + res = ipa_mhi_start_gsi_channel(client, + ipa_ep_idx, &in->start.gsi); + if (res) { + IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n", + res); + goto fail_start_channel; + } + + res = ipa3_enable_data_path(ipa_ep_idx); + if (res) { + IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res, + ipa_ep_idx); + goto fail_ep_cfg; + } + + if (!ep->skip_ep_cfg) { + if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_ep_cfg; + } + if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_ep_cfg; + } + IPA_MHI_DBG("ep configuration successful\n"); + } else { + IPA_MHI_DBG("skipping ep configuration\n"); + } + + *clnt_hdl = ipa_ep_idx; + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client)) + ipa3_install_dflt_flt_rules(ipa_ep_idx); + + ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + IPA_MHI_DBG("client %d (ep: %d) connected\n", client, + ipa_ep_idx); + + IPA_MHI_FUNC_EXIT(); + + return 0; + +fail_ep_cfg: + ipa3_disable_data_path(ipa_ep_idx); +fail_start_channel: + memset(ep, 0, offsetof(struct ipa3_ep_context, sys)); + return -EPERM; +} + +/** + * ipa3_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding + * MHI channel + * @clnt_hdl: client handle for this pipe + * + * This function is called by IPA MHI client driver on MHI channel reset. + * This function is called after MHI channel was started. 
+ * This function is doing the following: + * - Send command to uC/GSI to reset corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa3_disconnect_mhi_pipe(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + int res; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + + IPA_MHI_FUNC_ENTRY(); + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) { + IPAERR("invalid handle %d\n", clnt_hdl); + return -EINVAL; + } + + if (ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("pipe was not connected %d\n", clnt_hdl); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + if (ep->ep_delay_set) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = false; + res = ipa3_cfg_ep_ctrl(clnt_hdl, + &ep_cfg_ctrl); + if (res) { + IPAERR + ("client(ep:%d) failed to remove delay res=%d\n", + clnt_hdl, res); + } else { + IPADBG("client (ep: %d) delay removed\n", + clnt_hdl); + ep->ep_delay_set = false; + } + } + + res = gsi_dealloc_channel(ep->gsi_chan_hdl); + if (res) { + IPAERR("gsi_dealloc_channel failed %d\n", res); + goto fail_reset_channel; + } + + ep->valid = 0; + ipa3_delete_dflt_flt_rules(clnt_hdl); + + IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl); + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_reset_channel: + return res; +} + +int ipa3_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index) +{ + int res; + int ipa_ep_idx; + struct ipa3_ep_context *ep; + union __packed gsi_channel_scratch gsi_ch_scratch; + + IPA_MHI_FUNC_ENTRY(); + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx < 0) { + IPA_MHI_ERR("Invalid client %d\n", client); + return -EINVAL; + } + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (brstmode_enabled && !LPTransitionRejected) { + + res = gsi_read_channel_scratch(ep->gsi_chan_hdl, + &gsi_ch_scratch); + if (res) { + IPA_MHI_ERR("read ch scratch fail %d %d\n", res); + return res; + } + + /* + * set polling mode bit to DB mode before + * resuming the channel + * + * For MHI-->IPA pipes: + * when resuming due to transition to M0, + * set the polling mode bit to 0. + * In other cases, restore it's value form + * when you stopped the channel. + * Here, after successful resume client move to M0 state. + * So, by default setting polling mode bit to 0. + * + * For IPA-->MHI pipe: + * always restore the polling mode bit. 
+ */ + if (IPA_CLIENT_IS_PROD(client)) + ch_scratch.mhi.polling_mode = + IPA_MHI_POLLING_MODE_DB_MODE; + else + ch_scratch.mhi.polling_mode = + gsi_ch_scratch.mhi.polling_mode; + + /* Use GSI update API to not affect non-SWI fields + * inside the scratch while in suspend-resume operation + */ + res = gsi_update_mhi_channel_scratch( + ep->gsi_chan_hdl, ch_scratch.mhi); + if (res) { + IPA_MHI_ERR("write ch scratch fail %d\n" + , res); + return res; + } + } + + res = gsi_start_channel(ep->gsi_chan_hdl); + if (res) { + IPA_MHI_ERR("failed to resume channel error %d\n", res); + return res; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +int ipa3_mhi_query_ch_info(enum ipa_client_type client, + struct gsi_chan_info *ch_info) +{ + int ipa_ep_idx; + int res; + struct ipa3_ep_context *ep; + + IPA_MHI_FUNC_ENTRY(); + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx < 0) { + IPA_MHI_ERR("Invalid client %d\n", client); + return -EINVAL; + } + ep = &ipa3_ctx->ep[ipa_ep_idx]; + res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info); + if (res) { + IPA_MHI_ERR("gsi_query_channel_info failed\n"); + return res; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +bool ipa3_has_open_aggr_frame(enum ipa_client_type client) +{ + u32 aggr_state_active; + int ipa_ep_idx; + + aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + IPA_MHI_DBG_LOW("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active); + + ipa_ep_idx = ipa_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + ipa_assert(); + return false; + } + + if ((1 << ipa_ep_idx) & aggr_state_active) + return true; + + return false; +} + +int ipa3_mhi_destroy_channel(enum ipa_client_type client) +{ + int res; + int ipa_ep_idx; + struct ipa3_ep_context *ep; + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx < 0) { + IPA_MHI_ERR("Invalid client %d\n", client); + return -EINVAL; + } + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + IPA_ACTIVE_CLIENTS_INC_EP(client); + + IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n", + ep->gsi_evt_ring_hdl, ipa_ep_idx); + + res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl); + if (res) { + IPAERR(" failed to reset evt ring %lu, err %d\n" + , ep->gsi_evt_ring_hdl, res); + goto fail; + } + + IPA_MHI_DBG("dealloc event ring (hdl: %lu, ep: %d)\n", + ep->gsi_evt_ring_hdl, ipa_ep_idx); + + res = gsi_dealloc_evt_ring( + ep->gsi_evt_ring_hdl); + if (res) { + IPAERR("dealloc evt ring %lu failed, err %d\n" + , ep->gsi_evt_ring_hdl, res); + goto fail; + } + + IPA_ACTIVE_CLIENTS_DEC_EP(client); + return 0; +fail: + IPA_ACTIVE_CLIENTS_DEC_EP(client); + return res; +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA MHI driver"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c new file mode 100644 index 000000000000..c2d2934020d5 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c @@ -0,0 +1,1058 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include "ipa_qmi_service.h" +#include "../ipa_common_i.h" +#include "ipa_i.h" + +#define IMP_DRV_NAME "ipa_mhi_proxy" + +#define IMP_DBG(fmt, args...) \ + do { \ + pr_debug(IMP_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IMP_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IMP_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IMP_DBG_LOW(fmt, args...) 
\ + do { \ + pr_debug(IMP_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IMP_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +#define IMP_ERR(fmt, args...) \ + do { \ + pr_err(IMP_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IMP_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IMP_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +#define IMP_FUNC_ENTRY() \ + IMP_DBG_LOW("ENTRY\n") +#define IMP_FUNC_EXIT() \ + IMP_DBG_LOW("EXIT\n") + +#define IMP_IPA_UC_UL_CH_n 0 +#define IMP_IPA_UC_UL_EV_n 1 +#define IMP_IPA_UC_DL_CH_n 2 +#define IMP_IPA_UC_DL_EV_n 3 +#define IMP_IPA_UC_m 1 + +/* each pair of UL/DL channels are defined below */ +static const struct mhi_device_id mhi_driver_match_table[] = { + { .chan = "IP_HW_OFFLOAD_0" }, + {}, +}; + +static int imp_mhi_probe_cb(struct mhi_device *, const struct mhi_device_id *); +static void imp_mhi_remove_cb(struct mhi_device *); +static void imp_mhi_status_cb(struct mhi_device *, enum MHI_CB); + +static struct mhi_driver mhi_driver = { + .id_table = mhi_driver_match_table, + .probe = imp_mhi_probe_cb, + .remove = imp_mhi_remove_cb, + .status_cb = imp_mhi_status_cb, + .driver = { + .name = IMP_DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +struct imp_channel_context_type { + u32 chstate:8; + u32 brsmode:2; + u32 pollcfg:6; + u32 reserved:16; + + u32 chtype; + + u32 erindex; + + u64 rbase; + + u64 rlen; + + u64 rpp; + + u64 wpp; +} __packed; + +struct imp_event_context_type { + u32 reserved:8; + u32 intmodc:8; + u32 intmodt:16; + + u32 ertype; + + u32 msivec; + + u64 rbase; + + u64 rlen; + + u64 rpp; + + u64 wpp; +} __packed; + +struct imp_iova_addr { + dma_addr_t base; + unsigned int size; +}; + +struct imp_dev_info { + struct platform_device *pdev; + bool smmu_enabled; + struct imp_iova_addr ctrl; + struct imp_iova_addr data; + u32 chdb_base; + u32 erdb_base; +}; + +struct imp_event_props { + u16 id; + phys_addr_t doorbell; + u16 uc_mbox_n; + struct imp_event_context_type ev_ctx; +}; + +struct imp_event { + struct imp_event_props props; +}; + +struct imp_channel_props { + enum dma_data_direction dir; + u16 id; + phys_addr_t doorbell; + u16 uc_mbox_n; + struct imp_channel_context_type ch_ctx; + +}; + +struct imp_channel { + struct imp_channel_props props; + struct imp_event event; +}; + +enum imp_state { + IMP_INVALID = 0, + IMP_PROBED, + IMP_READY, + IMP_STARTED +}; + +struct imp_qmi_cache { + struct ipa_mhi_ready_indication_msg_v01 ready_ind; + struct ipa_mhi_alloc_channel_req_msg_v01 alloc_ch_req; + struct ipa_mhi_alloc_channel_resp_msg_v01 alloc_ch_resp; +}; + +struct imp_mhi_driver { + struct mhi_device *mhi_dev; + struct imp_channel ul_chan; + struct imp_channel dl_chan; +}; + +struct imp_context { + struct imp_dev_info dev_info; + struct imp_mhi_driver md; + struct mutex mutex; + struct mutex lpm_mutex; + enum imp_state state; + bool in_lpm; + bool lpm_disabled; + struct imp_qmi_cache qmi; + +}; + +static struct imp_context *imp_ctx; + +static void _populate_smmu_info(struct ipa_mhi_ready_indication_msg_v01 *req) +{ + req->smmu_info_valid = true; + req->smmu_info.iova_ctl_base_addr = imp_ctx->dev_info.ctrl.base; + req->smmu_info.iova_ctl_size = imp_ctx->dev_info.ctrl.size; + req->smmu_info.iova_data_base_addr = imp_ctx->dev_info.data.base; + req->smmu_info.iova_data_size = imp_ctx->dev_info.data.size; +} + +static void imp_mhi_trigger_ready_ind(void) +{ + struct 
ipa_mhi_ready_indication_msg_v01 *req + = &imp_ctx->qmi.ready_ind; + int ret; + struct imp_channel *ch; + struct ipa_mhi_ch_init_info_type_v01 *ch_info; + + IMP_FUNC_ENTRY(); + if (imp_ctx->state != IMP_PROBED) { + IMP_ERR("invalid state %d\n", imp_ctx->state); + goto exit; + } + + if (imp_ctx->dev_info.smmu_enabled) + _populate_smmu_info(req); + + req->ch_info_arr_len = 0; + BUILD_BUG_ON(QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01 < 2); + + /* UL channel */ + ch = &imp_ctx->md.ul_chan; + ch_info = &req->ch_info_arr[req->ch_info_arr_len]; + + ch_info->ch_id = ch->props.id; + ch_info->direction_type = ch->props.dir; + ch_info->er_id = ch->event.props.id; + + /* uC is a doorbell proxy between local Q6 and remote Q6 */ + ch_info->ch_doorbell_addr = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IMP_IPA_UC_m, + ch->props.uc_mbox_n); + + ch_info->er_doorbell_addr = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IMP_IPA_UC_m, + ch->event.props.uc_mbox_n); + req->ch_info_arr_len++; + + /* DL channel */ + ch = &imp_ctx->md.dl_chan; + ch_info = &req->ch_info_arr[req->ch_info_arr_len]; + + ch_info->ch_id = ch->props.id; + ch_info->direction_type = ch->props.dir; + ch_info->er_id = ch->event.props.id; + + /* uC is a doorbell proxy between local Q6 and remote Q6 */ + ch_info->ch_doorbell_addr = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IMP_IPA_UC_m, + ch->props.uc_mbox_n); + + ch_info->er_doorbell_addr = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IMP_IPA_UC_m, + ch->event.props.uc_mbox_n); + req->ch_info_arr_len++; + + IMP_DBG("sending IND to modem\n"); + ret = ipa3_qmi_send_mhi_ready_indication(req); + if (ret) { + IMP_ERR("failed to send ready indication to modem %d\n", ret); + return; + } + + imp_ctx->state = IMP_READY; + +exit: + IMP_FUNC_EXIT(); +} + +static struct imp_channel *imp_get_ch_by_id(u16 id) +{ + if (imp_ctx->md.ul_chan.props.id == id) + return &imp_ctx->md.ul_chan; + + if (imp_ctx->md.dl_chan.props.id == id) + return &imp_ctx->md.dl_chan; + + return NULL; +} + +static struct ipa_mhi_er_info_type_v01 * + _find_ch_in_er_info_arr(struct ipa_mhi_alloc_channel_req_msg_v01 *req, + u16 id) +{ + int i; + + if (req->er_info_arr_len > QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01) + return NULL; + + for (i = 0; i < req->tr_info_arr_len; i++) + if (req->er_info_arr[i].er_id == id) + return &req->er_info_arr[i]; + return NULL; +} + +/* round addresses for closest page per SMMU requirements */ +static inline void imp_smmu_round_to_page(uint64_t iova, uint64_t pa, + uint64_t size, unsigned long *iova_p, phys_addr_t *pa_p, u32 *size_p) +{ + *iova_p = rounddown(iova, PAGE_SIZE); + *pa_p = rounddown(pa, PAGE_SIZE); + *size_p = roundup(size + pa - *pa_p, PAGE_SIZE); +} + +static void __map_smmu_info(struct device *dev, + struct imp_iova_addr *partition, int num_mapping, + struct ipa_mhi_mem_addr_info_type_v01 *map_info, + bool map) +{ + int i; + struct iommu_domain *domain; + unsigned long iova_p; + phys_addr_t pa_p; + u32 size_p; + + domain = iommu_get_domain_for_dev(dev); + if (!domain) { + IMP_ERR("domain is NULL for dev\n"); + return; + } + + for (i = 0; i < num_mapping; i++) { + int prot = IOMMU_READ | IOMMU_WRITE; + u32 ipa_base = ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst; + u32 ipa_size = ipa3_ctx->ipa_wrapper_size; + + 
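/*
+		 * Round the mapping's IOVA, PA and size to page boundaries
+		 * before handing them to iommu_map()/iommu_unmap(), which
+		 * operate on whole pages.
+		 */
+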
imp_smmu_round_to_page(map_info[i].iova, map_info[i].pa, + map_info[i].size, &iova_p, &pa_p, &size_p); + + if (map) { + /* boundary check */ + WARN_ON(partition->base > iova_p || + (partition->base + partition->size) < + (iova_p + size_p)); + + /* for IPA uC MBOM we need to map with device type */ + if (pa_p - ipa_base < ipa_size) + prot |= IOMMU_MMIO; + + IMP_DBG("mapping 0x%lx to 0x%pa size %d\n", + iova_p, &pa_p, size_p); + iommu_map(domain, + iova_p, pa_p, size_p, prot); + } else { + IMP_DBG("unmapping 0x%lx to 0x%pa size %d\n", + iova_p, &pa_p, size_p); + iommu_unmap(domain, iova_p, size_p); + } + } +} + +static int __imp_configure_mhi_device( + struct ipa_mhi_alloc_channel_req_msg_v01 *req, + struct ipa_mhi_alloc_channel_resp_msg_v01 *resp) +{ + struct mhi_buf ch_config[2]; + int i; + struct ipa_mhi_er_info_type_v01 *er_info; + struct imp_channel *ch; + int ridx = 0; + int ret; + + IMP_FUNC_ENTRY(); + + /* configure MHI */ + for (i = 0; i < req->tr_info_arr_len; i++) { + ch = imp_get_ch_by_id(req->tr_info_arr[i].ch_id); + if (!ch) { + IMP_ERR("unknown channel %d\n", + req->tr_info_arr[i].ch_id); + resp->alloc_resp_arr[ridx].ch_id = + req->tr_info_arr[i].ch_id; + resp->alloc_resp_arr[ridx].is_success = 0; + ridx++; + resp->alloc_resp_arr_len = ridx; + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INVALID_ID_V01; + return -EINVAL; + } + + /* populate CCA */ + if (req->tr_info_arr[i].brst_mode_type == + QMI_IPA_BURST_MODE_ENABLED_V01) + ch->props.ch_ctx.brsmode = 3; + else if (req->tr_info_arr[i].brst_mode_type == + QMI_IPA_BURST_MODE_DISABLED_V01) + ch->props.ch_ctx.brsmode = 2; + else + ch->props.ch_ctx.brsmode = 0; + + ch->props.ch_ctx.pollcfg = req->tr_info_arr[i].poll_cfg; + ch->props.ch_ctx.chtype = ch->props.dir; + ch->props.ch_ctx.erindex = ch->event.props.id; + ch->props.ch_ctx.rbase = req->tr_info_arr[i].ring_iova; + ch->props.ch_ctx.rlen = req->tr_info_arr[i].ring_len; + ch->props.ch_ctx.rpp = req->tr_info_arr[i].rp; + ch->props.ch_ctx.wpp = req->tr_info_arr[i].wp; + + ch_config[0].buf = &ch->props.ch_ctx; + ch_config[0].len = sizeof(ch->props.ch_ctx); + ch_config[0].name = "CCA"; + + /* populate ECA */ + er_info = _find_ch_in_er_info_arr(req, ch->event.props.id); + if (!er_info) { + IMP_ERR("no event ring for ch %d\n", + req->tr_info_arr[i].ch_id); + resp->alloc_resp_arr[ridx].ch_id = + req->tr_info_arr[i].ch_id; + resp->alloc_resp_arr[ridx].is_success = 0; + ridx++; + resp->alloc_resp_arr_len = ridx; + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INTERNAL_V01; + return -EINVAL; + } + + ch->event.props.ev_ctx.intmodc = er_info->intmod_count; + ch->event.props.ev_ctx.intmodt = er_info->intmod_cycles; + ch->event.props.ev_ctx.ertype = 1; + ch->event.props.ev_ctx.msivec = er_info->msi_addr; + ch->event.props.ev_ctx.rbase = er_info->ring_iova; + ch->event.props.ev_ctx.rlen = er_info->ring_len; + ch->event.props.ev_ctx.rpp = er_info->rp; + ch->event.props.ev_ctx.wpp = er_info->wp; + ch_config[1].buf = &ch->event.props.ev_ctx; + ch_config[1].len = sizeof(ch->event.props.ev_ctx); + ch_config[1].name = "ECA"; + + IMP_DBG("Configuring MHI device for ch %d\n", ch->props.id); + ret = mhi_device_configure(imp_ctx->md.mhi_dev, ch->props.dir, + ch_config, 2); + if (ret) { + IMP_ERR("mhi_device_configure failed for ch %d\n", + req->tr_info_arr[i].ch_id); + resp->alloc_resp_arr[ridx].ch_id = + req->tr_info_arr[i].ch_id; + resp->alloc_resp_arr[ridx].is_success = 0; + ridx++; + resp->alloc_resp_arr_len = ridx; + 
resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INTERNAL_V01; + return -EINVAL; + } + } + + IMP_FUNC_EXIT(); + + return 0; +} + +/** + * imp_handle_allocate_channel_req() - Allocate a new MHI channel + * + * Allocates MHI channel and start them. + * + * Return: QMI return codes + */ +struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req( + struct ipa_mhi_alloc_channel_req_msg_v01 *req) +{ + int ret; + struct ipa_mhi_alloc_channel_resp_msg_v01 *resp = + &imp_ctx->qmi.alloc_ch_resp; + + IMP_FUNC_ENTRY(); + + mutex_lock(&imp_ctx->mutex); + + memset(resp, 0, sizeof(*resp)); + + if (imp_ctx->state != IMP_READY) { + IMP_ERR("invalid state %d\n", imp_ctx->state); + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01; + mutex_unlock(&imp_ctx->mutex); + return resp; + } + + /* cache the req */ + memcpy(&imp_ctx->qmi.alloc_ch_req, req, sizeof(*req)); + + if (req->tr_info_arr_len > QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01) { + IMP_ERR("invalid tr_info_arr_len %d\n", req->tr_info_arr_len); + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_NO_MEMORY_V01; + mutex_unlock(&imp_ctx->mutex); + return resp; + } + + if ((req->ctrl_addr_map_info_len == 0 || + req->data_addr_map_info_len == 0) && + imp_ctx->dev_info.smmu_enabled) { + IMP_ERR("no mapping provided, but smmu is enabled\n"); + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INTERNAL_V01; + mutex_unlock(&imp_ctx->mutex); + return resp; + } + + if (imp_ctx->dev_info.smmu_enabled) { + /* map CTRL */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.ctrl, + req->ctrl_addr_map_info_len, + req->ctrl_addr_map_info, + true); + + /* map DATA */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.data, + req->data_addr_map_info_len, + req->data_addr_map_info, + true); + } + + resp->alloc_resp_arr_valid = true; + ret = __imp_configure_mhi_device(req, resp); + if (ret) + goto fail_smmu; + + IMP_DBG("Starting MHI channels %d and %d\n", + imp_ctx->md.ul_chan.props.id, + imp_ctx->md.dl_chan.props.id); + ret = mhi_prepare_for_transfer(imp_ctx->md.mhi_dev); + if (ret) { + IMP_ERR("mhi_prepare_for_transfer failed %d\n", ret); + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .ch_id = imp_ctx->md.ul_chan.props.id; + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .is_success = 0; + resp->alloc_resp_arr_len++; + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .ch_id = imp_ctx->md.dl_chan.props.id; + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .is_success = 0; + resp->alloc_resp_arr_len++; + resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp->resp.error = IPA_QMI_ERR_INTERNAL_V01; + goto fail_smmu; + } + + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .ch_id = imp_ctx->md.ul_chan.props.id; + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .is_success = 1; + resp->alloc_resp_arr_len++; + + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .ch_id = imp_ctx->md.dl_chan.props.id; + resp->alloc_resp_arr[resp->alloc_resp_arr_len] + .is_success = 1; + resp->alloc_resp_arr_len++; + + imp_ctx->state = IMP_STARTED; + mutex_unlock(&imp_ctx->mutex); + IMP_FUNC_EXIT(); + + resp->resp.result = IPA_QMI_RESULT_SUCCESS_V01; + return resp; + +fail_smmu: + if (imp_ctx->dev_info.smmu_enabled) { + /* unmap CTRL */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.ctrl, + req->ctrl_addr_map_info_len, + req->ctrl_addr_map_info, + false); + + /* 
unmap DATA */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.data, + req->data_addr_map_info_len, + req->data_addr_map_info, + false); + } + mutex_unlock(&imp_ctx->mutex); + return resp; +} + +/** + * imp_handle_vote_req() - Votes for MHI / PCIe clocks + * + * Hold a vote to prevent / allow low power mode on MHI. + * + * Return: 0 on success, negative otherwise + */ +int imp_handle_vote_req(bool vote) +{ + int ret; + + IMP_DBG_LOW("vote %d\n", vote); + + mutex_lock(&imp_ctx->mutex); + if (imp_ctx->state != IMP_STARTED) { + IMP_ERR("unexpected vote when in state %d\n", imp_ctx->state); + mutex_unlock(&imp_ctx->mutex); + return -EPERM; + } + + if (vote == imp_ctx->lpm_disabled) { + IMP_ERR("already voted/devoted %d\n", vote); + mutex_unlock(&imp_ctx->mutex); + return -EPERM; + } + mutex_unlock(&imp_ctx->mutex); + + /* + * Unlock the mutex before calling into mhi for clock vote + * to avoid deadlock on imp mutex. + * Calls into mhi are synchronous and imp callbacks are + * executed from mhi context. + */ + if (vote) { + ret = mhi_device_get_sync(imp_ctx->md.mhi_dev); + if (ret) { + IMP_ERR("mhi_sync_get failed %d\n", ret); + return ret; + } + } else { + mhi_device_put(imp_ctx->md.mhi_dev); + } + + mutex_lock(&imp_ctx->mutex); + if (vote) + imp_ctx->lpm_disabled = true; + else + imp_ctx->lpm_disabled = false; + mutex_unlock(&imp_ctx->mutex); + + return 0; +} + +static int imp_read_iova_from_dtsi(const char *node, struct imp_iova_addr *out) +{ + u32 iova_mapping[2]; + struct device_node *of_node = imp_ctx->dev_info.pdev->dev.of_node; + + if (of_property_read_u32_array(of_node, node, iova_mapping, 2)) { + IMP_DBG("failed to read of_node %s\n", node); + return -EINVAL; + } + + out->base = iova_mapping[0]; + out->size = iova_mapping[1]; + IMP_DBG("%s: base: 0x%pad size: 0x%x\n", node, &out->base, out->size); + + return 0; +} + +static void imp_mhi_shutdown(void) +{ + struct ipa_mhi_cleanup_req_msg_v01 req = { 0 }; + + IMP_FUNC_ENTRY(); + + if (imp_ctx->state == IMP_STARTED) { + req.cleanup_valid = true; + req.cleanup = true; + ipa3_qmi_send_mhi_cleanup_request(&req); + if (imp_ctx->dev_info.smmu_enabled) { + struct ipa_mhi_alloc_channel_req_msg_v01 *creq + = &imp_ctx->qmi.alloc_ch_req; + + /* unmap CTRL */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.ctrl, + creq->ctrl_addr_map_info_len, + creq->ctrl_addr_map_info, + false); + + /* unmap DATA */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.data, + creq->data_addr_map_info_len, + creq->data_addr_map_info, + false); + } + if (imp_ctx->lpm_disabled) { + mhi_device_put(imp_ctx->md.mhi_dev); + imp_ctx->lpm_disabled = false; + } + + /* unmap MHI doorbells from IPA uC SMMU */ + if (!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) { + struct ipa_smmu_cb_ctx *cb = + ipa3_get_smmu_ctx(IPA_SMMU_CB_UC); + unsigned long iova_p; + phys_addr_t pa_p; + u32 size_p; + + imp_smmu_round_to_page(imp_ctx->dev_info.chdb_base, + imp_ctx->dev_info.chdb_base, PAGE_SIZE, + &iova_p, &pa_p, &size_p); + + iommu_unmap(cb->mapping->domain, iova_p, size_p); + } + } + if (!imp_ctx->in_lpm && + (imp_ctx->state == IMP_READY || + imp_ctx->state == IMP_STARTED)) { + IMP_DBG("devote IMP with state= %d\n", imp_ctx->state); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP"); + } + imp_ctx->in_lpm = false; + imp_ctx->state = IMP_PROBED; + + IMP_FUNC_EXIT(); +} + +static int imp_mhi_probe_cb(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct imp_channel *ch; + struct imp_event *ev; + int ret; + + 
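/*
+	 * Probe flow: vote for the IPA clock, cache the UL/DL channel and
+	 * event ring properties from the MHI device, program each doorbell
+	 * address into an IPA uC mailbox, map the doorbell page through the
+	 * uC SMMU when S1 is not bypassed, and finally send the MHI ready
+	 * indication to the modem over QMI.
+	 */
+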
IMP_FUNC_ENTRY(); + + if (id != &mhi_driver_match_table[0]) { + IMP_ERR("only chan=%s is supported for now\n", + mhi_driver_match_table[0].chan); + return -EPERM; + } + + /* vote for IPA clock. IPA clock will be devoted when MHI enters LPM */ + IPA_ACTIVE_CLIENTS_INC_SPECIAL("IMP"); + + imp_ctx->md.mhi_dev = mhi_dev; + + mutex_lock(&imp_ctx->mutex); + /* store UL channel properties */ + ch = &imp_ctx->md.ul_chan; + ev = &imp_ctx->md.ul_chan.event; + + ch->props.id = mhi_dev->ul_chan_id; + ch->props.dir = DMA_TO_DEVICE; + ch->props.doorbell = imp_ctx->dev_info.chdb_base + ch->props.id * 8; + ch->props.uc_mbox_n = IMP_IPA_UC_UL_CH_n; + IMP_DBG("ul ch id %d doorbell 0x%pa uc_mbox_n %d\n", + ch->props.id, &ch->props.doorbell, ch->props.uc_mbox_n); + + ret = ipa3_uc_send_remote_ipa_info(ch->props.doorbell, + ch->props.uc_mbox_n); + if (ret) + goto fail; + IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ch->props.doorbell, + ch->props.uc_mbox_n); + + ev->props.id = mhi_dev->ul_event_id; + ev->props.doorbell = imp_ctx->dev_info.erdb_base + ev->props.id * 8; + ev->props.uc_mbox_n = IMP_IPA_UC_UL_EV_n; + IMP_DBG("allocated ev %d\n", ev->props.id); + + ret = ipa3_uc_send_remote_ipa_info(ev->props.doorbell, + ev->props.uc_mbox_n); + if (ret) + goto fail; + IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ev->props.doorbell, + ev->props.uc_mbox_n); + + /* store DL channel properties */ + ch = &imp_ctx->md.dl_chan; + ev = &imp_ctx->md.dl_chan.event; + + ch->props.dir = DMA_FROM_DEVICE; + ch->props.id = mhi_dev->dl_chan_id; + ch->props.doorbell = imp_ctx->dev_info.chdb_base + ch->props.id * 8; + ch->props.uc_mbox_n = IMP_IPA_UC_DL_CH_n; + IMP_DBG("dl ch id %d doorbell 0x%pa uc_mbox_n %d\n", + ch->props.id, &ch->props.doorbell, ch->props.uc_mbox_n); + + ret = ipa3_uc_send_remote_ipa_info(ch->props.doorbell, + ch->props.uc_mbox_n); + if (ret) + goto fail; + IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ch->props.doorbell, + ch->props.uc_mbox_n); + + ev->props.id = mhi_dev->dl_event_id; + ev->props.doorbell = imp_ctx->dev_info.erdb_base + ev->props.id * 8; + ev->props.uc_mbox_n = IMP_IPA_UC_DL_EV_n; + IMP_DBG("allocated ev %d\n", ev->props.id); + + ret = ipa3_uc_send_remote_ipa_info(ev->props.doorbell, + ev->props.uc_mbox_n); + if (ret) + goto fail; + IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ev->props.doorbell, + ev->props.uc_mbox_n); + + /* + * Map MHI doorbells to IPA uC SMMU. + * Both channel and event doorbells resides in a single page. 
+ */ + if (!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) { + struct ipa_smmu_cb_ctx *cb = + ipa3_get_smmu_ctx(IPA_SMMU_CB_UC); + unsigned long iova_p; + phys_addr_t pa_p; + u32 size_p; + + imp_smmu_round_to_page(imp_ctx->dev_info.chdb_base, + imp_ctx->dev_info.chdb_base, PAGE_SIZE, + &iova_p, &pa_p, &size_p); + + ret = ipa3_iommu_map(cb->mapping->domain, iova_p, pa_p, size_p, + IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + if (ret) + goto fail; + } + + imp_mhi_trigger_ready_ind(); + + mutex_unlock(&imp_ctx->mutex); + + IMP_FUNC_EXIT(); + return 0; + +fail: + mutex_unlock(&imp_ctx->mutex); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP"); + return ret; +} + +static void imp_mhi_remove_cb(struct mhi_device *mhi_dev) +{ + IMP_FUNC_ENTRY(); + + mutex_lock(&imp_ctx->mutex); + imp_mhi_shutdown(); + mutex_unlock(&imp_ctx->mutex); + IMP_FUNC_EXIT(); +} + +static void imp_mhi_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb) +{ + IMP_DBG("%d\n", mhi_cb); + + mutex_lock(&imp_ctx->lpm_mutex); + if (mhi_dev != imp_ctx->md.mhi_dev) { + IMP_DBG("ignoring secondary callbacks\n"); + mutex_unlock(&imp_ctx->lpm_mutex); + return; + } + + switch (mhi_cb) { + case MHI_CB_IDLE: + break; + case MHI_CB_LPM_ENTER: + if (imp_ctx->state == IMP_STARTED) { + if (!imp_ctx->in_lpm) { + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP"); + imp_ctx->in_lpm = true; + } else { + IMP_ERR("already in LPM\n"); + } + } + break; + case MHI_CB_LPM_EXIT: + if (imp_ctx->state == IMP_STARTED) { + if (imp_ctx->in_lpm) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL("IMP"); + imp_ctx->in_lpm = false; + } else { + IMP_ERR("not in LPM\n"); + } + } + break; + + case MHI_CB_EE_RDDM: + case MHI_CB_PENDING_DATA: + IMP_ERR("unexpected event %d\n", mhi_cb); + break; + } + mutex_unlock(&imp_ctx->lpm_mutex); +} + +static int imp_probe(struct platform_device *pdev) +{ + int ret; + + IMP_FUNC_ENTRY(); + + if (ipa3_uc_state_check()) { + IMP_DBG("uC not ready yet\n"); + return -EPROBE_DEFER; + } + + imp_ctx->dev_info.pdev = pdev; + imp_ctx->dev_info.smmu_enabled = true; + ret = imp_read_iova_from_dtsi("qcom,ctrl-iova", + &imp_ctx->dev_info.ctrl); + if (ret) + imp_ctx->dev_info.smmu_enabled = false; + + ret = imp_read_iova_from_dtsi("qcom,data-iova", + &imp_ctx->dev_info.data); + if (ret) + imp_ctx->dev_info.smmu_enabled = false; + + IMP_DBG("smmu_enabled=%d\n", imp_ctx->dev_info.smmu_enabled); + + if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-chdb-base", + &imp_ctx->dev_info.chdb_base)) { + IMP_ERR("failed to read of_node %s\n", "qcom,mhi-chdb-base"); + return -EINVAL; + } + IMP_DBG("chdb-base=0x%x\n", imp_ctx->dev_info.chdb_base); + + if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-erdb-base", + &imp_ctx->dev_info.erdb_base)) { + IMP_ERR("failed to read of_node %s\n", "qcom,mhi-erdb-base"); + return -EINVAL; + } + IMP_DBG("erdb-base=0x%x\n", imp_ctx->dev_info.erdb_base); + + imp_ctx->state = IMP_PROBED; + ret = mhi_driver_register(&mhi_driver); + if (ret) { + IMP_ERR("mhi_driver_register failed %d\n", ret); + mutex_unlock(&imp_ctx->mutex); + return ret; + } + + IMP_FUNC_EXIT(); + return 0; +} + +static int imp_remove(struct platform_device *pdev) +{ + IMP_FUNC_ENTRY(); + mhi_driver_unregister(&mhi_driver); + mutex_lock(&imp_ctx->mutex); + if (!imp_ctx->in_lpm && (imp_ctx->state == IMP_READY || + imp_ctx->state == IMP_STARTED)) { + IMP_DBG("devote IMP with state= %d\n", imp_ctx->state); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP"); + } + imp_ctx->lpm_disabled = false; + imp_ctx->state = IMP_INVALID; + mutex_unlock(&imp_ctx->mutex); + + mutex_lock(&imp_ctx->lpm_mutex); + 
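/* driver is being removed; clear any cached low-power-mode state */
+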
imp_ctx->in_lpm = false; + mutex_unlock(&imp_ctx->lpm_mutex); + + return 0; +} + +static const struct of_device_id imp_dt_match[] = { + { .compatible = "qcom,ipa-mhi-proxy" }, + {}, +}; +MODULE_DEVICE_TABLE(of, imp_dt_match); + +static struct platform_driver ipa_mhi_proxy_driver = { + .driver = { + .name = "ipa_mhi_proxy", + .of_match_table = imp_dt_match, + }, + .probe = imp_probe, + .remove = imp_remove, +}; + +/** + * imp_handle_modem_ready() - Registers IMP as a platform device + * + * This function is called after modem is loaded and QMI handshake is done. + * IMP will register itself as a platform device, and on support device the + * probe function will get called. + * + * Return: None + */ +void imp_handle_modem_ready(void) +{ + + if (!imp_ctx) { + imp_ctx = kzalloc(sizeof(*imp_ctx), GFP_KERNEL); + if (!imp_ctx) + return; + + mutex_init(&imp_ctx->mutex); + mutex_init(&imp_ctx->lpm_mutex); + } + + if (imp_ctx->state != IMP_INVALID) { + IMP_ERR("unexpected state %d\n", imp_ctx->state); + return; + } + + IMP_DBG("register platform device\n"); + platform_driver_register(&ipa_mhi_proxy_driver); +} + +/** + * imp_handle_modem_shutdown() - Handles modem SSR + * + * Performs MHI cleanup when modem is going to SSR (Subsystem Restart). + * + * Return: None + */ +void imp_handle_modem_shutdown(void) +{ + IMP_FUNC_ENTRY(); + + if (!imp_ctx) + return; + + mutex_lock(&imp_ctx->mutex); + + if (imp_ctx->state == IMP_INVALID) { + mutex_unlock(&imp_ctx->mutex); + return; + } + if (imp_ctx->state == IMP_STARTED) { + mhi_unprepare_from_transfer(imp_ctx->md.mhi_dev); + imp_ctx->state = IMP_READY; + } + + if (imp_ctx->state == IMP_READY) { + if (imp_ctx->dev_info.smmu_enabled) { + struct ipa_mhi_alloc_channel_req_msg_v01 *creq + = &imp_ctx->qmi.alloc_ch_req; + + /* unmap CTRL */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.ctrl, + creq->ctrl_addr_map_info_len, + creq->ctrl_addr_map_info, + false); + + /* unmap DATA */ + __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, + &imp_ctx->dev_info.data, + creq->data_addr_map_info_len, + creq->data_addr_map_info, + false); + } + } + + imp_ctx->state = IMP_PROBED; + mutex_unlock(&imp_ctx->mutex); + + IMP_FUNC_EXIT(); + + platform_driver_unregister(&ipa_mhi_proxy_driver); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA MHI Proxy Driver"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h new file mode 100644 index 000000000000..201d68517cef --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __IMP_H_ +#define __IMP_H_ + +#ifdef CONFIG_IPA3_MHI_PROXY + +#include "ipa_qmi_service.h" + +void imp_handle_modem_ready(void); + +struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req( + struct ipa_mhi_alloc_channel_req_msg_v01 *req); + +int imp_handle_vote_req(bool vote); + +void imp_handle_modem_shutdown(void); + +#else /* CONFIG_IPA3_MHI_PROXY */ + +static inline void imp_handle_modem_ready(void) +{ + +} + +static inline struct ipa_mhi_alloc_channel_resp_msg_v01 + *imp_handle_allocate_channel_req( + struct ipa_mhi_alloc_channel_req_msg_v01 *req) +{ + return NULL; +} + +static inline int imp_handle_vote_req(bool vote) +{ + return -EPERM; +} + +static inline void imp_handle_modem_shutdown(void) +{ + +} + +#endif /* CONFIG_IPA3_MHI_PROXY */ + +#endif /* __IMP_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c new file mode 100644 index 000000000000..9d8e4dac9dac --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c @@ -0,0 +1,1592 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_nat.h" + +/* + * The following for adding code (ie. for EMULATION) not found on x86. + */ +#if defined(CONFIG_IPA_EMULATION) +# include "ipa_emulation_stubs.h" +#endif + +#define IPA_NAT_PHYS_MEM_OFFSET 0 +#define IPA_IPV6CT_PHYS_MEM_OFFSET 0 +#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE +#define IPA_IPV6CT_PHYS_MEM_SIZE IPA_RAM_IPV6CT_SIZE + +#define IPA_NAT_IPV6CT_TEMP_MEM_SIZE 128 + +#define IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC 3 +#define IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC 2 +#define IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC 4 + +/* + * The base table max entries is limited by index into table 13 bits number. 
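+ * (a 13-bit index gives 2^13 = 8192 entries, hence IPA_TABLE_MAX_ENTRIES below)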
+ * Limit the memory size required by user to prevent kernel memory starvation + */ +#define IPA_TABLE_MAX_ENTRIES 8192 +#define MAX_ALLOC_NAT_SIZE(size) (IPA_TABLE_MAX_ENTRIES * size) + +enum ipa_nat_ipv6ct_table_type { + IPA_NAT_BASE_TBL = 0, + IPA_NAT_EXPN_TBL = 1, + IPA_NAT_INDX_TBL = 2, + IPA_NAT_INDEX_EXPN_TBL = 3, + IPA_IPV6CT_BASE_TBL = 4, + IPA_IPV6CT_EXPN_TBL = 5 +}; + +static int ipa3_nat_ipv6ct_vma_fault_remap(struct vm_fault *vmf) +{ + vmf->page = NULL; + + IPADBG("\n"); + return VM_FAULT_SIGBUS; +} + +/* VMA related file operations functions */ +static const struct vm_operations_struct ipa3_nat_ipv6ct_remap_vm_ops = { + .fault = ipa3_nat_ipv6ct_vma_fault_remap, +}; + +static int ipa3_nat_ipv6ct_open(struct inode *inode, struct file *filp) +{ + struct ipa3_nat_ipv6ct_common_mem *dev; + + IPADBG("\n"); + dev = container_of(inode->i_cdev, + struct ipa3_nat_ipv6ct_common_mem, cdev); + filp->private_data = dev; + IPADBG("return\n"); + + return 0; +} + +static int ipa3_nat_ipv6ct_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct ipa3_nat_ipv6ct_common_mem *dev = + (struct ipa3_nat_ipv6ct_common_mem *)filp->private_data; + unsigned long vsize = vma->vm_end - vma->vm_start; + unsigned long phys_addr; + int result = 0; + struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP); + + IPADBG("\n"); + + if (!dev->is_dev_init) { + IPAERR("attempt to mmap %s before dev init\n", dev->name); + return -EPERM; + } + + mutex_lock(&dev->lock); + if (!dev->is_mem_allocated) { + IPAERR_RL("attempt to mmap %s before the memory allocation\n", + dev->name); + result = -EPERM; + goto bail; + } + + if (dev->is_sys_mem) { + if (dev->is_mapped) { + IPAERR("%s already mapped, only 1 mapping supported\n", + dev->name); + result = -EINVAL; + goto bail; + } + } else { + if ((dev->phys_mem_size == 0) || (vsize > dev->phys_mem_size)) { + IPAERR_RL("wrong parameters to %s mapping\n", + dev->name); + result = -EINVAL; + goto bail; + } + } + /* check if smmu enable & dma_coherent mode */ + if (!cb->valid || + !is_device_dma_coherent(cb->dev)) { + vma->vm_page_prot = + pgprot_noncached(vma->vm_page_prot); + IPADBG("App smmu enable in DMA mode\n"); + } + + if (dev->is_sys_mem) { + IPADBG("Mapping system memory\n"); + IPADBG("map sz=0x%zx\n", dev->size); + result = + dma_mmap_coherent( + ipa3_ctx->pdev, vma, + dev->vaddr, dev->dma_handle, + dev->size); + if (result) { + IPAERR("unable to map memory. 
Err:%d\n", result); + goto bail; + } + dev->base_address = dev->vaddr; + } else { + IPADBG("Mapping shared(local) memory\n"); + IPADBG("map sz=0x%lx\n", vsize); + + phys_addr = ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, + dev->smem_offset); + + if (remap_pfn_range( + vma, vma->vm_start, + phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) { + IPAERR("remap failed\n"); + result = -EAGAIN; + goto bail; + } + dev->base_address = (void *)vma->vm_start; + } + result = 0; + vma->vm_ops = &ipa3_nat_ipv6ct_remap_vm_ops; + dev->is_mapped = true; + IPADBG("return\n"); +bail: + mutex_unlock(&dev->lock); + return result; +} + +static const struct file_operations ipa3_nat_ipv6ct_fops = { + .owner = THIS_MODULE, + .open = ipa3_nat_ipv6ct_open, + .mmap = ipa3_nat_ipv6ct_mmap +}; + +/** + * ipa3_allocate_nat_ipv6ct_tmp_memory() - Allocates the NAT\IPv6CT temp memory + */ +static struct ipa3_nat_ipv6ct_tmp_mem *ipa3_nat_ipv6ct_allocate_tmp_memory(void) +{ + struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem; + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO; + + IPADBG("\n"); + + tmp_mem = kzalloc(sizeof(*tmp_mem), GFP_KERNEL); + if (tmp_mem == NULL) + return NULL; + + tmp_mem->vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE, + &tmp_mem->dma_handle, gfp_flags); + if (tmp_mem->vaddr == NULL) + goto bail_tmp_mem; + + IPADBG("IPA successfully allocated temp memory\n"); + return tmp_mem; + +bail_tmp_mem: + kfree(tmp_mem); + return NULL; +} + +static int ipa3_nat_ipv6ct_init_device( + struct ipa3_nat_ipv6ct_common_mem *dev, + const char *name, + u32 phys_mem_size, + u32 smem_offset, + struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem) +{ + int result; + + IPADBG("Init %s\n", name); + + if (strnlen(name, IPA_DEV_NAME_MAX_LEN) == IPA_DEV_NAME_MAX_LEN) { + IPAERR("device name is too long\n"); + return -ENODEV; + } + + strlcpy(dev->name, name, IPA_DEV_NAME_MAX_LEN); + + dev->class = class_create(THIS_MODULE, name); + if (IS_ERR(dev->class)) { + IPAERR("unable to create the class for %s\n", name); + return -ENODEV; + } + result = alloc_chrdev_region(&dev->dev_num, 0, 1, name); + if (result) { + IPAERR("alloc_chrdev_region err. for %s\n", name); + result = -ENODEV; + goto alloc_chrdev_region_fail; + } + + dev->dev = device_create(dev->class, NULL, dev->dev_num, NULL, name); + + if (IS_ERR(dev->dev)) { + IPAERR("device_create err:%ld\n", PTR_ERR(dev->dev)); + result = -ENODEV; + goto device_create_fail; + } + + cdev_init(&dev->cdev, &ipa3_nat_ipv6ct_fops); + dev->cdev.owner = THIS_MODULE; + + mutex_init(&dev->lock); + mutex_lock(&dev->lock); + + result = cdev_add(&dev->cdev, dev->dev_num, 1); + if (result) { + IPAERR("cdev_add err=%d\n", -result); + goto cdev_add_fail; + } + + dev->phys_mem_size = phys_mem_size; + dev->smem_offset = smem_offset; + + dev->is_dev_init = true; + dev->tmp_mem = tmp_mem; + mutex_unlock(&dev->lock); + + IPADBG("ipa dev %s added successful. 
major:%d minor:%d\n", name, + MAJOR(dev->dev_num), MINOR(dev->dev_num)); + return 0; + +cdev_add_fail: + mutex_unlock(&dev->lock); + device_destroy(dev->class, dev->dev_num); +device_create_fail: + unregister_chrdev_region(dev->dev_num, 1); +alloc_chrdev_region_fail: + class_destroy(dev->class); + return result; +} + +static void ipa3_nat_ipv6ct_destroy_device( + struct ipa3_nat_ipv6ct_common_mem *dev) +{ + IPADBG("\n"); + + mutex_lock(&dev->lock); + + device_destroy(dev->class, dev->dev_num); + unregister_chrdev_region(dev->dev_num, 1); + class_destroy(dev->class); + dev->is_dev_init = false; + + mutex_unlock(&dev->lock); + + IPADBG("return\n"); +} + +/** + * ipa3_nat_ipv6ct_init_devices() - Initialize the NAT and IPv6CT devices + * + * Called during IPA init to create memory device + * + * Returns: 0 on success, negative on failure + */ +int ipa3_nat_ipv6ct_init_devices(void) +{ + struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem; + int result; + + IPADBG("\n"); + + /* + * Allocate NAT/IPv6CT temporary memory. The memory is never deleted, + * because provided to HW once NAT or IPv6CT table is deleted. + * NULL is a legal value + */ + tmp_mem = ipa3_nat_ipv6ct_allocate_tmp_memory(); + + if (ipa3_nat_ipv6ct_init_device( + &ipa3_ctx->nat_mem.dev, + IPA_NAT_DEV_NAME, + IPA_NAT_PHYS_MEM_SIZE, + IPA_NAT_PHYS_MEM_OFFSET, + tmp_mem)) { + IPAERR("unable to create nat device\n"); + result = -ENODEV; + goto fail_init_nat_dev; + } + + if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) && + ipa3_nat_ipv6ct_init_device( + &ipa3_ctx->ipv6ct_mem.dev, + IPA_IPV6CT_DEV_NAME, + IPA_IPV6CT_PHYS_MEM_SIZE, + IPA_IPV6CT_PHYS_MEM_OFFSET, + tmp_mem)) { + IPAERR("unable to create IPv6CT device\n"); + result = -ENODEV; + goto fail_init_ipv6ct_dev; + } + + return 0; + +fail_init_ipv6ct_dev: + ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->nat_mem.dev); +fail_init_nat_dev: + if (tmp_mem != NULL) { + dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE, + tmp_mem->vaddr, tmp_mem->dma_handle); + kfree(tmp_mem); + } + return result; +} + +/** + * ipa3_nat_ipv6ct_destroy_devices() - destroy the NAT and IPv6CT devices + * + * Called during IPA init to destroy nat device + */ +void ipa3_nat_ipv6ct_destroy_devices(void) +{ + ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->nat_mem.dev); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->ipv6ct_mem.dev); +} + +static int ipa3_nat_ipv6ct_allocate_mem(struct ipa3_nat_ipv6ct_common_mem *dev, + struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc, + enum ipahal_nat_type nat_type) +{ + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO; + int result = 0; + size_t nat_entry_size; + + IPADBG("passed memory size %zu for %s\n", + table_alloc->size, dev->name); + + if (!dev->is_dev_init) { + IPAERR("%s hasn't been initialized\n", dev->name); + result = -EPERM; + goto bail; + } + + if (dev->is_mem_allocated) { + IPAERR("Memory already allocated\n"); + result = 0; + goto bail; + } + + ipahal_nat_entry_size(nat_type, &nat_entry_size); + if (table_alloc->size > MAX_ALLOC_NAT_SIZE(nat_entry_size)) { + IPAERR("Trying allocate more size = %zu, Max allowed = %zu\n", + table_alloc->size, + MAX_ALLOC_NAT_SIZE(nat_entry_size)); + result = -EPERM; + goto bail; + } + + if (!table_alloc->size) { + IPAERR_RL("Invalid Parameters\n"); + result = -EPERM; + goto bail; + } + + if (table_alloc->size > IPA_NAT_PHYS_MEM_SIZE) { + IPADBG("Allocating system memory\n"); + dev->is_sys_mem = true; + dev->vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, table_alloc->size, + &dev->dma_handle, gfp_flags); + if 
(dev->vaddr == NULL) { + IPAERR("memory alloc failed\n"); + result = -ENOMEM; + goto bail; + } + dev->size = table_alloc->size; + } else { + IPADBG("using shared(local) memory\n"); + dev->is_sys_mem = false; + } + + IPADBG("return\n"); + +bail: + return result; +} + +/** + * ipa3_allocate_nat_device() - Allocates memory for the NAT device + * @mem: [in/out] memory parameters + * + * Called by NAT client driver to allocate memory for the NAT entries. Based on + * the request size either shared or system memory will be used. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem) +{ + int result; + struct ipa_ioc_nat_ipv6ct_table_alloc tmp; + + tmp.size = mem->size; + tmp.offset = 0; + + result = ipa3_allocate_nat_table(&tmp); + if (result) + goto bail; + + mem->offset = tmp.offset; + +bail: + return result; +} + +/** + * ipa3_allocate_nat_table() - Allocates memory for the NAT table + * @table_alloc: [in/out] memory parameters + * + * Called by NAT client to allocate memory for the table entries. + * Based on the request size either shared or system memory will be used. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_allocate_nat_table(struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc) +{ + struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem); + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO; + int result; + + IPADBG("\n"); + + mutex_lock(&nat_ctx->dev.lock); + + result = ipa3_nat_ipv6ct_allocate_mem(&nat_ctx->dev, table_alloc, + IPAHAL_NAT_IPV4); + if (result) + goto bail; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + size_t pdn_entry_size; + struct ipa_mem_buffer *pdn_mem = &nat_ctx->pdn_mem; + + ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size); + pdn_mem->size = pdn_entry_size * IPA_MAX_PDN_NUM; + if (IPA_MEM_PART(pdn_config_size) < pdn_mem->size) { + IPAERR( + "number of PDN entries exceeds SRAM available space\n"); + result = -ENOMEM; + goto fail_alloc_pdn; + } + + pdn_mem->base = dma_zalloc_coherent(ipa3_ctx->pdev, + pdn_mem->size, + &pdn_mem->phys_base, + gfp_flags); + if (!pdn_mem->base) { + IPAERR("fail to allocate PDN memory\n"); + result = -ENOMEM; + goto fail_alloc_pdn; + } + IPADBG("IPA NAT dev allocated PDN memory successfully\n"); + } + + nat_ctx->dev.is_mem_allocated = true; + IPADBG("IPA NAT dev init successfully\n"); + mutex_unlock(&nat_ctx->dev.lock); + + IPADBG("return\n"); + + return 0; + +fail_alloc_pdn: + if (nat_ctx->dev.vaddr) { + dma_free_coherent(ipa3_ctx->pdev, table_alloc->size, + nat_ctx->dev.vaddr, nat_ctx->dev.dma_handle); + nat_ctx->dev.vaddr = NULL; + } +bail: + mutex_unlock(&nat_ctx->dev.lock); + + return result; +} + +/** + * ipa3_allocate_ipv6ct_table() - Allocates memory for the IPv6CT table + * @table_alloc: [in/out] memory parameters + * + * Called by IPv6CT client to allocate memory for the table entries. + * Based on the request size either shared or system memory will be used. 
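+ * (requests larger than the IPA local RAM region are served from
+ * DMA-coherent system memory; see ipa3_nat_ipv6ct_allocate_mem())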
+ * + * Returns: 0 on success, negative on failure + */ +int ipa3_allocate_ipv6ct_table( + struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc) +{ + int result; + + IPADBG("\n"); + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + IPAERR_RL("IPv6 connection tracking isn't supported\n"); + return -EPERM; + } + + mutex_lock(&ipa3_ctx->ipv6ct_mem.dev.lock); + + result = ipa3_nat_ipv6ct_allocate_mem( + &ipa3_ctx->ipv6ct_mem.dev, table_alloc, IPAHAL_NAT_IPV6CT); + if (result) + goto bail; + + ipa3_ctx->ipv6ct_mem.dev.is_mem_allocated = true; + IPADBG("IPA IPv6CT dev init successfully\n"); + +bail: + mutex_unlock(&ipa3_ctx->ipv6ct_mem.dev.lock); + return result; +} + +static int ipa3_nat_ipv6ct_check_table_params( + struct ipa3_nat_ipv6ct_common_mem *dev, + uint32_t offset, uint16_t entries_num, + enum ipahal_nat_type nat_type) +{ + int result; + size_t entry_size, table_size; + + result = ipahal_nat_entry_size(nat_type, &entry_size); + if (result) { + IPAERR("Failed to retrieve size of entry for %s\n", + ipahal_nat_type_str(nat_type)); + return result; + } + table_size = entry_size * entries_num; + + /* check for integer overflow */ + if (offset > UINT_MAX - table_size) { + IPAERR_RL("Detected overflow\n"); + return -EPERM; + } + + /* Check offset is not beyond allocated size */ + if (dev->size < offset + table_size) { + IPAERR_RL("Table offset not valid\n"); + IPAERR_RL("offset:%d entries:%d table_size:%zu mem_size:%zu\n", + offset, entries_num, table_size, dev->size); + return -EPERM; + } + + if (dev->is_sys_mem && offset > UINT_MAX - dev->dma_handle) { + IPAERR_RL("Failed due to integer overflow\n"); + IPAERR_RL("%s dma_handle: 0x%pa offset: 0x%x\n", + dev->name, &dev->dma_handle, offset); + return -EPERM; + } + + return 0; +} + +static inline void ipa3_nat_ipv6ct_create_init_cmd( + struct ipahal_imm_cmd_nat_ipv6ct_init_common *table_init_cmd, + bool is_shared, + dma_addr_t base_addr, + uint8_t tbl_index, + uint32_t base_table_offset, + uint32_t expn_table_offset, + uint16_t table_entries, + uint16_t expn_table_entries, + const char *table_name) +{ + table_init_cmd->base_table_addr_shared = is_shared; + table_init_cmd->expansion_table_addr_shared = is_shared; + + table_init_cmd->base_table_addr = base_addr + base_table_offset; + IPADBG("%s base table offset:0x%x\n", table_name, base_table_offset); + + table_init_cmd->expansion_table_addr = base_addr + expn_table_offset; + IPADBG("%s expn table offset:0x%x\n", table_name, expn_table_offset); + + table_init_cmd->table_index = tbl_index; + IPADBG("%s table index:0x%x\n", table_name, tbl_index); + + table_init_cmd->size_base_table = table_entries; + IPADBG("%s base table size:0x%x\n", table_name, table_entries); + + table_init_cmd->size_expansion_table = expn_table_entries; + IPADBG("%s expansion table size:0x%x\n", + table_name, expn_table_entries); +} + +static inline void ipa3_nat_ipv6ct_init_device_structure( + struct ipa3_nat_ipv6ct_common_mem *dev, + uint32_t base_table_offset, + uint32_t expn_table_offset, + uint16_t table_entries, + uint16_t expn_table_entries) +{ + dev->base_table_addr = (char *)dev->base_address + base_table_offset; + IPADBG("%s base_table_addr: 0x%pK\n", dev->name, dev->base_table_addr); + + dev->expansion_table_addr = + (char *)dev->base_address + expn_table_offset; + IPADBG("%s expansion_table_addr: 0x%pK\n", + dev->name, dev->expansion_table_addr); + + IPADBG("%s table_entries: %d\n", dev->name, table_entries); + dev->table_entries = table_entries; + + IPADBG("%s expn_table_entries: %d\n", dev->name, 
expn_table_entries); + dev->expn_table_entries = expn_table_entries; +} + +static void ipa3_nat_create_init_cmd( + struct ipa_ioc_v4_nat_init *init, + bool is_shared, + dma_addr_t base_addr, + struct ipahal_imm_cmd_ip_v4_nat_init *cmd) +{ + IPADBG("\n"); + + ipa3_nat_ipv6ct_create_init_cmd( + &cmd->table_init, + is_shared, + base_addr, + init->tbl_index, + init->ipv4_rules_offset, + init->expn_rules_offset, + init->table_entries, + init->expn_table_entries, + ipa3_ctx->nat_mem.dev.name); + + cmd->index_table_addr_shared = is_shared; + cmd->index_table_expansion_addr_shared = is_shared; + + cmd->index_table_addr = + base_addr + init->index_offset; + IPADBG("index_offset:0x%x\n", init->index_offset); + + cmd->index_table_expansion_addr = + base_addr + init->index_expn_offset; + IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + /* + * starting IPAv4.0 public ip field changed to store the + * PDN config table offset in SMEM + */ + cmd->public_addr_info = IPA_MEM_PART(pdn_config_ofst); + IPADBG("pdn config base:0x%x\n", cmd->public_addr_info); + } else { + cmd->public_addr_info = init->ip_addr; + IPADBG("Public IP address:%pI4h\n", &cmd->public_addr_info); + } + + IPADBG("return\n"); +} + +static void ipa3_nat_create_modify_pdn_cmd( + struct ipahal_imm_cmd_dma_shared_mem *mem_cmd, bool zero_mem) +{ + size_t pdn_entry_size, mem_size; + + IPADBG("\n"); + + ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size); + mem_size = pdn_entry_size * IPA_MAX_PDN_NUM; + + if (zero_mem) + memset(ipa3_ctx->nat_mem.pdn_mem.base, 0, mem_size); + + /* Copy the PDN config table to SRAM */ + mem_cmd->is_read = false; + mem_cmd->skip_pipeline_clear = false; + mem_cmd->pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd->size = mem_size; + mem_cmd->system_addr = ipa3_ctx->nat_mem.pdn_mem.phys_base; + mem_cmd->local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(pdn_config_ofst); + + IPADBG("return\n"); +} + +static int ipa3_nat_send_init_cmd(struct ipahal_imm_cmd_ip_v4_nat_init *cmd, + bool zero_pdn_table) +{ + struct ipa3_desc desc[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC]; + struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC]; + int i, num_cmd = 0, result; + + IPADBG("\n"); + + /* NO-OP IC for ensuring that IPA pipeline is empty */ + cmd_pyld[num_cmd] = + ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("failed to construct NOP imm cmd\n"); + return -ENOMEM; + } + + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + ++num_cmd; + + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V4_NAT_INIT, cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR_RL("fail to construct NAT init imm cmd\n"); + result = -EPERM; + goto destroy_imm_cmd; + } + + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + ++num_cmd; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 }; + + if (num_cmd >= IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC) { + IPAERR("number of commands is out of range\n"); + result = -ENOBUFS; + goto destroy_imm_cmd; + } + + /* Copy the PDN config table to SRAM */ + ipa3_nat_create_modify_pdn_cmd(&mem_cmd, zero_pdn_table); + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR( + "fail construct dma_shared_mem cmd: for pdn table"); + result = -ENOMEM; + goto destroy_imm_cmd; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + 
++num_cmd; + IPADBG("added PDN table copy cmd\n"); + } + + result = ipa3_send_cmd(num_cmd, desc); + if (result) { + IPAERR("fail to send NAT init immediate command\n"); + goto destroy_imm_cmd; + } + + IPADBG("return\n"); + +destroy_imm_cmd: + for (i = 0; i < num_cmd; ++i) + ipahal_destroy_imm_cmd(cmd_pyld[i]); + + return result; +} + +static int ipa3_ipv6ct_send_init_cmd(struct ipahal_imm_cmd_ip_v6_ct_init *cmd) +{ + struct ipa3_desc desc[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC]; + struct ipahal_imm_cmd_pyld + *cmd_pyld[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC]; + int i, num_cmd = 0, result; + + IPADBG("\n"); + + /* NO-OP IC for ensuring that IPA pipeline is empty */ + cmd_pyld[num_cmd] = + ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("failed to construct NOP imm cmd\n"); + return -ENOMEM; + } + + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + ++num_cmd; + + if (num_cmd >= IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC) { + IPAERR("number of commands is out of range\n"); + result = -ENOBUFS; + goto destroy_imm_cmd; + } + + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V6_CT_INIT, cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR_RL("fail to construct IPv6CT init imm cmd\n"); + result = -EPERM; + goto destroy_imm_cmd; + } + + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + ++num_cmd; + + result = ipa3_send_cmd(num_cmd, desc); + if (result) { + IPAERR("Fail to send IPv6CT init immediate command\n"); + goto destroy_imm_cmd; + } + + IPADBG("return\n"); + +destroy_imm_cmd: + for (i = 0; i < num_cmd; ++i) + ipahal_destroy_imm_cmd(cmd_pyld[i]); + + return result; +} + +/* IOCTL function handlers */ +/** + * ipa3_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW + * @init: [in] initialization command attributes + * + * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) +{ + struct ipahal_imm_cmd_ip_v4_nat_init cmd; + int result; + + IPADBG("\n"); + + if (!ipa3_ctx->nat_mem.dev.is_mapped) { + IPAERR_RL("attempt to init %s before mmap\n", + ipa3_ctx->nat_mem.dev.name); + return -EPERM; + } + + if (init->tbl_index >= 1) { + IPAERR_RL("Unsupported table index %d\n", init->tbl_index); + return -EPERM; + } + + if (init->table_entries == 0) { + IPAERR_RL("Table entries is zero\n"); + return -EPERM; + } + + result = ipa3_nat_ipv6ct_check_table_params( + &ipa3_ctx->nat_mem.dev, + init->ipv4_rules_offset, + init->table_entries + 1, + IPAHAL_NAT_IPV4); + if (result) { + IPAERR_RL("Bad params for NAT base table\n"); + return result; + } + + result = ipa3_nat_ipv6ct_check_table_params( + &ipa3_ctx->nat_mem.dev, + init->expn_rules_offset, + init->expn_table_entries, + IPAHAL_NAT_IPV4); + if (result) { + IPAERR_RL("Bad params for NAT expansion table\n"); + return result; + } + + result = ipa3_nat_ipv6ct_check_table_params( + &ipa3_ctx->nat_mem.dev, + init->index_offset, + init->table_entries + 1, + IPAHAL_NAT_IPV4_INDEX); + if (result) { + IPAERR_RL("Bad params for index table\n"); + return result; + } + + result = ipa3_nat_ipv6ct_check_table_params( + &ipa3_ctx->nat_mem.dev, + init->index_expn_offset, + init->expn_table_entries, + IPAHAL_NAT_IPV4_INDEX); + if (result) { + IPAERR_RL("Bad params for index expansion table\n"); + return result; + } + + if (ipa3_ctx->nat_mem.dev.is_sys_mem) { + IPADBG("using system memory for nat table\n"); + /* + * Safe to process, since integer overflow was + * checked 
in ipa3_nat_ipv6ct_check_table_params + */ + ipa3_nat_create_init_cmd(init, false, + ipa3_ctx->nat_mem.dev.dma_handle, &cmd); + } else { + IPADBG("using shared(local) memory for nat table\n"); + ipa3_nat_create_init_cmd(init, true, IPA_RAM_NAT_OFST, &cmd); + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + struct ipa_pdn_entry *pdn_entries; + + /* store ip in pdn entries cache array */ + pdn_entries = ipa3_ctx->nat_mem.pdn_mem.base; + pdn_entries[0].public_ip = init->ip_addr; + pdn_entries[0].dst_metadata = 0; + pdn_entries[0].src_metadata = 0; + pdn_entries[0].resrvd = 0; + + IPADBG("Public ip address:0x%x\n", init->ip_addr); + } + + IPADBG("posting NAT init command\n"); + result = ipa3_nat_send_init_cmd(&cmd, false); + if (result) { + IPAERR("Fail to send NAT init immediate command\n"); + return result; + } + + ipa3_nat_ipv6ct_init_device_structure( + &ipa3_ctx->nat_mem.dev, + init->ipv4_rules_offset, + init->expn_rules_offset, + init->table_entries, + init->expn_table_entries); + + ipa3_ctx->nat_mem.public_ip_addr = init->ip_addr; + IPADBG("Public IP address:%pI4h\n", &ipa3_ctx->nat_mem.public_ip_addr); + + ipa3_ctx->nat_mem.index_table_addr = + (char *)ipa3_ctx->nat_mem.dev.base_address + + init->index_offset; + IPADBG("index_table_addr: 0x%pK\n", + ipa3_ctx->nat_mem.index_table_addr); + + ipa3_ctx->nat_mem.index_table_expansion_addr = + (char *)ipa3_ctx->nat_mem.dev.base_address + init->index_expn_offset; + IPADBG("index_table_expansion_addr: 0x%pK\n", + ipa3_ctx->nat_mem.index_table_expansion_addr); + + ipa3_ctx->nat_mem.dev.is_hw_init = true; + IPADBG("return\n"); + return 0; +} + +/** + * ipa3_ipv6ct_init_cmd() - Post IP_V6_CONN_TRACK_INIT command to IPA HW + * @init: [in] initialization command attributes + * + * Called by NAT client driver to post IP_V6_CONN_TRACK_INIT command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa3_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init) +{ + struct ipahal_imm_cmd_ip_v6_ct_init cmd; + int result; + + IPADBG("\n"); + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + IPAERR_RL("IPv6 connection tracking isn't supported\n"); + return -EPERM; + } + + if (!ipa3_ctx->ipv6ct_mem.dev.is_mapped) { + IPAERR_RL("attempt to init %s before mmap\n", + ipa3_ctx->ipv6ct_mem.dev.name); + return -EPERM; + } + + if (init->tbl_index >= 1) { + IPAERR_RL("Unsupported table index %d\n", init->tbl_index); + return -EPERM; + } + + if (init->table_entries == 0) { + IPAERR_RL("Table entries is zero\n"); + return -EPERM; + } + + result = ipa3_nat_ipv6ct_check_table_params( + &ipa3_ctx->ipv6ct_mem.dev, + init->base_table_offset, + init->table_entries + 1, + IPAHAL_NAT_IPV6CT); + if (result) { + IPAERR_RL("Bad params for IPv6CT base table\n"); + return result; + } + + result = ipa3_nat_ipv6ct_check_table_params( + &ipa3_ctx->ipv6ct_mem.dev, + init->expn_table_offset, + init->expn_table_entries, + IPAHAL_NAT_IPV6CT); + if (result) { + IPAERR_RL("Bad params for IPv6CT expansion table\n"); + return result; + } + + if (ipa3_ctx->ipv6ct_mem.dev.is_sys_mem) { + IPADBG("using system memory for nat table\n"); + /* + * Safe to process, since integer overflow was + * checked in ipa3_nat_ipv6ct_check_table_params + */ + ipa3_nat_ipv6ct_create_init_cmd( + &cmd.table_init, + false, + ipa3_ctx->ipv6ct_mem.dev.dma_handle, + init->tbl_index, + init->base_table_offset, + init->expn_table_offset, + init->table_entries, + init->expn_table_entries, + ipa3_ctx->ipv6ct_mem.dev.name); + } else { + IPADBG("using shared(local) memory for nat table\n"); + 
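+		/*
+		 * Local (shared) memory case: the table lives in IPA SRAM, so
+		 * the base address passed below is the fixed IPA_RAM_IPV6CT_OFST
+		 * offset and the "shared" flags are set so the addresses are
+		 * treated as SRAM offsets, unlike the is_sys_mem case above
+		 * which passes the table's dma_handle in system (DDR) memory.
+		 */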
ipa3_nat_ipv6ct_create_init_cmd( + &cmd.table_init, + true, + IPA_RAM_IPV6CT_OFST, + init->tbl_index, + init->base_table_offset, + init->expn_table_offset, + init->table_entries, + init->expn_table_entries, + ipa3_ctx->ipv6ct_mem.dev.name); + } + + IPADBG("posting ip_v6_ct_init imm command\n"); + result = ipa3_ipv6ct_send_init_cmd(&cmd); + if (result) { + IPAERR("fail to send IPv6CT init immediate command\n"); + return result; + } + + ipa3_nat_ipv6ct_init_device_structure( + &ipa3_ctx->ipv6ct_mem.dev, + init->base_table_offset, + init->expn_table_offset, + init->table_entries, + init->expn_table_entries); + + ipa3_ctx->ipv6ct_mem.dev.is_hw_init = true; + IPADBG("return\n"); + return 0; +} + +/** + * ipa3_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM + * @mdfy_pdn: [in] PDN info to be written to SRAM + * + * Called by NAT client driver to modify an entry in the PDN config table + * + * Returns: 0 on success, negative on failure + */ +int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn) +{ + struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 }; + struct ipa3_desc desc; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int result = 0; + struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem); + struct ipa_pdn_entry *pdn_entries = NULL; + + IPADBG("\n"); + + mutex_lock(&nat_ctx->dev.lock); + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + IPAERR_RL("IPA HW does not support multi PDN\n"); + result = -EPERM; + goto bail; + } + + if (!nat_ctx->dev.is_mem_allocated) { + IPAERR_RL( + "attempt to modify a PDN entry before the PDN table memory allocation\n"); + result = -EPERM; + goto bail; + } + + if (mdfy_pdn->pdn_index > (IPA_MAX_PDN_NUM - 1)) { + IPAERR_RL("pdn index out of range %d\n", mdfy_pdn->pdn_index); + result = -EPERM; + goto bail; + } + + pdn_entries = nat_ctx->pdn_mem.base; + + /* store ip in pdn entries cache array */ + pdn_entries[mdfy_pdn->pdn_index].public_ip = + mdfy_pdn->public_ip; + pdn_entries[mdfy_pdn->pdn_index].dst_metadata = + mdfy_pdn->dst_metadata; + pdn_entries[mdfy_pdn->pdn_index].src_metadata = + mdfy_pdn->src_metadata; + + /* mark tethering bit for remote modem */ + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_1) + pdn_entries[mdfy_pdn->pdn_index].src_metadata |= + IPA_QMAP_TETH_BIT; + + IPADBG("Modify PDN in index: %d Public ip address:%pI4h\n", + mdfy_pdn->pdn_index, + &pdn_entries[mdfy_pdn->pdn_index].public_ip); + IPADBG("Modify PDN dst metadata: 0x%x src metadata: 0x%x\n", + pdn_entries[mdfy_pdn->pdn_index].dst_metadata, + pdn_entries[mdfy_pdn->pdn_index].src_metadata); + + /* Copy the PDN config table to SRAM */ + ipa3_nat_create_modify_pdn_cmd(&mem_cmd, false); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld) { + IPAERR( + "fail construct dma_shared_mem cmd: for pdn table\n"); + result = -ENOMEM; + goto bail; + } + ipa3_init_imm_cmd_desc(&desc, cmd_pyld); + + IPADBG("sending PDN table copy cmd\n"); + result = ipa3_send_cmd(1, &desc); + if (result) + IPAERR("Fail to send PDN table copy immediate command\n"); + + ipahal_destroy_imm_cmd(cmd_pyld); + + IPADBG("return\n"); + +bail: + mutex_unlock(&nat_ctx->dev.lock); + return result; +} + +static uint32_t ipa3_nat_ipv6ct_calculate_table_size(uint8_t base_addr) +{ + size_t entry_size; + u32 entries_num; + enum ipahal_nat_type nat_type; + + switch (base_addr) { + case IPA_NAT_BASE_TBL: + entries_num = ipa3_ctx->nat_mem.dev.table_entries + 1; + nat_type = IPAHAL_NAT_IPV4; + break; + case IPA_NAT_EXPN_TBL: + entries_num = 
ipa3_ctx->nat_mem.dev.expn_table_entries; + nat_type = IPAHAL_NAT_IPV4; + break; + case IPA_NAT_INDX_TBL: + entries_num = ipa3_ctx->nat_mem.dev.table_entries + 1; + nat_type = IPAHAL_NAT_IPV4_INDEX; + break; + case IPA_NAT_INDEX_EXPN_TBL: + entries_num = ipa3_ctx->nat_mem.dev.expn_table_entries; + nat_type = IPAHAL_NAT_IPV4_INDEX; + break; + case IPA_IPV6CT_BASE_TBL: + entries_num = ipa3_ctx->ipv6ct_mem.dev.table_entries + 1; + nat_type = IPAHAL_NAT_IPV6CT; + break; + case IPA_IPV6CT_EXPN_TBL: + entries_num = ipa3_ctx->ipv6ct_mem.dev.expn_table_entries; + nat_type = IPAHAL_NAT_IPV6CT; + break; + default: + IPAERR_RL("Invalid base_addr %d for table DMA command\n", + base_addr); + return 0; + } + + ipahal_nat_entry_size(nat_type, &entry_size); + return entry_size * entries_num; +} + +static int ipa3_table_validate_table_dma_one(struct ipa_ioc_nat_dma_one *param) +{ + uint32_t table_size; + + if (param->table_index >= 1) { + IPAERR_RL("Unsupported table index %d\n", param->table_index); + return -EPERM; + } + + switch (param->base_addr) { + case IPA_NAT_BASE_TBL: + case IPA_NAT_EXPN_TBL: + case IPA_NAT_INDX_TBL: + case IPA_NAT_INDEX_EXPN_TBL: + if (!ipa3_ctx->nat_mem.dev.is_hw_init) { + IPAERR_RL("attempt to write to %s before HW int\n", + ipa3_ctx->nat_mem.dev.name); + return -EPERM; + } + break; + case IPA_IPV6CT_BASE_TBL: + case IPA_IPV6CT_EXPN_TBL: + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + IPAERR_RL("IPv6 connection tracking isn't supported\n"); + return -EPERM; + } + + if (!ipa3_ctx->ipv6ct_mem.dev.is_hw_init) { + IPAERR_RL("attempt to write to %s before HW int\n", + ipa3_ctx->ipv6ct_mem.dev.name); + return -EPERM; + } + break; + default: + IPAERR_RL("Invalid base_addr %d for table DMA command\n", + param->base_addr); + return -EPERM; + } + + table_size = ipa3_nat_ipv6ct_calculate_table_size(param->base_addr); + if (!table_size) { + IPAERR_RL("Failed to calculate table size for base_addr %d\n", + param->base_addr); + return -EPERM; + } + + if (param->offset >= table_size) { + IPAERR_RL("Invalid offset %d for table DMA command\n", + param->offset); + IPAERR_RL("table_index %d base addr %d size %d\n", + param->table_index, param->base_addr, table_size); + return -EPERM; + } + + return 0; +} + + +/** + * ipa3_table_dma_cmd() - Post TABLE_DMA command to IPA HW + * @dma: [in] initialization command attributes + * + * Called by NAT/IPv6CT clients to post TABLE_DMA command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa3_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) +{ + struct ipahal_imm_cmd_table_dma cmd; + enum ipahal_imm_cmd_name cmd_name = IPA_IMM_CMD_NAT_DMA; + struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC]; + struct ipa3_desc desc[IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC]; + uint8_t cnt, num_cmd = 0; + int result = 0; + + IPADBG("\n"); + if (!dma->entries || + dma->entries >= IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC) { + IPAERR_RL("Invalid number of entries %d\n", + dma->entries); + result = -EPERM; + goto bail; + } + + if (!ipa3_ctx->nat_mem.dev.is_dev_init) { + IPAERR_RL("NAT hasn't been initialized\n"); + return -EPERM; + } + + for (cnt = 0; cnt < dma->entries; ++cnt) { + result = ipa3_table_validate_table_dma_one(&dma->dma[cnt]); + if (result) { + IPAERR_RL("Table DMA command parameter %d is invalid\n", + cnt); + goto bail; + } + } + + /* NO-OP IC for ensuring that IPA pipeline is empty */ + cmd_pyld[num_cmd] = + ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("Failed to construct NOP imm 
cmd\n"); + result = -ENOMEM; + goto destroy_imm_cmd; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + ++num_cmd; + + /* NAT_DMA was renamed to TABLE_DMA starting from IPAv4 */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + cmd_name = IPA_IMM_CMD_TABLE_DMA; + + for (cnt = 0; cnt < dma->entries; ++cnt) { + cmd.table_index = dma->dma[cnt].table_index; + cmd.base_addr = dma->dma[cnt].base_addr; + cmd.offset = dma->dma[cnt].offset; + cmd.data = dma->dma[cnt].data; + cmd_pyld[num_cmd] = + ipahal_construct_imm_cmd(cmd_name, &cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR_RL("Fail to construct table_dma imm cmd\n"); + result = -ENOMEM; + goto destroy_imm_cmd; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + ++num_cmd; + } + result = ipa3_send_cmd(num_cmd, desc); + if (result) + IPAERR("Fail to send table_dma immediate command\n"); + + IPADBG("return\n"); + +destroy_imm_cmd: + for (cnt = 0; cnt < num_cmd; ++cnt) + ipahal_destroy_imm_cmd(cmd_pyld[cnt]); +bail: + return result; +} + +/** + * ipa3_nat_dma_cmd() - Post NAT_DMA command to IPA HW + * @dma: [in] initialization command attributes + * + * Called by NAT client driver to post NAT_DMA command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) +{ + return ipa3_table_dma_cmd(dma); +} + +static void ipa3_nat_ipv6ct_free_mem(struct ipa3_nat_ipv6ct_common_mem *dev) +{ + IPADBG("\n"); + if (!dev->is_mem_allocated) { + IPADBG("attempt to delete %s before memory allocation\n", + dev->name); + /* Deletion of partly initialized table is not an error */ + goto clear; + } + + if (dev->is_sys_mem) { + IPADBG("freeing the dma memory for %s\n", dev->name); + dma_free_coherent( + ipa3_ctx->pdev, dev->size, + dev->vaddr, dev->dma_handle); + dev->size = 0; + dev->vaddr = NULL; + } + + dev->is_mem_allocated = false; + +clear: + dev->table_entries = 0; + dev->expn_table_entries = 0; + dev->base_table_addr = NULL; + dev->expansion_table_addr = NULL; + + dev->is_hw_init = false; + dev->is_mapped = false; + dev->is_sys_mem = false; + + IPADBG("return\n"); +} + +static int ipa3_nat_ipv6ct_create_del_table_cmd( + uint8_t tbl_index, + u32 base_addr, + struct ipa3_nat_ipv6ct_common_mem *dev, + struct ipahal_imm_cmd_nat_ipv6ct_init_common *table_init_cmd) +{ + bool mem_type_shared = true; + + IPADBG("\n"); + + if (tbl_index >= 1) { + IPAERR_RL("Unsupported table index %d\n", tbl_index); + return -EPERM; + } + + if (dev->tmp_mem != NULL) { + IPADBG("using temp memory during %s del\n", dev->name); + mem_type_shared = false; + base_addr = dev->tmp_mem->dma_handle; + } + + table_init_cmd->table_index = tbl_index; + table_init_cmd->base_table_addr = base_addr; + table_init_cmd->base_table_addr_shared = mem_type_shared; + table_init_cmd->expansion_table_addr = base_addr; + table_init_cmd->expansion_table_addr_shared = mem_type_shared; + table_init_cmd->size_base_table = 0; + table_init_cmd->size_expansion_table = 0; + IPADBG("return\n"); + + return 0; +} + +static int ipa3_nat_send_del_table_cmd(uint8_t tbl_index) +{ + struct ipahal_imm_cmd_ip_v4_nat_init cmd; + int result; + + IPADBG("\n"); + + result = ipa3_nat_ipv6ct_create_del_table_cmd( + tbl_index, + IPA_NAT_PHYS_MEM_OFFSET, + &ipa3_ctx->nat_mem.dev, + &cmd.table_init); + if (result) { + IPAERR( + "Fail to create immediate command to delete NAT table\n"); + return result; + } + + cmd.index_table_addr = cmd.table_init.base_table_addr; + cmd.index_table_addr_shared = cmd.table_init.base_table_addr_shared; + 
cmd.index_table_expansion_addr = cmd.index_table_addr; + cmd.index_table_expansion_addr_shared = cmd.index_table_addr_shared; + cmd.public_addr_info = 0; + + IPADBG("posting NAT delete command\n"); + result = ipa3_nat_send_init_cmd(&cmd, true); + if (result) { + IPAERR("Fail to send NAT delete immediate command\n"); + return result; + } + + IPADBG("return\n"); + return 0; +} + +static int ipa3_ipv6ct_send_del_table_cmd(uint8_t tbl_index) +{ + struct ipahal_imm_cmd_ip_v6_ct_init cmd; + int result; + + IPADBG("\n"); + + result = ipa3_nat_ipv6ct_create_del_table_cmd( + tbl_index, + IPA_IPV6CT_PHYS_MEM_OFFSET, + &ipa3_ctx->ipv6ct_mem.dev, + &cmd.table_init); + if (result) { + IPAERR( + "Fail to create immediate command to delete IPv6CT table\n"); + return result; + } + + IPADBG("posting IPv6CT delete command\n"); + result = ipa3_ipv6ct_send_init_cmd(&cmd); + if (result) { + IPAERR("Fail to send IPv6CT delete immediate command\n"); + return result; + } + + IPADBG("return\n"); + return 0; +} + +/** + * ipa3_nat_del_cmd() - Delete a NAT table + * @del: [in] delete table table table parameters + * + * Called by NAT client driver to delete the nat table + * + * Returns: 0 on success, negative on failure + */ +int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del) +{ + struct ipa_ioc_nat_ipv6ct_table_del tmp; + + tmp.table_index = del->table_index; + + return ipa3_del_nat_table(&tmp); +} + +/** + * ipa3_del_nat_table() - Delete the NAT table + * @del: [in] delete table parameters + * + * Called by NAT client to delete the table + * + * Returns: 0 on success, negative on failure + */ +int ipa3_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del) +{ + int result = 0; + + IPADBG("\n"); + if (!ipa3_ctx->nat_mem.dev.is_dev_init) { + IPAERR("NAT hasn't been initialized\n"); + return -EPERM; + } + + mutex_lock(&ipa3_ctx->nat_mem.dev.lock); + + if (ipa3_ctx->nat_mem.dev.is_hw_init) { + result = ipa3_nat_send_del_table_cmd(del->table_index); + if (result) { + IPAERR( + "Fail to send immediate command to delete NAT table\n"); + goto bail; + } + } + + ipa3_ctx->nat_mem.public_ip_addr = 0; + ipa3_ctx->nat_mem.index_table_addr = 0; + ipa3_ctx->nat_mem.index_table_expansion_addr = 0; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 && + ipa3_ctx->nat_mem.dev.is_mem_allocated) { + IPADBG("freeing the PDN memory\n"); + dma_free_coherent(ipa3_ctx->pdev, + ipa3_ctx->nat_mem.pdn_mem.size, + ipa3_ctx->nat_mem.pdn_mem.base, + ipa3_ctx->nat_mem.pdn_mem.phys_base); + ipa3_ctx->nat_mem.pdn_mem.base = NULL; + ipa3_ctx->nat_mem.dev.is_mem_allocated = false; + } + + ipa3_nat_ipv6ct_free_mem(&ipa3_ctx->nat_mem.dev); + IPADBG("return\n"); + +bail: + mutex_unlock(&ipa3_ctx->nat_mem.dev.lock); + return result; +} + +/** + * ipa3_del_ipv6ct_table() - Delete the IPv6CT table + * @del: [in] delete table parameters + * + * Called by IPv6CT client to delete the table + * + * Returns: 0 on success, negative on failure + */ +int ipa3_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del) +{ + int result = 0; + + IPADBG("\n"); + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + IPAERR_RL("IPv6 connection tracking isn't supported\n"); + return -EPERM; + } + + if (!ipa3_ctx->ipv6ct_mem.dev.is_dev_init) { + IPAERR("IPv6 connection tracking hasn't been initialized\n"); + return -EPERM; + } + + mutex_lock(&ipa3_ctx->ipv6ct_mem.dev.lock); + + if (ipa3_ctx->ipv6ct_mem.dev.is_hw_init) { + result = ipa3_ipv6ct_send_del_table_cmd(del->table_index); + if (result) { + IPAERR( + "Fail to send immediate command to delete IPv6CT table\n"); + goto bail; + 
} + } + + ipa3_nat_ipv6ct_free_mem(&ipa3_ctx->ipv6ct_mem.dev); + IPADBG("return\n"); + +bail: + mutex_unlock(&ipa3_ctx->ipv6ct_mem.dev.lock); + return result; +} + diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c new file mode 100644 index 000000000000..21f81047c1c9 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c @@ -0,0 +1,684 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include "ipa_i.h" +#include "ipa_odl.h" +#include +#include +#include + +struct ipa_odl_context *ipa3_odl_ctx; + +static DECLARE_WAIT_QUEUE_HEAD(odl_ctl_msg_wq); + +static void print_ipa_odl_state_bit_mask(void) +{ + IPADBG("ipa3_odl_ctx->odl_state.odl_init --> %d\n", + ipa3_odl_ctx->odl_state.odl_init); + IPADBG("ipa3_odl_ctx->odl_state.odl_open --> %d\n", + ipa3_odl_ctx->odl_state.odl_open); + IPADBG("ipa3_odl_ctx->odl_state.adpl_open --> %d\n", + ipa3_odl_ctx->odl_state.adpl_open); + IPADBG("ipa3_odl_ctx->odl_state.aggr_byte_limit_sent --> %d\n", + ipa3_odl_ctx->odl_state.aggr_byte_limit_sent); + IPADBG("ipa3_odl_ctx->odl_state.odl_ep_setup --> %d\n", + ipa3_odl_ctx->odl_state.odl_ep_setup); + IPADBG("ipa3_odl_ctx->odl_state.odl_setup_done_sent --> %d\n", + ipa3_odl_ctx->odl_state.odl_setup_done_sent); + IPADBG("ipa3_odl_ctx->odl_state.odl_ep_info_sent --> %d\n", + ipa3_odl_ctx->odl_state.odl_ep_info_sent); + IPADBG("ipa3_odl_ctx->odl_state.odl_connected --> %d\n", + ipa3_odl_ctx->odl_state.odl_connected); + IPADBG("ipa3_odl_ctx->odl_state.odl_disconnected --> %d\n\n", + ipa3_odl_ctx->odl_state.odl_disconnected); +} + +static int ipa_odl_ctl_fops_open(struct inode *inode, struct file *filp) +{ + int ret = 0; + + if (ipa3_odl_ctx->odl_state.odl_init) { + ipa3_odl_ctx->odl_state.odl_open = true; + } else { + IPAERR("Before odl init trying to open odl ctl pipe\n"); + print_ipa_odl_state_bit_mask(); + ret = -ENODEV; + } + + return ret; +} + +static int ipa_odl_ctl_fops_release(struct inode *inode, struct file *filp) +{ + IPADBG("QTI closed ipa_odl_ctl node\n"); + ipa3_odl_ctx->odl_state.odl_open = false; + return 0; +} + +/** + * ipa_odl_ctl_fops_read() - read message from IPA ODL device + * @filp: [in] file pointer + * @buf: [out] buffer to read into + * @count: [in] size of above buffer + * @f_pos: [inout] file position + * + * Uer-space should continuously read from /dev/ipa_odl_ctl, + * read will block when there are no messages to read. + * Upon return, user-space should read the u32 data from the + * start of the buffer. + * + * 0 --> ODL disconnected. + * 1 --> ODL connected. + * + * Buffer supplied must be big enough to + * hold the message of size u32. 
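+ *
+ * A minimal user-space sketch (illustrative only, not part of this driver;
+ * handle_odl_state() is a placeholder, the device node is the one created
+ * in ipa_odl_init()):
+ *
+ *   int fd = open("/dev/ipa_odl_ctl", O_RDONLY);
+ *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *   char buf[4] = { 0 };
+ *
+ *   poll(&pfd, 1, -1);                /* wait for a state change        */
+ *   if (read(fd, buf, sizeof(buf)) > 0)
+ *           handle_odl_state(buf[0]); /* 1: ODL connected, 0: disconnected */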
+ * + * Returns: how many bytes copied to buffer + * + * Note: Should not be called from atomic context + */ + +static ssize_t ipa_odl_ctl_fops_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + char __user *start; + u8 data; + int ret = 0; + static bool old_state; + bool new_state = false; + + start = buf; + ipa3_odl_ctx->odl_ctl_msg_wq_flag = false; + + if (!ipa3_odl_ctx->odl_state.adpl_open && + !ipa3_odl_ctx->odl_state.odl_disconnected) { + IPADBG("Failed to send data odl pipe already disconnected\n"); + ret = -EFAULT; + goto send_failed; + } + + if (ipa3_odl_ctx->odl_state.odl_ep_setup) + new_state = true; + else if (ipa3_odl_ctx->odl_state.odl_disconnected) + new_state = false; + else { + IPADBG("Failed to send data odl already running\n"); + ret = -EFAULT; + goto send_failed; + } + + if (old_state != new_state) { + old_state = new_state; + + if (new_state) + data = 1; + else if (!new_state) + data = 0; + + if (copy_to_user(buf, &data, + sizeof(data))) { + IPADBG("Cpoying data to user failed\n"); + ret = -EFAULT; + goto send_failed; + } + + buf += sizeof(data); + + if (data == 1) + ipa3_odl_ctx->odl_state.odl_setup_done_sent = + true; + } + + + if (start != buf && ret != -EFAULT) + ret = buf - start; +send_failed: + return ret; +} + +static unsigned int ipa_odl_ctl_fops_poll(struct file *file, poll_table *wait) +{ + unsigned int mask = 0; + + poll_wait(file, &odl_ctl_msg_wq, wait); + + if (ipa3_odl_ctx->odl_ctl_msg_wq_flag) { + IPADBG("Sending read mask to odl control pipe\n"); + mask |= POLLIN | POLLRDNORM; + } + return mask; +} + +static long ipa_odl_ctl_fops_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct ipa_odl_ep_info ep_info = {0}; + struct ipa_odl_modem_config status; + int retval = 0; + + IPADBG("Calling odl ioctl cmd = %d\n", cmd); + if (!ipa3_odl_ctx->odl_state.odl_setup_done_sent) { + IPAERR("Before complete the odl setup trying calling ioctl\n"); + print_ipa_odl_state_bit_mask(); + retval = -ENODEV; + goto fail; + } + + switch (cmd) { + case IPA_IOC_ODL_QUERY_ADAPL_EP_INFO: + /* Send ep_info to user APP */ + ep_info.ep_type = ODL_EP_TYPE_HSUSB; + ep_info.peripheral_iface_id = ODL_EP_PERIPHERAL_IFACE_ID; + ep_info.cons_pipe_num = -1; + ep_info.prod_pipe_num = + ipa3_odl_ctx->odl_client_hdl; + if (copy_to_user((void __user *)arg, &ep_info, + sizeof(ep_info))) { + retval = -EFAULT; + goto fail; + } + ipa3_odl_ctx->odl_state.odl_ep_info_sent = true; + break; + case IPA_IOC_ODL_QUERY_MODEM_CONFIG: + IPADBG("Received the IPA_IOC_ODL_QUERY_MODEM_CONFIG :\n"); + if (copy_from_user(&status, (const void __user *)arg, + sizeof(status))) { + retval = -EFAULT; + break; + } + if (status.config_status == CONFIG_SUCCESS) + ipa3_odl_ctx->odl_state.odl_connected = true; + IPADBG("status.config_status = %d odl_connected = %d\n", + status.config_status, ipa3_odl_ctx->odl_state.odl_connected); + break; + default: + retval = -ENOIOCTLCMD; + break; + } + +fail: + return retval; +} + +static void delete_first_node(void) +{ + struct ipa3_push_msg_odl *msg; + + if (!list_empty(&ipa3_odl_ctx->adpl_msg_list)) { + msg = list_first_entry(&ipa3_odl_ctx->adpl_msg_list, + struct ipa3_push_msg_odl, link); + if (msg) { + list_del(&msg->link); + kfree(msg->buff); + kfree(msg); + ipa3_odl_ctx->stats.odl_drop_pkt++; + if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue)) + atomic_dec(&ipa3_odl_ctx->stats.numer_in_queue); + } + } else { + IPADBG("List Empty\n"); + } +} + +int ipa3_send_adpl_msg(unsigned long skb_data) +{ + struct ipa3_push_msg_odl *msg; + 
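+	/*
+	 * Each DPL skb handed up by IPA is duplicated into a list node and
+	 * queued on adpl_msg_list; once the queue holds MAX_QUEUE_TO_ODL
+	 * entries the oldest node is dropped via delete_first_node().
+	 * ipa_adpl_read() later drains the list to the user-space reader.
+	 */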
struct sk_buff *skb = (struct sk_buff *)skb_data; + void *data; + + IPADBG_LOW("Processing DPL data\n"); + msg = kzalloc(sizeof(struct ipa3_push_msg_odl), GFP_KERNEL); + if (msg == NULL) { + IPADBG("Memory allocation failed\n"); + return -ENOMEM; + } + + data = kmemdup(skb->data, skb->len, GFP_KERNEL); + if (data == NULL) { + kfree(msg); + return -ENOMEM; + } + memcpy(data, skb->data, skb->len); + msg->buff = data; + msg->len = skb->len; + mutex_lock(&ipa3_odl_ctx->adpl_msg_lock); + if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue) >= + MAX_QUEUE_TO_ODL) + delete_first_node(); + list_add_tail(&msg->link, &ipa3_odl_ctx->adpl_msg_list); + atomic_inc(&ipa3_odl_ctx->stats.numer_in_queue); + mutex_unlock(&ipa3_odl_ctx->adpl_msg_lock); + IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.odl_rx_pkt); + + return 0; +} + +/** + * odl_ipa_packet_receive_notify() - Rx notify + * + * @priv: driver context + * @evt: event type + * @data: data provided with event + * + * IPA will pass a packet to the Linux network stack with skb->data + */ +static void odl_ipa_packet_receive_notify(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + IPADBG_LOW("Rx packet was received\n"); + if (evt == IPA_RECEIVE) + ipa3_send_adpl_msg(data); + else + IPAERR("Invalid evt %d received in wan_ipa_receive\n", evt); +} + +int ipa_setup_odl_pipe(void) +{ + struct ipa_sys_connect_params *ipa_odl_ep_cfg; + int ret; + + ipa_odl_ep_cfg = &ipa3_odl_ctx->odl_sys_param; + + IPADBG("Setting up the odl endpoint\n"); + ipa_odl_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL; + + ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR; + ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1; + ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr = IPA_GENERIC; + ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_ODL_AGGR_BYTE_LIMIT; + ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit = 0; + + ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4; + ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1; + ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 1; + ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1; + ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2; + + ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true; + ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0; + ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = true; + ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0; + ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian = 0; + ipa_odl_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000; + + ipa_odl_ep_cfg->client = IPA_CLIENT_ODL_DPL_CONS; + ipa_odl_ep_cfg->notify = odl_ipa_packet_receive_notify; + + ipa_odl_ep_cfg->napi_obj = NULL; + ipa_odl_ep_cfg->desc_fifo_sz = IPA_ODL_RX_RING_SIZE * + IPA_FIFO_ELEMENT_SIZE; + + ret = ipa3_setup_sys_pipe(ipa_odl_ep_cfg, + &ipa3_odl_ctx->odl_client_hdl); + return ret; + +} + +int ipa3_odl_pipe_open(void) +{ + int ret = 0; + struct ipa_ep_cfg_holb holb_cfg; + + if (!ipa3_odl_ctx->odl_state.adpl_open) { + IPAERR("adpl pipe not configured\n"); + return 0; + } + + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.tmr_val = 0; + holb_cfg.en = 1; + + ipa3_cfg_ep_holb_by_client(IPA_CLIENT_USB_DPL_CONS, &holb_cfg); + ret = ipa_setup_odl_pipe(); + if (ret) { + IPAERR(" Setup endpoint config failed\n"); + goto fail; + } + ipa3_cfg_ep_holb_by_client(IPA_CLIENT_ODL_DPL_CONS, &holb_cfg); + ipa3_odl_ctx->odl_state.odl_ep_setup = true; + IPADBG("Setup endpoint config success\n"); + + ipa3_odl_ctx->stats.odl_drop_pkt 
= 0; + atomic_set(&ipa3_odl_ctx->stats.numer_in_queue, 0); + ipa3_odl_ctx->stats.odl_rx_pkt = 0; + ipa3_odl_ctx->stats.odl_tx_diag_pkt = 0; + /* + * Send signal to ipa_odl_ctl_fops_read, + * to send ODL ep open notification + */ + ipa3_odl_ctx->odl_ctl_msg_wq_flag = true; + IPADBG("Wake up odl ctl\n"); + wake_up_interruptible(&odl_ctl_msg_wq); + if (ipa3_odl_ctx->odl_state.odl_disconnected) + ipa3_odl_ctx->odl_state.odl_disconnected = false; +fail: + return ret; + +} +static int ipa_adpl_open(struct inode *inode, struct file *filp) +{ + int ret = 0; + + IPADBG("Called the function :\n"); + if (ipa3_odl_ctx->odl_state.odl_init) { + ipa3_odl_ctx->odl_state.adpl_open = true; + ret = ipa3_odl_pipe_open(); + } else { + IPAERR("Before odl init trying to open adpl pipe\n"); + print_ipa_odl_state_bit_mask(); + ret = -ENODEV; + } + + return ret; +} + +static int ipa_adpl_release(struct inode *inode, struct file *filp) +{ + ipa3_odl_pipe_cleanup(false); + return 0; +} + +void ipa3_odl_pipe_cleanup(bool is_ssr) +{ + bool ipa_odl_opened = false; + struct ipa_ep_cfg_holb holb_cfg; + + if (!ipa3_odl_ctx->odl_state.adpl_open) { + IPAERR("adpl pipe not configured\n"); + return; + } + if (ipa3_odl_ctx->odl_state.odl_open) + ipa_odl_opened = true; + + memset(&ipa3_odl_ctx->odl_state, 0, sizeof(ipa3_odl_ctx->odl_state)); + + /*Since init will not be done again*/ + ipa3_odl_ctx->odl_state.odl_init = true; + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.tmr_val = 0; + holb_cfg.en = 0; + + ipa3_cfg_ep_holb_by_client(IPA_CLIENT_USB_DPL_CONS, &holb_cfg); + + ipa3_teardown_sys_pipe(ipa3_odl_ctx->odl_client_hdl); + /*Assume QTI will never close this node once opened*/ + if (ipa_odl_opened) + ipa3_odl_ctx->odl_state.odl_open = true; + + /*Assume DIAG will not close this node in SSR case*/ + if (is_ssr) + ipa3_odl_ctx->odl_state.adpl_open = true; + + ipa3_odl_ctx->odl_state.odl_disconnected = true; + ipa3_odl_ctx->odl_state.odl_ep_setup = false; + ipa3_odl_ctx->odl_state.aggr_byte_limit_sent = false; + ipa3_odl_ctx->odl_state.odl_connected = false; + /* + * Send signal to ipa_odl_ctl_fops_read, + * to send ODL ep close notification + */ + ipa3_odl_ctx->odl_ctl_msg_wq_flag = true; + ipa3_odl_ctx->stats.odl_drop_pkt = 0; + atomic_set(&ipa3_odl_ctx->stats.numer_in_queue, 0); + ipa3_odl_ctx->stats.odl_rx_pkt = 0; + ipa3_odl_ctx->stats.odl_tx_diag_pkt = 0; + IPADBG("Wake up odl ctl\n"); + wake_up_interruptible(&odl_ctl_msg_wq); + +} + +/** + * ipa_adpl_read() - read message from IPA device + * @filp: [in] file pointer + * @buf: [out] buffer to read into + * @count: [in] size of above buffer + * @f_pos: [inout] file position + * + * User-space should continually read from /dev/ipa_adpl, + * read will block when there are no messages to read. + * Upon return, user-space should read + * Buffer supplied must be big enough to + * hold the data. 
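+ *
+ * A minimal user-space sketch (illustrative only, not part of this driver;
+ * consume_dpl() is a placeholder for the reader's own packet handling):
+ *
+ *   int fd = open("/dev/ipa_adpl", O_RDONLY);
+ *   char buf[16 * 1024];   /* >= the 15KB DPL aggregation byte limit */
+ *   ssize_t n = read(fd, buf, sizeof(buf));
+ *
+ *   if (n > 0)
+ *           consume_dpl(buf, n);   /* may hold several queued messages */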
+ * + * Returns: how many bytes copied to buffer + * + * Note: Should not be called from atomic context + */ +static ssize_t ipa_adpl_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos) +{ + int ret = 0; + char __user *start = buf; + struct ipa3_push_msg_odl *msg; + + while (1) { + IPADBG_LOW("Writing message to adpl pipe\n"); + if (!ipa3_odl_ctx->odl_state.odl_open) + break; + + mutex_lock(&ipa3_odl_ctx->adpl_msg_lock); + msg = NULL; + if (!list_empty(&ipa3_odl_ctx->adpl_msg_list)) { + msg = list_first_entry(&ipa3_odl_ctx->adpl_msg_list, + struct ipa3_push_msg_odl, link); + list_del(&msg->link); + if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue)) + atomic_dec(&ipa3_odl_ctx->stats.numer_in_queue); + } + + mutex_unlock(&ipa3_odl_ctx->adpl_msg_lock); + + if (msg != NULL) { + if (msg->len > count) { + IPAERR("Message length greater than count\n"); + kfree(msg->buff); + kfree(msg); + msg = NULL; + ret = -EAGAIN; + break; + } + + if (msg->buff) { + if (copy_to_user(buf, msg->buff, + msg->len)) { + ret = -EFAULT; + kfree(msg->buff); + kfree(msg); + msg = NULL; + ret = -EAGAIN; + break; + } + buf += msg->len; + count -= msg->len; + kfree(msg->buff); + } + IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.odl_tx_diag_pkt); + kfree(msg); + msg = NULL; + } else { + ret = -EAGAIN; + break; + } + + ret = -EAGAIN; + if (filp->f_flags & O_NONBLOCK) + break; + + ret = -EINTR; + if (signal_pending(current)) + break; + + if (start != buf) + break; + + } + + if (start != buf && ret != -EFAULT) + ret = buf - start; + + return ret; +} + +static long ipa_adpl_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + struct odl_agg_pipe_info odl_pipe_info; + int retval = 0; + + if (!ipa3_odl_ctx->odl_state.odl_connected) { + IPAERR("ODL config in progress not allowed ioctl\n"); + print_ipa_odl_state_bit_mask(); + retval = -ENODEV; + goto fail; + } + IPADBG("Calling adpl ioctl\n"); + + switch (cmd) { + case IPA_IOC_ODL_GET_AGG_BYTE_LIMIT: + odl_pipe_info.agg_byte_limit = + ipa3_odl_ctx->odl_sys_param.ipa_ep_cfg.aggr.aggr_byte_limit; + if (copy_to_user((void __user *)arg, &odl_pipe_info, + sizeof(odl_pipe_info))) { + retval = -EFAULT; + goto fail; + } + ipa3_odl_ctx->odl_state.aggr_byte_limit_sent = true; + break; + default: + retval = -ENOIOCTLCMD; + print_ipa_odl_state_bit_mask(); + break; + } + +fail: + return retval; +} + +static const struct file_operations ipa_odl_ctl_fops = { + .owner = THIS_MODULE, + .open = ipa_odl_ctl_fops_open, + .release = ipa_odl_ctl_fops_release, + .read = ipa_odl_ctl_fops_read, + .unlocked_ioctl = ipa_odl_ctl_fops_ioctl, + .poll = ipa_odl_ctl_fops_poll, +}; + +static const struct file_operations ipa_adpl_fops = { + .owner = THIS_MODULE, + .open = ipa_adpl_open, + .release = ipa_adpl_release, + .read = ipa_adpl_read, + .unlocked_ioctl = ipa_adpl_ioctl, +}; + +int ipa_odl_init(void) +{ + int result = 0; + struct cdev *cdev; + int loop = 0; + struct ipa3_odl_char_device_context *odl_cdev; + + ipa3_odl_ctx = kzalloc(sizeof(*ipa3_odl_ctx), GFP_KERNEL); + if (!ipa3_odl_ctx) { + result = -ENOMEM; + goto fail_mem_ctx; + } + + odl_cdev = ipa3_odl_ctx->odl_cdev; + INIT_LIST_HEAD(&ipa3_odl_ctx->adpl_msg_list); + mutex_init(&ipa3_odl_ctx->adpl_msg_lock); + + odl_cdev[loop].class = class_create(THIS_MODULE, "ipa_adpl"); + + if (IS_ERR(odl_cdev[loop].class)) { + IPAERR("Error: odl_cdev->class NULL\n"); + result = -ENODEV; + goto create_char_dev0_fail; + } + + result = alloc_chrdev_region(&odl_cdev[loop].dev_num, 0, 1, "ipa_adpl"); + if (result) { + 
IPAERR("alloc_chrdev_region error for ipa adpl pipe\n"); + result = -ENODEV; + goto alloc_chrdev0_region_fail; + } + + odl_cdev[loop].dev = device_create(odl_cdev[loop].class, NULL, + odl_cdev[loop].dev_num, ipa3_ctx, "ipa_adpl"); + if (IS_ERR(odl_cdev[loop].dev)) { + IPAERR("device_create err:%ld\n", PTR_ERR(odl_cdev[loop].dev)); + result = PTR_ERR(odl_cdev[loop].dev); + goto device0_create_fail; + } + + cdev = &odl_cdev[loop].cdev; + cdev_init(cdev, &ipa_adpl_fops); + cdev->owner = THIS_MODULE; + cdev->ops = &ipa_adpl_fops; + + result = cdev_add(cdev, odl_cdev[loop].dev_num, 1); + if (result) { + IPAERR("cdev_add err=%d\n", -result); + goto cdev0_add_fail; + } + + loop++; + + odl_cdev[loop].class = class_create(THIS_MODULE, "ipa_odl_ctl"); + + if (IS_ERR(odl_cdev[loop].class)) { + IPAERR("Error: odl_cdev->class NULL\n"); + result = -ENODEV; + goto create_char_dev1_fail; + } + + result = alloc_chrdev_region(&odl_cdev[loop].dev_num, 0, 1, + "ipa_odl_ctl"); + if (result) { + IPAERR("alloc_chrdev_region error for ipa odl ctl pipe\n"); + goto alloc_chrdev1_region_fail; + } + + odl_cdev[loop].dev = device_create(odl_cdev[loop].class, NULL, + odl_cdev[loop].dev_num, ipa3_ctx, "ipa_odl_ctl"); + if (IS_ERR(odl_cdev[loop].dev)) { + IPAERR("device_create err:%ld\n", PTR_ERR(odl_cdev[loop].dev)); + result = PTR_ERR(odl_cdev[loop].dev); + goto device1_create_fail; + } + + cdev = &odl_cdev[loop].cdev; + cdev_init(cdev, &ipa_odl_ctl_fops); + cdev->owner = THIS_MODULE; + cdev->ops = &ipa_odl_ctl_fops; + + result = cdev_add(cdev, odl_cdev[loop].dev_num, 1); + if (result) { + IPAERR(":cdev_add err=%d\n", -result); + goto cdev1_add_fail; + } + + ipa3_odl_ctx->odl_state.odl_init = true; + return 0; +cdev1_add_fail: + device_destroy(odl_cdev[1].class, odl_cdev[1].dev_num); +device1_create_fail: + unregister_chrdev_region(odl_cdev[1].dev_num, 1); +alloc_chrdev1_region_fail: + class_destroy(odl_cdev[1].class); +create_char_dev1_fail: +cdev0_add_fail: + device_destroy(odl_cdev[0].class, odl_cdev[0].dev_num); +device0_create_fail: + unregister_chrdev_region(odl_cdev[0].dev_num, 1); +alloc_chrdev0_region_fail: + class_destroy(odl_cdev[0].class); +create_char_dev0_fail: + kfree(ipa3_odl_ctx); +fail_mem_ctx: + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h new file mode 100644 index 000000000000..64bbba5bc134 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _IPA3_ODL_H_ +#define _IPA3_ODL_H_ + +#define IPA_ODL_AGGR_BYTE_LIMIT (15 * 1024) +#define IPA_ODL_RX_RING_SIZE 192 +#define MAX_QUEUE_TO_ODL 1024 +#define CONFIG_SUCCESS 1 +#define ODL_EP_TYPE_HSUSB 2 +#define ODL_EP_PERIPHERAL_IFACE_ID 3 + +struct ipa3_odlstats { + u32 odl_rx_pkt; + u32 odl_tx_diag_pkt; + u32 odl_drop_pkt; + atomic_t numer_in_queue; +}; + +struct odl_state_bit_mask { + u32 odl_init:1; + u32 odl_open:1; + u32 adpl_open:1; + u32 aggr_byte_limit_sent:1; + u32 odl_ep_setup:1; + u32 odl_setup_done_sent:1; + u32 odl_ep_info_sent:1; + u32 odl_connected:1; + u32 odl_disconnected:1; + u32:0; +}; + +/** + * struct ipa3_odl_char_device_context - IPA ODL character device + * @class: pointer to the struct class + * @dev_num: device number + * @dev: the dev_t of the device + * @cdev: cdev of the device + */ +struct ipa3_odl_char_device_context { + struct class *class; + dev_t dev_num; + struct device *dev; + struct cdev cdev; +}; + +struct ipa_odl_context { + struct ipa3_odl_char_device_context odl_cdev[2]; + struct list_head adpl_msg_list; + struct mutex adpl_msg_lock; + struct ipa_sys_connect_params odl_sys_param; + u32 odl_client_hdl; + struct odl_state_bit_mask odl_state; + bool odl_ctl_msg_wq_flag; + struct ipa3_odlstats stats; +}; + +struct ipa3_push_msg_odl { + void *buff; + int len; + struct list_head link; +}; + +extern struct ipa_odl_context *ipa3_odl_ctx; + +int ipa_odl_init(void); +void ipa3_odl_pipe_cleanup(bool is_ssr); +int ipa3_odl_pipe_open(void); + +#endif /* _IPA3_ODL_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c new file mode 100644 index 000000000000..5f4c39516b41 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c @@ -0,0 +1,1397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include "ipa_pm.h" +#include "ipa_i.h" + + +#define IPA_PM_DRV_NAME "ipa_pm" + +#define IPA_PM_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) +#define IPA_PM_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) +#define IPA_PM_ERR(fmt, args...) 
\ + do { \ + pr_err(IPA_PM_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) +#define IPA_PM_DBG_STATE(hdl, name, state) \ + IPA_PM_DBG_LOW("Client[%d] %s: %s\n", hdl, name, \ + client_state_to_str[state]) + + +#if IPA_PM_MAX_CLIENTS > 32 +#error max client greater than 32 all bitmask types should be changed +#endif + +/* + * struct ipa_pm_exception_list - holds information about an exception + * @pending: number of clients in exception that have not yet been adctivated + * @bitmask: bitmask of the clients in the exception based on handle + * @threshold: the threshold values for the exception + */ +struct ipa_pm_exception_list { + char clients[IPA_PM_MAX_EX_CL]; + int pending; + u32 bitmask; + int threshold[IPA_PM_THRESHOLD_MAX]; +}; + +/* + * struct clk_scaling_db - holds information about threshholds and exceptions + * @lock: lock the bitmasks and thresholds + * @exception_list: pointer to the list of exceptions + * @work: work for clock scaling algorithm + * @active_client_bitmask: the bits represent handles in the clients array that + * contain non-null client + * @threshold_size: size of the throughput threshold + * @exception_size: size of the exception list + * @cur_vote: idx of the threshold + * @default_threshold: the thresholds used if no exception passes + * @current_threshold: the current threshold of the clock plan + */ +struct clk_scaling_db { + spinlock_t lock; + struct ipa_pm_exception_list exception_list[IPA_PM_EXCEPTION_MAX]; + struct work_struct work; + u32 active_client_bitmask; + int threshold_size; + int exception_size; + int cur_vote; + int default_threshold[IPA_PM_THRESHOLD_MAX]; + int *current_threshold; +}; + +/* + * ipa_pm state names + * + * Timer free states: + * @IPA_PM_DEACTIVATED: client starting state when registered + * @IPA_PM_DEACTIVATE_IN_PROGRESS: deactivate was called in progress of a client + * activating + * @IPA_PM_ACTIVATE_IN_PROGRESS: client is being activated by work_queue + * @IPA_PM_ACTIVATED: client is activated without any timers + * + * Timer set states: + * @IPA_PM_ACTIVATED_PENDING_DEACTIVATION: moves to deactivate once timer pass + * @IPA_PM_ACTIVATED_TIMER_SET: client was activated while timer was set, so + * when the timer pass, client will still be activated + *@IPA_PM_ACTIVATED_PENDING_RESCHEDULE: state signifying extended timer when + * a client is deferred_deactivated when a time ris still active + */ +enum ipa_pm_state { + IPA_PM_DEACTIVATED, + IPA_PM_DEACTIVATE_IN_PROGRESS, + IPA_PM_ACTIVATE_IN_PROGRESS, + IPA_PM_ACTIVATED, + IPA_PM_ACTIVATED_PENDING_DEACTIVATION, + IPA_PM_ACTIVATED_TIMER_SET, + IPA_PM_ACTIVATED_PENDING_RESCHEDULE, + IPA_PM_STATE_MAX +}; + +#define IPA_PM_STATE_ACTIVE(state) \ + (state == IPA_PM_ACTIVATED ||\ + state == IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||\ + state == IPA_PM_ACTIVATED_TIMER_SET ||\ + state == IPA_PM_ACTIVATED_PENDING_RESCHEDULE) + +#define IPA_PM_STATE_IN_PROGRESS(state) \ + (state == IPA_PM_ACTIVATE_IN_PROGRESS \ + || state == IPA_PM_DEACTIVATE_IN_PROGRESS) + +/* + * struct ipa_pm_client - holds information about a specific IPA client + * @name: string name of the client + * @callback: pointer to the client's callback function + * @callback_params: pointer to the client's callback parameters + * @state: Activation state of the client + * @skip_clk_vote: 0 if client votes for clock when 
activated, 1 if no vote + * @group: the ipa_pm_group the client belongs to + * @hdl: handle of the client + * @throughput: the throughput of the client for clock scaling + * @state_lock: spinlock to lock the pm_states + * @activate_work: work for activate (blocking case) + * @deactivate work: delayed work for deferred_deactivate function + * @complete: generic wait-for-completion handler + * @wlock: wake source to prevent AP suspend + */ +struct ipa_pm_client { + char name[IPA_PM_MAX_EX_CL]; + void (*callback)(void *user_data, enum ipa_pm_cb_event); + void *callback_params; + enum ipa_pm_state state; + bool skip_clk_vote; + int group; + int hdl; + int throughput; + spinlock_t state_lock; + struct work_struct activate_work; + struct delayed_work deactivate_work; + struct completion complete; + struct wakeup_source wlock; +}; + +/* + * struct ipa_pm_ctx - global ctx that will hold the client arrays and tput info + * @clients: array to the clients with the handle as its index + * @clients_by_pipe: array to the clients with endpoint as the index + * @wq: work queue for deferred deactivate, activate, and clk_scaling work + 8 @clk_scaling: pointer to clock scaling database + * @client_mutex: global mutex to lock the client arrays + * @aggragated_tput: aggragated tput value of all valid activated clients + * @group_tput: combined throughput for the groups + */ +struct ipa_pm_ctx { + struct ipa_pm_client *clients[IPA_PM_MAX_CLIENTS]; + struct ipa_pm_client *clients_by_pipe[IPA3_MAX_NUM_PIPES]; + struct workqueue_struct *wq; + struct clk_scaling_db clk_scaling; + struct mutex client_mutex; + int aggregated_tput; + int group_tput[IPA_PM_GROUP_MAX]; +}; + +static struct ipa_pm_ctx *ipa_pm_ctx; + +static const char *client_state_to_str[IPA_PM_STATE_MAX] = { + __stringify(IPA_PM_DEACTIVATED), + __stringify(IPA_PM_DEACTIVATE_IN_PROGRESS), + __stringify(IPA_PM_ACTIVATE_IN_PROGRESS), + __stringify(IPA_PM_ACTIVATED), + __stringify(IPA_PM_ACTIVATED_PENDING_DEACTIVATION), + __stringify(IPA_PM_ACTIVATED_TIMER_SET), + __stringify(IPA_PM_ACTIVATED_PENDING_RESCHEDULE), +}; + +static const char *ipa_pm_group_to_str[IPA_PM_GROUP_MAX] = { + __stringify(IPA_PM_GROUP_DEFAULT), + __stringify(IPA_PM_GROUP_APPS), + __stringify(IPA_PM_GROUP_MODEM), +}; + +/** + * pop_max_from_array() -pop the max and move the last element to where the + * max was popped + * @arr: array to be searched for max + * @n: size of the array + * + * Returns: max value of the array + */ +static int pop_max_from_array(int *arr, int *n) +{ + int i; + int max, max_idx; + + max_idx = *n - 1; + max = 0; + + if (*n == 0) + return 0; + + for (i = 0; i < *n; i++) { + if (arr[i] > max) { + max = arr[i]; + max_idx = i; + } + } + (*n)--; + arr[max_idx] = arr[*n]; + + return max; +} + +/** + * calculate_throughput() - calculate the aggregated throughput + * based on active clients + * + * Returns: aggregated tput value + */ +static int calculate_throughput(void) +{ + int client_tput[IPA_PM_MAX_CLIENTS] = { 0 }; + bool group_voted[IPA_PM_GROUP_MAX] = { false }; + int i, n; + int max, second_max, aggregated_tput; + struct ipa_pm_client *client; + + /* Create a basic array to hold throughputs*/ + for (i = 1, n = 0; i < IPA_PM_MAX_CLIENTS; i++) { + client = ipa_pm_ctx->clients[i]; + if (client != NULL && IPA_PM_STATE_ACTIVE(client->state)) { + /* default case */ + if (client->group == IPA_PM_GROUP_DEFAULT) { + client_tput[n++] = client->throughput; + } else if (!group_voted[client->group]) { + client_tput[n++] = ipa_pm_ctx->group_tput + [client->group]; + 
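+			/*
+			 * Clients in a non-default group vote once, with the
+			 * whole group's combined throughput; marking the
+			 * group below keeps further active clients of the
+			 * same group from adding it again.
+			 */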
group_voted[client->group] = true; + } + } + } + /*the array will only use n+1 spots. n will be the last index used*/ + + aggregated_tput = 0; + + /** + * throughput algorithm: + * 1) pop the max and second_max + * 2) add the 2nd max to aggregated tput + * 3) insert the value of max - 2nd max + * 4) repeat until array is of size 1 + */ + while (n > 1) { + max = pop_max_from_array(client_tput, &n); + second_max = pop_max_from_array(client_tput, &n); + client_tput[n++] = max - second_max; + aggregated_tput += second_max; + } + + IPA_PM_DBG_LOW("Aggregated throughput: %d\n", aggregated_tput); + + return aggregated_tput; +} + +/** + * deactivate_client() - turn off the bit in the active client bitmask based on + * the handle passed in + * @hdl: The index of the client to be deactivated + */ +static void deactivate_client(u32 hdl) +{ + unsigned long flags; + + spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags); + ipa_pm_ctx->clk_scaling.active_client_bitmask &= ~(1 << hdl); + spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags); + IPA_PM_DBG_LOW("active bitmask: %x\n", + ipa_pm_ctx->clk_scaling.active_client_bitmask); +} + +/** + * activate_client() - turn on the bit in the active client bitmask based on + * the handle passed in + * @hdl: The index of the client to be activated + */ +static void activate_client(u32 hdl) +{ + unsigned long flags; + + spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags); + ipa_pm_ctx->clk_scaling.active_client_bitmask |= (1 << hdl); + spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags); + IPA_PM_DBG_LOW("active bitmask: %x\n", + ipa_pm_ctx->clk_scaling.active_client_bitmask); +} + +/** + * deactivate_client() - get threshold + * + * Returns: threshold of the exception that passes or default if none pass + */ +static void set_current_threshold(void) +{ + int i; + struct clk_scaling_db *clk; + struct ipa_pm_exception_list *exception; + unsigned long flags; + + clk = &ipa_pm_ctx->clk_scaling; + + spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags); + for (i = 0; i < clk->exception_size; i++) { + exception = &clk->exception_list[i]; + if (exception->pending == 0 && (exception->bitmask + & ~clk->active_client_bitmask) == 0) { + spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, + flags); + clk->current_threshold = exception->threshold; + IPA_PM_DBG("Exception %d set\n", i); + return; + } + } + clk->current_threshold = clk->default_threshold; + spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags); +} + +/** + * do_clk_scaling() - set the clock based on the activated clients + * + * Returns: 0 if success, negative otherwise + */ +static int do_clk_scaling(void) +{ + int i, tput; + int new_th_idx = 1; + struct clk_scaling_db *clk_scaling; + + clk_scaling = &ipa_pm_ctx->clk_scaling; + + mutex_lock(&ipa_pm_ctx->client_mutex); + IPA_PM_DBG_LOW("clock scaling started\n"); + tput = calculate_throughput(); + ipa_pm_ctx->aggregated_tput = tput; + set_current_threshold(); + + mutex_unlock(&ipa_pm_ctx->client_mutex); + + for (i = 0; i < clk_scaling->threshold_size; i++) { + if (tput > clk_scaling->current_threshold[i]) + new_th_idx++; + } + + IPA_PM_DBG_LOW("old idx was at %d\n", ipa_pm_ctx->clk_scaling.cur_vote); + + + if (ipa_pm_ctx->clk_scaling.cur_vote != new_th_idx) { + ipa_pm_ctx->clk_scaling.cur_vote = new_th_idx; + ipa3_set_clock_plan_from_pm(ipa_pm_ctx->clk_scaling.cur_vote); + } + + IPA_PM_DBG_LOW("new idx is at %d\n", ipa_pm_ctx->clk_scaling.cur_vote); + + return 0; +} + +/** + * clock_scaling_func() - set the clock on a work 
queue + */ +static void clock_scaling_func(struct work_struct *work) +{ + do_clk_scaling(); +} + +/** + * activate_work_func - activate a client and vote for clock on a work queue + */ +static void activate_work_func(struct work_struct *work) +{ + struct ipa_pm_client *client; + bool dec_clk = false; + unsigned long flags; + + client = container_of(work, struct ipa_pm_client, activate_work); + if (!client->skip_clk_vote) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name); + if (client->group == IPA_PM_GROUP_APPS) + __pm_stay_awake(&client->wlock); + } + + spin_lock_irqsave(&client->state_lock, flags); + IPA_PM_DBG_STATE(client->hdl, client->name, client->state); + if (client->state == IPA_PM_ACTIVATE_IN_PROGRESS) { + client->state = IPA_PM_ACTIVATED; + } else if (client->state == IPA_PM_DEACTIVATE_IN_PROGRESS) { + client->state = IPA_PM_DEACTIVATED; + dec_clk = true; + } else { + IPA_PM_ERR("unexpected state %d\n", client->state); + WARN_ON(1); + } + spin_unlock_irqrestore(&client->state_lock, flags); + + complete_all(&client->complete); + + if (dec_clk) { + if (!client->skip_clk_vote) { + IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name); + if (client->group == IPA_PM_GROUP_APPS) + __pm_relax(&client->wlock); + } + + IPA_PM_DBG_STATE(client->hdl, client->name, client->state); + return; + } + + activate_client(client->hdl); + + mutex_lock(&ipa_pm_ctx->client_mutex); + if (client->callback) { + client->callback(client->callback_params, + IPA_PM_CLIENT_ACTIVATED); + } else { + IPA_PM_ERR("client has no callback"); + WARN_ON(1); + } + mutex_unlock(&ipa_pm_ctx->client_mutex); + + IPA_PM_DBG_STATE(client->hdl, client->name, client->state); + do_clk_scaling(); +} + +/** + * delayed_deferred_deactivate_work_func - deferred deactivate on a work queue + */ +static void delayed_deferred_deactivate_work_func(struct work_struct *work) +{ + struct delayed_work *dwork; + struct ipa_pm_client *client; + unsigned long flags; + unsigned long delay; + + dwork = container_of(work, struct delayed_work, work); + client = container_of(dwork, struct ipa_pm_client, deactivate_work); + + spin_lock_irqsave(&client->state_lock, flags); + IPA_PM_DBG_STATE(client->hdl, client->name, client->state); + switch (client->state) { + case IPA_PM_ACTIVATED_TIMER_SET: + client->state = IPA_PM_ACTIVATED; + goto bail; + case IPA_PM_ACTIVATED_PENDING_RESCHEDULE: + delay = IPA_PM_DEFERRED_TIMEOUT; + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) + delay *= 5; + + queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work, + msecs_to_jiffies(delay)); + client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION; + goto bail; + case IPA_PM_ACTIVATED_PENDING_DEACTIVATION: + client->state = IPA_PM_DEACTIVATED; + IPA_PM_DBG_STATE(client->hdl, client->name, client->state); + spin_unlock_irqrestore(&client->state_lock, flags); + if (!client->skip_clk_vote) { + IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name); + if (client->group == IPA_PM_GROUP_APPS) + __pm_relax(&client->wlock); + } + + deactivate_client(client->hdl); + do_clk_scaling(); + return; + default: + IPA_PM_ERR("unexpected state %d\n", client->state); + WARN_ON(1); + goto bail; + } + +bail: + IPA_PM_DBG_STATE(client->hdl, client->name, client->state); + spin_unlock_irqrestore(&client->state_lock, flags); +} + +static int find_next_open_array_element(const char *name) +{ + int i, n; + + n = -ENOBUFS; + + /* 0 is not a valid handle */ + for (i = IPA_PM_MAX_CLIENTS - 1; i >= 1; i--) { + if (ipa_pm_ctx->clients[i] == NULL) { + n = i; + 
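+			/*
+			 * Remember this free slot (the loop scans downward,
+			 * so the lowest free index wins) but keep scanning,
+			 * so a client already registered under the same name
+			 * is still detected and reported as -EEXIST below.
+			 */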
continue; + } + + if (strlen(name) == strlen(ipa_pm_ctx->clients[i]->name)) + if (!strcmp(name, ipa_pm_ctx->clients[i]->name)) + return -EEXIST; + } + return n; +} + +/** + * add_client_to_exception_list() - add client to the exception list and + * update pending if necessary + * @hdl: index of the IPA client + * + * Returns: 0 if success, negative otherwise + */ +static int add_client_to_exception_list(u32 hdl) +{ + int i; + struct ipa_pm_exception_list *exception; + + mutex_lock(&ipa_pm_ctx->client_mutex); + for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) { + exception = &ipa_pm_ctx->clk_scaling.exception_list[i]; + if (strnstr(exception->clients, ipa_pm_ctx->clients[hdl]->name, + strlen(exception->clients))) { + exception->pending--; + + if (exception->pending < 0) { + WARN_ON(1); + exception->pending = 0; + mutex_unlock(&ipa_pm_ctx->client_mutex); + return -EPERM; + } + exception->bitmask |= (1 << hdl); + } + } + IPA_PM_DBG("%s added to exception list\n", + ipa_pm_ctx->clients[hdl]->name); + mutex_unlock(&ipa_pm_ctx->client_mutex); + + return 0; +} + +/** + * remove_client_to_exception_list() - remove client from the exception list and + * update pending if necessary + * @hdl: index of the IPA client + * + * Returns: 0 if success, negative otherwise + */ +static int remove_client_from_exception_list(u32 hdl) +{ + int i; + struct ipa_pm_exception_list *exception; + + for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) { + exception = &ipa_pm_ctx->clk_scaling.exception_list[i]; + if (exception->bitmask & (1 << hdl)) { + exception->pending++; + exception->bitmask &= ~(1 << hdl); + } + } + IPA_PM_DBG("Client %d removed from exception list\n", hdl); + + return 0; +} + +/** + * ipa_pm_init() - initialize IPA PM Components + * @ipa_pm_init_params: parameters needed to fill exceptions and thresholds + * + * Returns: 0 on success, negative on failure + */ +int ipa_pm_init(struct ipa_pm_init_params *params) +{ + int i, j; + struct clk_scaling_db *clk_scaling; + + if (params == NULL) { + IPA_PM_ERR("Invalid Params\n"); + return -EINVAL; + } + + if (params->threshold_size <= 0 + || params->threshold_size > IPA_PM_THRESHOLD_MAX) { + IPA_PM_ERR("Invalid threshold size\n"); + return -EINVAL; + } + + if (params->exception_size < 0 + || params->exception_size > IPA_PM_EXCEPTION_MAX) { + IPA_PM_ERR("Invalid exception size\n"); + return -EINVAL; + } + + IPA_PM_DBG("IPA PM initialization started\n"); + + if (ipa_pm_ctx != NULL) { + IPA_PM_ERR("Already initialized\n"); + return -EPERM; + } + + + ipa_pm_ctx = kzalloc(sizeof(*ipa_pm_ctx), GFP_KERNEL); + if (!ipa_pm_ctx) { + IPA_PM_ERR(":kzalloc err.\n"); + return -ENOMEM; + } + + ipa_pm_ctx->wq = create_singlethread_workqueue("ipa_pm_activate"); + if (!ipa_pm_ctx->wq) { + IPA_PM_ERR("create workqueue failed\n"); + kfree(ipa_pm_ctx); + return -ENOMEM; + } + + mutex_init(&ipa_pm_ctx->client_mutex); + + /* Populate and init locks in clk_scaling_db */ + clk_scaling = &ipa_pm_ctx->clk_scaling; + spin_lock_init(&clk_scaling->lock); + clk_scaling->threshold_size = params->threshold_size; + clk_scaling->exception_size = params->exception_size; + INIT_WORK(&clk_scaling->work, clock_scaling_func); + + for (i = 0; i < params->threshold_size; i++) + clk_scaling->default_threshold[i] = + params->default_threshold[i]; + + /* Populate exception list*/ + for (i = 0; i < params->exception_size; i++) { + strlcpy(clk_scaling->exception_list[i].clients, + params->exceptions[i].usecase, IPA_PM_MAX_EX_CL); + IPA_PM_DBG("Usecase: %s\n", 
params->exceptions[i].usecase); + + /* Parse the commas to count the size of the clients */ + for (j = 0; j < IPA_PM_MAX_EX_CL && + clk_scaling->exception_list[i].clients[j]; j++) { + if (clk_scaling->exception_list[i].clients[j] == ',') + clk_scaling->exception_list[i].pending++; + } + + clk_scaling->exception_list[i].pending++; + IPA_PM_DBG("Pending: %d\n", + clk_scaling->exception_list[i].pending); + + /* populate the threshold */ + for (j = 0; j < params->threshold_size; j++) { + clk_scaling->exception_list[i].threshold[j] + = params->exceptions[i].threshold[j]; + } + + } + IPA_PM_DBG("initialization success"); + + return 0; +} + +int ipa_pm_destroy(void) +{ + IPA_PM_DBG("IPA PM destroy started\n"); + + if (ipa_pm_ctx == NULL) { + IPA_PM_ERR("Already destroyed\n"); + return -EPERM; + } + + destroy_workqueue(ipa_pm_ctx->wq); + + kfree(ipa_pm_ctx); + ipa_pm_ctx = NULL; + + return 0; +} + +/** + * ipa_pm_register() - register an IPA PM client with the PM + * @register_params: params for a client like throughput, callback, etc. + * @hdl: int pointer that will be used as an index to access the client + * + * Returns: 0 on success, negative on failure + * + * Side effects: *hdl is replaced with the client index or -EEXIST if + * client is already registered + */ +int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl) +{ + struct ipa_pm_client *client; + struct wakeup_source *wlock; + int elem; + + if (ipa_pm_ctx == NULL) { + IPA_PM_ERR("PM_ctx is null\n"); + return -EINVAL; + } + + if (params == NULL || hdl == NULL || params->name == NULL) { + IPA_PM_ERR("Invalid Params\n"); + return -EINVAL; + } + + IPA_PM_DBG("IPA PM registering client\n"); + + mutex_lock(&ipa_pm_ctx->client_mutex); + + elem = find_next_open_array_element(params->name); + *hdl = elem; + if (elem < 0 || elem > IPA_PM_MAX_CLIENTS) { + mutex_unlock(&ipa_pm_ctx->client_mutex); + IPA_PM_ERR("client already registered or full array elem=%d\n", + elem); + return elem; + } + + ipa_pm_ctx->clients[*hdl] = kzalloc(sizeof + (struct ipa_pm_client), GFP_KERNEL); + if (!ipa_pm_ctx->clients[*hdl]) { + mutex_unlock(&ipa_pm_ctx->client_mutex); + IPA_PM_ERR(":kzalloc err.\n"); + return -ENOMEM; + } + mutex_unlock(&ipa_pm_ctx->client_mutex); + + client = ipa_pm_ctx->clients[*hdl]; + + spin_lock_init(&client->state_lock); + + INIT_DELAYED_WORK(&client->deactivate_work, + delayed_deferred_deactivate_work_func); + + INIT_WORK(&client->activate_work, activate_work_func); + + /* populate fields */ + strlcpy(client->name, params->name, IPA_PM_MAX_EX_CL); + client->callback = params->callback; + client->callback_params = params->user_data; + client->group = params->group; + client->hdl = *hdl; + client->skip_clk_vote = params->skip_clk_vote; + wlock = &client->wlock; + wakeup_source_init(wlock, client->name); + + /* add client to exception list */ + if (add_client_to_exception_list(*hdl)) { + ipa_pm_deregister(*hdl); + IPA_PM_ERR("Fail to add client to exception_list\n"); + return -EPERM; + } + + IPA_PM_DBG("IPA PM client registered with handle %d\n", *hdl); + return 0; +} + +/** + * ipa_pm_deregister() - deregister IPA client from the PM + * @hdl: index of the client in the array + * + * Returns: 0 on success, negative on failure + */ +int ipa_pm_deregister(u32 hdl) +{ + struct ipa_pm_client *client; + int i; + unsigned long flags; + + if (ipa_pm_ctx == NULL) { + IPA_PM_ERR("PM_ctx is null\n"); + return -EINVAL; + } + + if (hdl >= IPA_PM_MAX_CLIENTS) { + IPA_PM_ERR("Invalid Param\n"); + return -EINVAL; + } + + if 
(ipa_pm_ctx->clients[hdl] == NULL) { + IPA_PM_ERR("Client is Null\n"); + return -EINVAL; + } + + IPA_PM_DBG("IPA PM deregistering client\n"); + + client = ipa_pm_ctx->clients[hdl]; + spin_lock_irqsave(&client->state_lock, flags); + if (IPA_PM_STATE_IN_PROGRESS(client->state)) { + spin_unlock_irqrestore(&client->state_lock, flags); + wait_for_completion(&client->complete); + spin_lock_irqsave(&client->state_lock, flags); + } + + if (IPA_PM_STATE_ACTIVE(client->state)) { + IPA_PM_DBG("Activated clients cannot be deregistered\n"); + spin_unlock_irqrestore(&client->state_lock, flags); + return -EPERM; + } + spin_unlock_irqrestore(&client->state_lock, flags); + + mutex_lock(&ipa_pm_ctx->client_mutex); + + /* nullify pointers in pipe array */ + for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) { + if (ipa_pm_ctx->clients_by_pipe[i] == ipa_pm_ctx->clients[hdl]) + ipa_pm_ctx->clients_by_pipe[i] = NULL; + } + wakeup_source_trash(&client->wlock); + kfree(client); + ipa_pm_ctx->clients[hdl] = NULL; + + remove_client_from_exception_list(hdl); + IPA_PM_DBG("IPA PM client %d deregistered\n", hdl); + mutex_unlock(&ipa_pm_ctx->client_mutex); + + return 0; +} + +/** + * ipa_pm_associate_ipa_cons_to_client() - map an IPA consumer pipe to a client + * @hdl: index of the client to be mapped + * @consumer: the pipe/consumer to be mapped to the client + * + * Returns: 0 on success, negative on failure + * + * Side effects: multiple pipes are allowed to be mapped to a single client + */ +int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer) +{ + int idx; + + if (ipa_pm_ctx == NULL) { + IPA_PM_ERR("PM_ctx is null\n"); + return -EINVAL; + } + + if (hdl >= IPA_PM_MAX_CLIENTS || consumer < 0 || + consumer >= IPA_CLIENT_MAX) { + IPA_PM_ERR("invalid params\n"); + return -EINVAL; + } + + mutex_lock(&ipa_pm_ctx->client_mutex); + if (ipa_pm_ctx->clients[hdl] == NULL) { + mutex_unlock(&ipa_pm_ctx->client_mutex); + IPA_PM_ERR("Client is NULL\n"); + return -EPERM; + } + + idx = ipa_get_ep_mapping(consumer); + + if (idx < 0) { + mutex_unlock(&ipa_pm_ctx->client_mutex); + IPA_PM_DBG("Pipe is not used\n"); + return 0; + } + + IPA_PM_DBG("Mapping pipe %d to client %d\n", idx, hdl); + + if (ipa_pm_ctx->clients_by_pipe[idx] != NULL) { + mutex_unlock(&ipa_pm_ctx->client_mutex); + IPA_PM_ERR("Pipe is already mapped\n"); + return -EPERM; + } + ipa_pm_ctx->clients_by_pipe[idx] = ipa_pm_ctx->clients[hdl]; + mutex_unlock(&ipa_pm_ctx->client_mutex); + + IPA_PM_DBG("Pipe %d is mapped to client %d\n", idx, hdl); + + return 0; +} + +static int ipa_pm_activate_helper(struct ipa_pm_client *client, bool sync) +{ + struct ipa_active_client_logging_info log_info; + int result = 0; + unsigned long flags; + + spin_lock_irqsave(&client->state_lock, flags); + IPA_PM_DBG_STATE(client->hdl, client->name, client->state); + + if (IPA_PM_STATE_IN_PROGRESS(client->state)) { + if (sync) { + spin_unlock_irqrestore(&client->state_lock, flags); + wait_for_completion(&client->complete); + spin_lock_irqsave(&client->state_lock, flags); + } else { + client->state = IPA_PM_ACTIVATE_IN_PROGRESS; + spin_unlock_irqrestore(&client->state_lock, flags); + return -EINPROGRESS; + } + } + + switch (client->state) { + case IPA_PM_ACTIVATED_PENDING_RESCHEDULE: + case IPA_PM_ACTIVATED_PENDING_DEACTIVATION: + client->state = IPA_PM_ACTIVATED_TIMER_SET; + /* fall through */ + case IPA_PM_ACTIVATED: + case IPA_PM_ACTIVATED_TIMER_SET: + spin_unlock_irqrestore(&client->state_lock, flags); + return 0; + case IPA_PM_DEACTIVATED: + break; + default: + IPA_PM_ERR("Invalid State\n"); +
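/* unknown state: drop the lock and reject the activation */ +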
spin_unlock_irqrestore(&client->state_lock, flags); + return -EPERM; + } + IPA_PM_DBG_STATE(client->hdl, client->name, client->state); + + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, client->name); + if (!client->skip_clk_vote) { + if (sync) { + client->state = IPA_PM_ACTIVATE_IN_PROGRESS; + spin_unlock_irqrestore(&client->state_lock, flags); + IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name); + spin_lock_irqsave(&client->state_lock, flags); + } else + result = ipa3_inc_client_enable_clks_no_block + (&log_info); + } + + /* we got the clocks */ + if (result == 0) { + client->state = IPA_PM_ACTIVATED; + if (client->group == IPA_PM_GROUP_APPS) + __pm_stay_awake(&client->wlock); + spin_unlock_irqrestore(&client->state_lock, flags); + activate_client(client->hdl); + if (sync) + do_clk_scaling(); + else + queue_work(ipa_pm_ctx->wq, + &ipa_pm_ctx->clk_scaling.work); + IPA_PM_DBG_STATE(client->hdl, client->name, client->state); + return 0; + } + + client->state = IPA_PM_ACTIVATE_IN_PROGRESS; + init_completion(&client->complete); + queue_work(ipa_pm_ctx->wq, &client->activate_work); + spin_unlock_irqrestore(&client->state_lock, flags); + IPA_PM_DBG_STATE(client->hdl, client->name, client->state); + return -EINPROGRESS; +} + +/** + * ipa_pm_activate(): activate ipa client to vote for clock. Can be called + * from atomic context and returns -EINPROGRESS if it cannot be done synchronously + * @hdl: index of the client in the array + * + * Returns: 0 on success, -EINPROGRESS if operation cannot be done synchronously + * and other negatives on failure + */ +int ipa_pm_activate(u32 hdl) +{ + if (ipa_pm_ctx == NULL) { + IPA_PM_ERR("PM_ctx is null\n"); + return -EINVAL; + } + + if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) { + IPA_PM_ERR("Invalid Param\n"); + return -EINVAL; + } + + return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], false); +} + +/** + * ipa_pm_activate_sync(): activate ipa client and vote for clock synchronously. + * Cannot be called from atomic context. + * @hdl: index of the client in the array + * + * Returns: 0 on success, negative on failure + */ +int ipa_pm_activate_sync(u32 hdl) +{ + if (ipa_pm_ctx == NULL) { + IPA_PM_ERR("PM_ctx is null\n"); + return -EINVAL; + } + + if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) { + IPA_PM_ERR("Invalid Param\n"); + return -EINVAL; + } + + return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], true); +} + +/** + * ipa_pm_deferred_deactivate(): schedule a timer to deactivate client and + * devote clock.
Can be called from atomic context (asynchronously) + * @hdl: index of the client in the array + * + * Returns: 0 on success, negative on failure + */ +int ipa_pm_deferred_deactivate(u32 hdl) +{ + struct ipa_pm_client *client; + unsigned long flags; + unsigned long delay; + + if (ipa_pm_ctx == NULL) { + IPA_PM_ERR("PM_ctx is null\n"); + return -EINVAL; + } + + if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) { + IPA_PM_ERR("Invalid Param\n"); + return -EINVAL; + } + + client = ipa_pm_ctx->clients[hdl]; + IPA_PM_DBG_STATE(hdl, client->name, client->state); + + spin_lock_irqsave(&client->state_lock, flags); + switch (client->state) { + case IPA_PM_ACTIVATE_IN_PROGRESS: + client->state = IPA_PM_DEACTIVATE_IN_PROGRESS; + case IPA_PM_DEACTIVATED: + IPA_PM_DBG_STATE(hdl, client->name, client->state); + spin_unlock_irqrestore(&client->state_lock, flags); + return 0; + case IPA_PM_ACTIVATED: + delay = IPA_PM_DEFERRED_TIMEOUT; + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) + delay *= 5; + + client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION; + queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work, + msecs_to_jiffies(delay)); + break; + case IPA_PM_ACTIVATED_TIMER_SET: + case IPA_PM_ACTIVATED_PENDING_DEACTIVATION: + client->state = IPA_PM_ACTIVATED_PENDING_RESCHEDULE; + case IPA_PM_DEACTIVATE_IN_PROGRESS: + case IPA_PM_ACTIVATED_PENDING_RESCHEDULE: + break; + case IPA_PM_STATE_MAX: + default: + IPA_PM_ERR("Bad State"); + spin_unlock_irqrestore(&client->state_lock, flags); + return -EINVAL; + } + IPA_PM_DBG_STATE(hdl, client->name, client->state); + spin_unlock_irqrestore(&client->state_lock, flags); + + return 0; +} + +/** + * ipa_pm_deactivate_all_deferred(): Cancel the deferred deactivation timer and + * immediately devotes for IPA clocks + * + * Returns: 0 on success, negative on failure + */ +int ipa_pm_deactivate_all_deferred(void) +{ + int i; + bool run_algorithm = false; + struct ipa_pm_client *client; + unsigned long flags; + + if (ipa_pm_ctx == NULL) { + IPA_PM_ERR("PM_ctx is null\n"); + return -EINVAL; + } + + for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) { + client = ipa_pm_ctx->clients[i]; + + if (client == NULL) + continue; + + cancel_delayed_work_sync(&client->deactivate_work); + + if (IPA_PM_STATE_IN_PROGRESS(client->state)) { + wait_for_completion(&client->complete); + continue; + } + + spin_lock_irqsave(&client->state_lock, flags); + IPA_PM_DBG_STATE(client->hdl, client->name, client->state); + + if (client->state == IPA_PM_ACTIVATED_TIMER_SET) { + client->state = IPA_PM_ACTIVATED; + IPA_PM_DBG_STATE(client->hdl, client->name, + client->state); + spin_unlock_irqrestore(&client->state_lock, flags); + } else if (client->state == + IPA_PM_ACTIVATED_PENDING_DEACTIVATION || + client->state == + IPA_PM_ACTIVATED_PENDING_RESCHEDULE) { + run_algorithm = true; + client->state = IPA_PM_DEACTIVATED; + IPA_PM_DBG_STATE(client->hdl, client->name, + client->state); + spin_unlock_irqrestore(&client->state_lock, flags); + if (!client->skip_clk_vote) { + IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name); + if (client->group == IPA_PM_GROUP_APPS) + __pm_relax(&client->wlock); + } + deactivate_client(client->hdl); + } else /* if activated or deactivated, we do nothing */ + spin_unlock_irqrestore(&client->state_lock, flags); + } + + if (run_algorithm) + do_clk_scaling(); + + return 0; +} + +/** + * ipa_pm_deactivate_sync(): deactivate ipa client and devote clock. Cannot be + * called from atomic context. 
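+ * + * A pending deferred deactivation for the client is cancelled first, and an + * in-progress activation is allowed to complete before the clock vote is dropped.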
+ * @hdl: index of the client in the array + * + * Returns: 0 on success, negative on failure + */ +int ipa_pm_deactivate_sync(u32 hdl) +{ + struct ipa_pm_client *client; + unsigned long flags; + + if (ipa_pm_ctx == NULL) { + IPA_PM_ERR("PM_ctx is null\n"); + return -EINVAL; + } + + if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) { + IPA_PM_ERR("Invalid Param\n"); + return -EINVAL; + } + client = ipa_pm_ctx->clients[hdl]; + + cancel_delayed_work_sync(&client->deactivate_work); + + if (IPA_PM_STATE_IN_PROGRESS(client->state)) + wait_for_completion(&client->complete); + + spin_lock_irqsave(&client->state_lock, flags); + IPA_PM_DBG_STATE(hdl, client->name, client->state); + + if (client->state == IPA_PM_DEACTIVATED) { + spin_unlock_irqrestore(&client->state_lock, flags); + return 0; + } + + spin_unlock_irqrestore(&client->state_lock, flags); + + /* else case (Deactivates all Activated cases)*/ + if (!client->skip_clk_vote) { + IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name); + if (client->group == IPA_PM_GROUP_APPS) + __pm_relax(&client->wlock); + } + + spin_lock_irqsave(&client->state_lock, flags); + client->state = IPA_PM_DEACTIVATED; + IPA_PM_DBG_STATE(hdl, client->name, client->state); + spin_unlock_irqrestore(&client->state_lock, flags); + deactivate_client(hdl); + do_clk_scaling(); + + return 0; +} + +/** + * ipa_pm_handle_suspend(): calls the callbacks of suspended clients to wake up + * @pipe_bitmask: the bits represent the indexes of the clients to be woken up + * + * Returns: 0 on success, negative on failure + */ +int ipa_pm_handle_suspend(u32 pipe_bitmask) +{ + int i; + struct ipa_pm_client *client; + bool client_notified[IPA_PM_MAX_CLIENTS] = { false }; + + if (ipa_pm_ctx == NULL) { + IPA_PM_ERR("PM_ctx is null\n"); + return -EINVAL; + } + + IPA_PM_DBG_LOW("bitmask: %d", pipe_bitmask); + + if (pipe_bitmask == 0) + return 0; + + mutex_lock(&ipa_pm_ctx->client_mutex); + for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) { + if (pipe_bitmask & (1 << i)) { + client = ipa_pm_ctx->clients_by_pipe[i]; + if (client && !client_notified[client->hdl]) { + if (client->callback) { + client->callback(client->callback_params + , IPA_PM_REQUEST_WAKEUP); + client_notified[client->hdl] = true; + } else { + IPA_PM_ERR("client has no callback"); + WARN_ON(1); + } + } + } + } + mutex_unlock(&ipa_pm_ctx->client_mutex); + return 0; +} + +/** + * ipa_pm_set_throughput(): Adds/changes the throughput requirement to IPA PM + * to be used for clock scaling + * @hdl: index of the client in the array + * @throughput: the new throughput value to be set for that client + * + * Returns: 0 on success, negative on failure + */ +int ipa_pm_set_throughput(u32 hdl, int throughput) +{ + struct ipa_pm_client *client; + unsigned long flags; + + if (ipa_pm_ctx == NULL) { + IPA_PM_ERR("PM_ctx is null\n"); + return -EINVAL; + } + + if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL + || throughput < 0) { + IPA_PM_ERR("Invalid Params\n"); + return -EINVAL; + } + client = ipa_pm_ctx->clients[hdl]; + + mutex_lock(&ipa_pm_ctx->client_mutex); + if (client->group == IPA_PM_GROUP_DEFAULT) + IPA_PM_DBG_LOW("Old throughput: %d\n", client->throughput); + else + IPA_PM_DBG_LOW("old Group %d throughput: %d\n", + client->group, ipa_pm_ctx->group_tput[client->group]); + + if (client->group == IPA_PM_GROUP_DEFAULT) + client->throughput = throughput; + else + ipa_pm_ctx->group_tput[client->group] = throughput; + + if (client->group == IPA_PM_GROUP_DEFAULT) + IPA_PM_DBG_LOW("New throughput: %d\n", client->throughput); 
+ else + IPA_PM_DBG_LOW("New Group %d throughput: %d\n", + client->group, ipa_pm_ctx->group_tput[client->group]); + mutex_unlock(&ipa_pm_ctx->client_mutex); + + spin_lock_irqsave(&client->state_lock, flags); + if (IPA_PM_STATE_ACTIVE(client->state) || (client->group != + IPA_PM_GROUP_DEFAULT)) { + spin_unlock_irqrestore(&client->state_lock, flags); + do_clk_scaling(); + return 0; + } + spin_unlock_irqrestore(&client->state_lock, flags); + + return 0; +} + +/** + * ipa_pm_stat() - print PM stat + * @buf: [in] The user buff used to print + * @size: [in] The size of buf + * Returns: number of bytes used on success, negative on failure + * + * This function is called by ipa_debugfs in order to receive + * a picture of the clients in the PM and the throughput, threshold and cur vote + */ +int ipa_pm_stat(char *buf, int size) +{ + struct ipa_pm_client *client; + struct clk_scaling_db *clk = &ipa_pm_ctx->clk_scaling; + int i, j, tput, cnt = 0, result = 0; + unsigned long flags; + + if (!buf || size < 0) + return -EINVAL; + + mutex_lock(&ipa_pm_ctx->client_mutex); + + result = scnprintf(buf + cnt, size - cnt, "\n\nCurrent threshold: ["); + cnt += result; + + for (i = 0; i < clk->threshold_size; i++) { + result = scnprintf(buf + cnt, size - cnt, + "%d, ", clk->current_threshold[i]); + cnt += result; + } + + result = scnprintf(buf + cnt, size - cnt, "\b\b]\n"); + cnt += result; + + result = scnprintf(buf + cnt, size - cnt, + "Aggregated tput: %d, Cur vote: %d", + ipa_pm_ctx->aggregated_tput, clk->cur_vote); + cnt += result; + + result = scnprintf(buf + cnt, size - cnt, "\n\nRegistered Clients:\n"); + cnt += result; + + + for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) { + client = ipa_pm_ctx->clients[i]; + + if (client == NULL) + continue; + + spin_lock_irqsave(&client->state_lock, flags); + if (client->group == IPA_PM_GROUP_DEFAULT) + tput = client->throughput; + else + tput = ipa_pm_ctx->group_tput[client->group]; + + result = scnprintf(buf + cnt, size - cnt, + "Client[%d]: %s State:%s\nGroup: %s Throughput: %d Pipes: ", + i, client->name, client_state_to_str[client->state], + ipa_pm_group_to_str[client->group], tput); + cnt += result; + + for (j = 0; j < IPA3_MAX_NUM_PIPES; j++) { + if (ipa_pm_ctx->clients_by_pipe[j] == client) { + result = scnprintf(buf + cnt, size - cnt, + "%d, ", j); + cnt += result; + } + } + + result = scnprintf(buf + cnt, size - cnt, "\b\b\n\n"); + cnt += result; + spin_unlock_irqrestore(&client->state_lock, flags); + } + mutex_unlock(&ipa_pm_ctx->client_mutex); + + return cnt; +} + +/** + * ipa_pm_exceptions_stat() - print PM exceptions stat + * @buf: [in] The user buff used to print + * @size: [in] The size of buf + * Returns: number of bytes used on success, negative on failure + * + * This function is called by ipa_debugfs in order to receive + * a full picture of the exceptions in the PM + */ +int ipa_pm_exceptions_stat(char *buf, int size) +{ + int i, j, cnt = 0, result = 0; + struct ipa_pm_exception_list *exception; + + if (!buf || size < 0) + return -EINVAL; + + result = scnprintf(buf + cnt, size - cnt, "\n"); + cnt += result; + + mutex_lock(&ipa_pm_ctx->client_mutex); + for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) { + exception = &ipa_pm_ctx->clk_scaling.exception_list[i]; + if (exception == NULL) { + result = scnprintf(buf + cnt, size - cnt, + "Exception %d is NULL\n\n", i); + cnt += result; + continue; + } + + result = scnprintf(buf + cnt, size - cnt, + "Exception %d: %s\nPending: %d Bitmask: %d Threshold: [" + , i, exception->clients, 
exception->pending, + exception->bitmask); + cnt += result; + for (j = 0; j < ipa_pm_ctx->clk_scaling.threshold_size; j++) { + result = scnprintf(buf + cnt, size - cnt, + "%d, ", exception->threshold[j]); + cnt += result; + } + result = scnprintf(buf + cnt, size - cnt, "\b\b]\n\n"); + cnt += result; + } + mutex_unlock(&ipa_pm_ctx->client_mutex); + + return cnt; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h new file mode 100644 index 000000000000..65261a1347ce --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_PM_H_ +#define _IPA_PM_H_ + +#include + +/* internal to ipa */ +#define IPA_PM_MAX_CLIENTS 32 /* handle 0 is not used, so the effective max is one less */ +#define IPA_PM_MAX_EX_CL 64 +#define IPA_PM_THRESHOLD_MAX 5 +#define IPA_PM_EXCEPTION_MAX 2 +#define IPA_PM_DEFERRED_TIMEOUT 10 + +/* + * ipa_pm group names + * + * Default stands for individual clients, while the other groups share one throughput. + * Some groups also have special behavior; for example, the modem group does not vote for clock + * but is still accounted for in clock scaling while activated. + */ +enum ipa_pm_group { + IPA_PM_GROUP_DEFAULT, + IPA_PM_GROUP_APPS, + IPA_PM_GROUP_MODEM, + IPA_PM_GROUP_MAX, +}; + +/* + * ipa_pm_cb_event + * + * specifies what kind of callback is being called. + * IPA_PM_CLIENT_ACTIVATED: the client has completed asynchronous activation + * IPA_PM_REQUEST_WAKEUP: wake up the client after it has been suspended + */ +enum ipa_pm_cb_event { + IPA_PM_CLIENT_ACTIVATED, + IPA_PM_REQUEST_WAKEUP, + IPA_PM_CB_EVENT_MAX, +}; + +/* + * struct ipa_pm_exception - clients included in an exception and its thresholds + * @usecase: comma separated client names + * @threshold: the threshold values for the exception + */ +struct ipa_pm_exception { + const char *usecase; + int threshold[IPA_PM_THRESHOLD_MAX]; +}; + +/* + * struct ipa_pm_init_params - parameters needed for initializing the pm + * @default_threshold: the thresholds used if no exception passes + * @threshold_size: number of valid entries in default_threshold + * @exceptions: list of exceptions for the pm + * @exception_size: number of valid entries in exceptions + */ +struct ipa_pm_init_params { + int default_threshold[IPA_PM_THRESHOLD_MAX]; + int threshold_size; + struct ipa_pm_exception exceptions[IPA_PM_EXCEPTION_MAX]; + int exception_size; +}; + +/* + * struct ipa_pm_register_params - parameters needed to register a client + * @name: name of the client + * @callback: pointer to the client's callback function + * @user_data: pointer to the client's callback parameters + * @group: group number of the client + * @skip_clk_vote: 0 if client votes for clock when activated, 1 if no vote + */ +struct ipa_pm_register_params { + const char *name; + void (*callback)(void *user_data, enum ipa_pm_cb_event); + void *user_data; + enum ipa_pm_group group; + bool skip_clk_vote; +}; + +#ifdef CONFIG_IPA3 + +int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl); +int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer); +int ipa_pm_activate(u32 hdl); +int ipa_pm_activate_sync(u32 hdl); +int ipa_pm_deferred_deactivate(u32 hdl); +int ipa_pm_deactivate_sync(u32 hdl); +int ipa_pm_set_throughput(u32 hdl, int throughput); +int ipa_pm_deregister(u32 hdl); + +/* IPA Internal Functions */ +int ipa_pm_init(struct ipa_pm_init_params *params); +int ipa_pm_destroy(void); +int
ipa_pm_handle_suspend(u32 pipe_bitmask); +int ipa_pm_deactivate_all_deferred(void); +int ipa_pm_stat(char *buf, int size); +int ipa_pm_exceptions_stat(char *buf, int size); + +#else + +static inline int ipa_pm_register( + struct ipa_pm_register_params *params, u32 *hdl) +{ + return -EPERM; +} + +static inline int ipa_pm_associate_ipa_cons_to_client( + u32 hdl, enum ipa_client_type consumer) +{ + return -EPERM; +} + +static inline int ipa_pm_activate(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_pm_activate_sync(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_pm_deferred_deactivate(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_pm_deactivate_sync(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_pm_set_throughput(u32 hdl, int throughput) +{ + return -EPERM; +} + +static inline int ipa_pm_deregister(u32 hdl) +{ + return -EPERM; +} + +/* IPA Internal Functions */ +static inline int ipa_pm_init(struct ipa_pm_init_params *params) +{ + return -EPERM; +} + +static inline int ipa_pm_destroy(void) +{ + return -EPERM; +} + +static inline int ipa_pm_handle_suspend(u32 pipe_bitmask) +{ + return -EPERM; +} + +static inline int ipa_pm_deactivate_all_deferred(void) +{ + return -EPERM; +} + +static inline int ipa_pm_stat(char *buf, int size) +{ + return -EPERM; +} + +static inline int ipa_pm_exceptions_stat(char *buf, int size) +{ + return -EPERM; +} +#endif + +#endif /* _IPA_PM_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c new file mode 100644 index 000000000000..5ad222661423 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -0,0 +1,1781 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipa_qmi_service.h" +#include "ipa_mhi_proxy.h" + +#define IPA_Q6_SVC_VERS 1 +#define IPA_A5_SVC_VERS 1 +#define Q6_QMI_COMPLETION_TIMEOUT (60*HZ) + +#define IPA_A5_SERVICE_SVC_ID 0x31 +#define IPA_A5_SERVICE_INS_ID 1 +#define IPA_Q6_SERVICE_SVC_ID 0x31 +#define IPA_Q6_SERVICE_INS_ID 2 + +#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000 +#define QMI_SEND_REQ_TIMEOUT_MS 60000 +#define QMI_MHI_SEND_REQ_TIMEOUT_MS 1000 + +#define QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS 1000 + +static struct qmi_handle *ipa3_svc_handle; +static struct workqueue_struct *ipa_clnt_req_workqueue; +static bool ipa3_qmi_modem_init_fin, ipa3_qmi_indication_fin; +static struct work_struct ipa3_qmi_service_init_work; +static uint32_t ipa_wan_platform; +struct ipa3_qmi_context *ipa3_qmi_ctx; +static bool workqueues_stopped; +static bool ipa3_modem_init_cmplt; +static bool first_time_handshake; +struct mutex ipa3_qmi_lock; +struct ipa_msg_desc { + uint16_t msg_id; + int max_msg_len; + struct qmi_elem_info *ei_array; +}; + +/* QMI A5 service */ + +static void ipa3_handle_indication_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_indication_reg_req_msg_v01 *indication_req; + struct ipa_indication_reg_resp_msg_v01 resp; + struct ipa_master_driver_init_complt_ind_msg_v01 ind; + int rc; + + indication_req = (struct ipa_indication_reg_req_msg_v01 *)decoded_msg; + IPAWANDBG("Received INDICATION Request\n"); + + /* cache the client sq */ + memcpy(&ipa3_qmi_ctx->client_sq, sq, sizeof(*sq)); + + memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01)); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_INDICATION_REGISTER_RESP_V01, + QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01, + ipa3_indication_reg_resp_msg_data_v01_ei, + &resp); + + if (rc < 0) { + IPAWANERR("send response for Indication register failed\n"); + return; + } + + ipa3_qmi_indication_fin = true; + + /* check if we need to send the indication to the modem */ + if (ipa3_qmi_modem_init_fin) { + IPAWANDBG("send indication to modem (%d)\n", + ipa3_qmi_modem_init_fin); + memset(&ind, 0, sizeof(struct + ipa_master_driver_init_complt_ind_msg_v01)); + ind.master_driver_init_status.result = + IPA_QMI_RESULT_SUCCESS_V01; + + rc = qmi_send_indication(qmi_handle, + &(ipa3_qmi_ctx->client_sq), + QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01, + QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01, + ipa3_master_driver_init_complt_ind_msg_data_v01_ei, + &ind); + + if (rc < 0) { + IPAWANERR("send indication failed\n"); + ipa3_qmi_indication_fin = false; + } + } else { + IPAWANERR("not sending indication\n"); + } +} + +static void ipa3_handle_install_filter_rule_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_install_fltr_rule_req_msg_v01 *rule_req; + struct ipa_install_fltr_rule_resp_msg_v01 resp; + uint32_t rule_hdl[MAX_NUM_Q6_RULE]; + int rc = 0, i; + + rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)decoded_msg; + memset(rule_hdl, 0, sizeof(rule_hdl)); + memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01)); + IPAWANDBG("Received install filter Request\n"); + + rc = ipa3_copy_ul_filter_rule_to_ipa((struct + ipa_install_fltr_rule_req_msg_v01 *)decoded_msg); + + if (rc) { + IPAWANERR("copying UL rules from modem failed\n"); + return; + }
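+ /* echo the installed rule IDs back to the modem, capped at MAX_NUM_Q6_RULE */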
+ + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + if (rule_req->filter_spec_ex_list_valid == true) { + resp.rule_id_valid = 1; + if (rule_req->filter_spec_ex_list_len > MAX_NUM_Q6_RULE) { + resp.rule_id_len = MAX_NUM_Q6_RULE; + IPAWANERR("installed (%d) max Q6-UL rules ", + MAX_NUM_Q6_RULE); + IPAWANERR("but modem gives total (%u)\n", + rule_req->filter_spec_ex_list_len); + } else { + resp.rule_id_len = + rule_req->filter_spec_ex_list_len; + } + } else { + resp.rule_id_valid = 0; + resp.rule_id_len = 0; + } + + /* construct UL filter rules response to Modem*/ + for (i = 0; i < resp.rule_id_len; i++) { + resp.rule_id[i] = + rule_req->filter_spec_ex_list[i].rule_id; + } + + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_INSTALL_FILTER_RULE_RESP_V01, + QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01, + ipa3_install_fltr_rule_resp_msg_data_v01_ei, + &resp); + + if (rc < 0) + IPAWANERR("install filter rules failed\n"); + else + IPAWANDBG("Replied to install filter request\n"); +} + +static void ipa3_handle_filter_installed_notify_req( + struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_fltr_installed_notif_resp_msg_v01 resp; + int rc = 0; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + IPAWANDBG("Received filter_install_notify Request\n"); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01, + QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01, + ipa3_fltr_installed_notif_resp_msg_data_v01_ei, + &resp); + + if (rc < 0) + IPAWANERR("handle filter rules failed\n"); + else + IPAWANDBG("Responsed filter_install_notify Request\n"); +} + +static void handle_ipa_config_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_config_resp_msg_v01 resp; + int rc; + + memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01)); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + IPAWANDBG("Received IPA CONFIG Request\n"); + rc = ipa_mhi_handle_ipa_config_req( + (struct ipa_config_req_msg_v01 *)decoded_msg); + if (rc) { + IPAERR("ipa3_mhi_handle_ipa_config_req failed %d\n", rc); + resp.resp.result = IPA_QMI_RESULT_FAILURE_V01; + } + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_CONFIG_RESP_V01, + QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01, + ipa3_config_resp_msg_data_v01_ei, + &resp); + + if (rc < 0) + IPAWANERR("QMI_IPA_CONFIG_RESP_V01 failed\n"); + else + IPAWANDBG("Responsed QMI_IPA_CONFIG_RESP_V01\n"); +} + +static void ipa3_handle_modem_init_cmplt_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_init_modem_driver_cmplt_req_msg_v01 *cmplt_req; + struct ipa_init_modem_driver_cmplt_resp_msg_v01 resp; + int rc; + + IPAWANDBG("Received QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01\n"); + cmplt_req = (struct ipa_init_modem_driver_cmplt_req_msg_v01 *) + decoded_msg; + + if (!ipa3_modem_init_cmplt) { + ipa3_modem_init_cmplt = true; + if (ipa3_qmi_modem_init_fin) { + IPAWANDBG("load uc related registers (%d)\n", + ipa3_qmi_modem_init_fin); + ipa3_uc_load_notify(); + } + } + + memset(&resp, 0, sizeof(resp)); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01, + QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01, + ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei, + &resp); + + + 
if (rc < 0) + IPAWANERR("QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 failed\n"); + else + IPAWANDBG("Sent QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01\n"); +} + +static void ipa3_handle_mhi_alloc_channel_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_mhi_alloc_channel_req_msg_v01 *ch_alloc_req; + struct ipa_mhi_alloc_channel_resp_msg_v01 *resp; + int rc; + + IPAWANDBG("Received QMI_IPA_MHI_ALLOC_CHANNEL_REQ_V01\n"); + ch_alloc_req = (struct ipa_mhi_alloc_channel_req_msg_v01 *)decoded_msg; + + resp = imp_handle_allocate_channel_req(ch_alloc_req); + + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_MHI_ALLOC_CHANNEL_RESP_V01, + IPA_MHI_ALLOC_CHANNEL_RESP_MSG_V01_MAX_MSG_LEN, + ipa_mhi_alloc_channel_resp_msg_v01_ei, + resp); + + if (rc < 0) + IPAWANERR("QMI_IPA_MHI_ALLOC_CHANNEL_RESP_V01 failed\n"); + else + IPAWANDBG("Sent QMI_IPA_MHI_ALLOC_CHANNEL_RESP_V01\n"); +} + +static void ipa3_handle_mhi_vote_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct ipa_mhi_clk_vote_req_msg_v01 *vote_req; + struct ipa_mhi_clk_vote_resp_msg_v01 resp; + int rc; + + vote_req = (struct ipa_mhi_clk_vote_req_msg_v01 *)decoded_msg; + IPAWANDBG("Received QMI_IPA_MHI_CLK_VOTE_REQ_V01(%d)\n", + vote_req->mhi_vote); + rc = imp_handle_vote_req(vote_req->mhi_vote); + if (rc) { + resp.resp.result = IPA_QMI_RESULT_FAILURE_V01; + resp.resp.error = IPA_QMI_ERR_INTERNAL_V01; + } else { + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + } + IPAWANDBG("start sending QMI_IPA_MHI_CLK_VOTE_RESP_V01\n"); + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_MHI_CLK_VOTE_RESP_V01, + IPA_MHI_CLK_VOTE_RESP_MSG_V01_MAX_MSG_LEN, + ipa_mhi_clk_vote_resp_msg_v01_ei, + &resp); + + if (rc < 0) + IPAWANERR("QMI_IPA_MHI_CLK_VOTE_RESP_V01 failed\n"); + else + IPAWANDBG("Finished sending QMI_IPA_MHI_CLK_VOTE_RESP_V01\n"); +} + +static void ipa3_a5_svc_disconnect_cb(struct qmi_handle *qmi, + unsigned int node, unsigned int port) +{ + IPAWANDBG("Received QMI client disconnect\n"); +} + +/****************************************************/ +/* QMI A5 client ->Q6 */ +/****************************************************/ +static void ipa3_q6_clnt_svc_arrive(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa3_work_svc_arrive, ipa3_q6_clnt_svc_arrive); +static void ipa3_q6_clnt_svc_exit(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa3_work_svc_exit, ipa3_q6_clnt_svc_exit); +/* QMI client handle towards the Q6 (modem) IPA service */ +static struct qmi_handle *ipa_q6_clnt; + +static int ipa3_check_qmi_response(int rc, + int req_id, + enum ipa_qmi_result_type_v01 result, + enum ipa_qmi_error_type_v01 error, + char *resp_type) +{ + if (rc < 0) { + if (rc == -ETIMEDOUT && ipa3_rmnet_ctx.ipa_rmnet_ssr) { + IPAWANERR( + "Timeout for qmi request id %d\n", req_id); + return rc; + } + if ((rc == -ENETRESET) || (rc == -ENODEV)) { + IPAWANERR( + "SSR while waiting for qmi request id %d\n", req_id); + return rc; + } + IPAWANERR("Error sending qmi request id %d, rc = %d\n", + req_id, rc); + return rc; + } + if (result != IPA_QMI_RESULT_SUCCESS_V01 && + ipa3_rmnet_ctx.ipa_rmnet_ssr) { + IPAWANERR( + "Got bad response from request id %d (result %d, error %d)\n", + req_id, result, error); + return result; + } + IPAWANDBG_LOW("Received %s successfully\n", resp_type); + return 0; +} + +static int ipa3_qmi_send_req_wait(struct qmi_handle *client_handle, + struct ipa_msg_desc *req_desc, void *req, + struct ipa_msg_desc
*resp_desc, void *resp, + unsigned long timeout_ms) +{ + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(client_handle, &txn, resp_desc->ei_array, resp); + + if (ret < 0) { + IPAWANERR("QMI txn init failed, ret= %d\n", ret); + return ret; + } + + ret = qmi_send_request(client_handle, + &ipa3_qmi_ctx->server_sq, + &txn, + req_desc->msg_id, + req_desc->max_msg_len, + req_desc->ei_array, + req); + + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + ret = qmi_txn_wait(&txn, msecs_to_jiffies(timeout_ms)); + + return ret; +} + +static int ipa3_qmi_init_modem_send_sync_msg(void) +{ + struct ipa_init_modem_driver_req_msg_v01 req; + struct ipa_init_modem_driver_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc; + u16 smem_restr_bytes = ipa3_get_smem_restr_bytes(); + int wan_cons_ep; + + memset(&req, 0, sizeof(struct ipa_init_modem_driver_req_msg_v01)); + memset(&resp, 0, sizeof(struct ipa_init_modem_driver_resp_msg_v01)); + + req.platform_type_valid = true; + req.platform_type = ipa_wan_platform; + + req.hdr_tbl_info_valid = (IPA_MEM_PART(modem_hdr_size) != 0); + req.hdr_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_hdr_ofst) + smem_restr_bytes; + req.hdr_tbl_info.modem_offset_end = IPA_MEM_PART(modem_hdr_ofst) + + smem_restr_bytes + IPA_MEM_PART(modem_hdr_size) - 1; + + req.v4_route_tbl_info_valid = true; + req.v4_route_tbl_info.route_tbl_start_addr = + IPA_MEM_PART(v4_rt_nhash_ofst) + smem_restr_bytes; + req.v4_route_tbl_info.num_indices = + IPA_MEM_PART(v4_modem_rt_index_hi); + req.v6_route_tbl_info_valid = true; + + req.v6_route_tbl_info.route_tbl_start_addr = + IPA_MEM_PART(v6_rt_nhash_ofst) + smem_restr_bytes; + req.v6_route_tbl_info.num_indices = + IPA_MEM_PART(v6_modem_rt_index_hi); + + req.v4_filter_tbl_start_addr_valid = true; + req.v4_filter_tbl_start_addr = + IPA_MEM_PART(v4_flt_nhash_ofst) + smem_restr_bytes; + + req.v6_filter_tbl_start_addr_valid = true; + req.v6_filter_tbl_start_addr = + IPA_MEM_PART(v6_flt_nhash_ofst) + smem_restr_bytes; + + req.modem_mem_info_valid = (IPA_MEM_PART(modem_size) != 0); + req.modem_mem_info.block_start_addr = + IPA_MEM_PART(modem_ofst) + smem_restr_bytes; + req.modem_mem_info.size = IPA_MEM_PART(modem_size); + + wan_cons_ep = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (wan_cons_ep == IPA_EP_NOT_ALLOCATED) { + IPAWANDBG("APPS_WAN_CONS is not valid\n"); + req.ctrl_comm_dest_end_pt_valid = false; + req.ctrl_comm_dest_end_pt = 0; + } else { + req.ctrl_comm_dest_end_pt_valid = true; + req.ctrl_comm_dest_end_pt = + ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + } + + req.hdr_proc_ctx_tbl_info_valid = + (IPA_MEM_PART(modem_hdr_proc_ctx_size) != 0); + req.hdr_proc_ctx_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + smem_restr_bytes; + req.hdr_proc_ctx_tbl_info.modem_offset_end = + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + + IPA_MEM_PART(modem_hdr_proc_ctx_size) + smem_restr_bytes - 1; + + req.zip_tbl_info_valid = (IPA_MEM_PART(modem_comp_decomp_size) != 0); + req.zip_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes; + req.zip_tbl_info.modem_offset_end = + IPA_MEM_PART(modem_comp_decomp_ofst) + + IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes - 1; + + /* if hashing not supported, Modem filter/routing hash + * tables should not fill with valid data. 
+ */ + if (!ipa3_ctx->ipa_fltrt_not_hashable) { + req.v4_hash_route_tbl_info_valid = true; + req.v4_hash_route_tbl_info.route_tbl_start_addr = + IPA_MEM_PART(v4_rt_hash_ofst) + smem_restr_bytes; + req.v4_hash_route_tbl_info.num_indices = + IPA_MEM_PART(v4_modem_rt_index_hi); + + req.v6_hash_route_tbl_info_valid = true; + req.v6_hash_route_tbl_info.route_tbl_start_addr = + IPA_MEM_PART(v6_rt_hash_ofst) + smem_restr_bytes; + req.v6_hash_route_tbl_info.num_indices = + IPA_MEM_PART(v6_modem_rt_index_hi); + + req.v4_hash_filter_tbl_start_addr_valid = true; + req.v4_hash_filter_tbl_start_addr = + IPA_MEM_PART(v4_flt_hash_ofst) + smem_restr_bytes; + + req.v6_hash_filter_tbl_start_addr_valid = true; + req.v6_hash_filter_tbl_start_addr = + IPA_MEM_PART(v6_flt_hash_ofst) + smem_restr_bytes; + } + req.hw_stats_quota_base_addr_valid = true; + req.hw_stats_quota_base_addr = + IPA_MEM_PART(stats_quota_ofst) + smem_restr_bytes; + + req.hw_stats_quota_size_valid = true; + req.hw_stats_quota_size = IPA_MEM_PART(stats_quota_size); + + req.hw_drop_stats_base_addr_valid = true; + req.hw_drop_stats_base_addr = + IPA_MEM_PART(stats_drop_ofst) + smem_restr_bytes; + + req.hw_drop_stats_table_size_valid = true; + req.hw_drop_stats_table_size = IPA_MEM_PART(stats_drop_size); + + if (!ipa3_uc_loaded_check()) { /* First time boot */ + req.is_ssr_bootup_valid = false; + req.is_ssr_bootup = 0; + } else { /* After SSR boot */ + req.is_ssr_bootup_valid = true; + req.is_ssr_bootup = 1; + } + + IPAWANDBG("platform_type %d\n", req.platform_type); + IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n", + req.hdr_tbl_info.modem_offset_start); + IPAWANDBG("hdr_tbl_info.modem_offset_end %d\n", + req.hdr_tbl_info.modem_offset_end); + IPAWANDBG("v4_route_tbl_info.route_tbl_start_addr %d\n", + req.v4_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v4_route_tbl_info.num_indices %d\n", + req.v4_route_tbl_info.num_indices); + IPAWANDBG("v6_route_tbl_info.route_tbl_start_addr %d\n", + req.v6_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v6_route_tbl_info.num_indices %d\n", + req.v6_route_tbl_info.num_indices); + IPAWANDBG("v4_filter_tbl_start_addr %d\n", + req.v4_filter_tbl_start_addr); + IPAWANDBG("v6_filter_tbl_start_addr %d\n", + req.v6_filter_tbl_start_addr); + IPAWANDBG("modem_mem_info.block_start_addr %d\n", + req.modem_mem_info.block_start_addr); + IPAWANDBG("modem_mem_info.size %d\n", + req.modem_mem_info.size); + IPAWANDBG("ctrl_comm_dest_end_pt %d\n", + req.ctrl_comm_dest_end_pt); + IPAWANDBG("is_ssr_bootup %d\n", + req.is_ssr_bootup); + IPAWANDBG("v4_hash_route_tbl_info.route_tbl_start_addr %d\n", + req.v4_hash_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v4_hash_route_tbl_info.num_indices %d\n", + req.v4_hash_route_tbl_info.num_indices); + IPAWANDBG("v6_hash_route_tbl_info.route_tbl_start_addr %d\n", + req.v6_hash_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v6_hash_route_tbl_info.num_indices %d\n", + req.v6_hash_route_tbl_info.num_indices); + IPAWANDBG("v4_hash_filter_tbl_start_addr %d\n", + req.v4_hash_filter_tbl_start_addr); + IPAWANDBG("v6_hash_filter_tbl_start_addr %d\n", + req.v6_hash_filter_tbl_start_addr); + + req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01; + req_desc.ei_array = ipa3_init_modem_driver_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_RESP_V01; + resp_desc.ei_array = ipa3_init_modem_driver_resp_msg_data_v01_ei; + + 
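/* modem bring-up can be slow; allow the full QMI_SEND_REQ_TIMEOUT_MS for the reply */ +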
pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, &req, + &resp_desc, &resp, + QMI_SEND_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, + rc); + return rc; + } + + pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n"); + return ipa3_check_qmi_response(rc, + QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_init_modem_driver_resp_msg_v01"); +} + +/* sending filter-install-request to modem*/ +int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req) +{ + struct ipa_install_fltr_rule_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc; + int i; + + /* check if modem up */ + if (!ipa3_qmi_indication_fin || + !ipa3_qmi_modem_init_fin || + !ipa_q6_clnt) { + IPAWANDBG("modem QMI haven't up yet\n"); + return -EINVAL; + } + + /* check if the filter rules from IPACM is valid */ + if (req->filter_spec_list_len == 0) { + IPAWANDBG("IPACM pass zero rules to Q6\n"); + } else { + IPAWANDBG("IPACM pass %u rules to Q6\n", + req->filter_spec_list_len); + } + + if (req->filter_spec_list_len >= QMI_IPA_MAX_FILTERS_V01) { + IPAWANDBG( + "IPACM passes the number of filtering rules exceed limit\n"); + return -EINVAL; + } else if (req->source_pipe_index_valid != 0) { + IPAWANDBG( + "IPACM passes source_pipe_index_valid not zero 0 != %d\n", + req->source_pipe_index_valid); + return -EINVAL; + } else if (req->source_pipe_index >= ipa3_ctx->ipa_num_pipes) { + IPAWANDBG( + "IPACM passes source pipe index not valid ID = %d\n", + req->source_pipe_index); + return -EINVAL; + } + for (i = 0; i < req->filter_spec_list_len; i++) { + if ((req->filter_spec_list[i].ip_type != + QMI_IPA_IP_TYPE_V4_V01) && + (req->filter_spec_list[i].ip_type != + QMI_IPA_IP_TYPE_V6_V01)) + return -EINVAL; + if (req->filter_spec_list[i].is_mux_id_valid == false) + return -EINVAL; + if (req->filter_spec_list[i].is_routing_table_index_valid + == false) + return -EINVAL; + if ((req->filter_spec_list[i].filter_action <= + QMI_IPA_FILTER_ACTION_INVALID_V01) || + (req->filter_spec_list[i].filter_action > + QMI_IPA_FILTER_ACTION_EXCEPTION_V01)) + return -EINVAL; + } + + mutex_lock(&ipa3_qmi_lock); + if (ipa3_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[ + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]), + req, + sizeof(struct ipa_install_fltr_rule_req_msg_v01)); + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++; + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10; + } + mutex_unlock(&ipa3_qmi_lock); + + req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01; + req_desc.ei_array = ipa3_install_fltr_rule_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01; + resp_desc.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei; + + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, &resp, + QMI_SEND_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, + rc); + return rc; + } + + return ipa3_check_qmi_response(rc, + 
QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_install_filter"); +} + +/* sending filter-install-request to modem*/ +int ipa3_qmi_filter_request_ex_send( + struct ipa_install_fltr_rule_req_ex_msg_v01 *req) +{ + struct ipa_install_fltr_rule_resp_ex_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc; + int i; + + /* check if modem up */ + if (!ipa3_qmi_indication_fin || + !ipa3_qmi_modem_init_fin || + !ipa_q6_clnt) { + IPAWANDBG("modem QMI haven't up yet\n"); + return -EINVAL; + } + + /* check if the filter rules from IPACM is valid */ + if (req->filter_spec_ex_list_len == 0) { + IPAWANDBG("IPACM pass zero rules to Q6\n"); + } else { + IPAWANDBG("IPACM pass %u rules to Q6\n", + req->filter_spec_ex_list_len); + } + + if (req->filter_spec_ex_list_len >= QMI_IPA_MAX_FILTERS_EX_V01) { + IPAWANDBG( + "IPACM pass the number of filtering rules exceed limit\n"); + return -EINVAL; + } else if (req->source_pipe_index_valid != 0) { + IPAWANDBG( + "IPACM passes source_pipe_index_valid not zero 0 != %d\n", + req->source_pipe_index_valid); + return -EINVAL; + } + + for (i = 0; i < req->filter_spec_ex_list_len; i++) { + if ((req->filter_spec_ex_list[i].ip_type != + QMI_IPA_IP_TYPE_V4_V01) && + (req->filter_spec_ex_list[i].ip_type != + QMI_IPA_IP_TYPE_V6_V01)) + return -EINVAL; + if (req->filter_spec_ex_list[i].is_mux_id_valid == false) + return -EINVAL; + if (req->filter_spec_ex_list[i].is_routing_table_index_valid + == false) + return -EINVAL; + if ((req->filter_spec_ex_list[i].filter_action <= + QMI_IPA_FILTER_ACTION_INVALID_V01) || + (req->filter_spec_ex_list[i].filter_action > + QMI_IPA_FILTER_ACTION_EXCEPTION_V01)) + return -EINVAL; + } + mutex_lock(&ipa3_qmi_lock); + if (ipa3_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_ex_msg_cache[ + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg]), + req, + sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01)); + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg++; + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg %= 10; + } + mutex_unlock(&ipa3_qmi_lock); + + req_desc.max_msg_len = + QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01; + req_desc.ei_array = ipa3_install_fltr_rule_req_ex_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_ex_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01; + resp_desc.ei_array = ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei; + + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, &resp, + QMI_SEND_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01, + rc); + return rc; + } + + return ipa3_check_qmi_response(rc, + QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_install_filter"); +} + +/* sending ul-filter-install-request to modem*/ +int ipa3_qmi_ul_filter_request_send( + struct ipa_configure_ul_firewall_rules_req_msg_v01 *req) +{ + struct ipa_configure_ul_firewall_rules_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc; + + IPAWANDBG("IPACM pass %u rules to Q6\n", + req->firewall_rules_list_len); + + mutex_lock(&ipa3_qmi_lock); + if (ipa3_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy( + &(ipa3_qmi_ctx->ipa_configure_ul_firewall_rules_req_msg_cache[ + 
ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg]), + req, + sizeof(struct + ipa_configure_ul_firewall_rules_req_msg_v01)); + ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg++; + ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg %= + MAX_NUM_QMI_RULE_CACHE; + } + mutex_unlock(&ipa3_qmi_lock); + + req_desc.max_msg_len = + QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01; + req_desc.ei_array = + ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei; + + memset(&resp, 0, + sizeof(struct ipa_configure_ul_firewall_rules_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01; + resp_desc.ei_array = + ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei; + + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, &resp, + QMI_SEND_REQ_TIMEOUT_MS); + if (rc < 0) { + IPAWANERR("send Req %d failed, rc= %d\n", + QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01, + rc); + return rc; + } + + return ipa3_check_qmi_response(rc, + QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01, + resp.resp.result, + resp.resp.error, "ipa_received_ul_firewall_filter"); +} + +int ipa3_qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req) +{ + struct ipa_enable_force_clear_datapath_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc = 0; + + if (!req || !req->source_pipe_bitmask) { + IPAWANERR("invalid params\n"); + return -EINVAL; + } + + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + IPAWANDBG("Simulating success on emu/virt mode\n"); + return 0; + } + + req_desc.max_msg_len = + QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01; + req_desc.ei_array = + ipa3_enable_force_clear_datapath_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01; + resp_desc.ei_array = + ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei; + + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, &resp, + QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("send Req %d failed, rc= %d\n", + QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01, + rc); + return rc; + } + + if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) { + IPAWANERR("filter_notify failed %d\n", + resp.resp.result); + return resp.resp.result; + } + IPAWANDBG("SUCCESS\n"); + return rc; +} + +int ipa3_qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req) +{ + struct ipa_disable_force_clear_datapath_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc = 0; + + + if (!req) { + IPAWANERR("invalid params\n"); + return -EINVAL; + } + + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + IPAWANDBG("Simulating success on emu/virt mode\n"); + return 0; + } + + req_desc.max_msg_len = + QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01; + req_desc.ei_array = + ipa3_disable_force_clear_datapath_req_msg_data_v01_ei; + + memset(&resp, 0, 
sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01; + resp_desc.ei_array = + ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei; + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, &resp, + QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("send Req %d failed, rc= %d\n", + QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01, + rc); + return rc; + } + + if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) { + IPAWANERR("filter_notify failed %d\n", + resp.resp.result); + return resp.resp.result; + } + IPAWANDBG("SUCCESS\n"); + return rc; +} + +/* sending filter-installed-notify-request to modem*/ +int ipa3_qmi_filter_notify_send( + struct ipa_fltr_installed_notif_req_msg_v01 *req) +{ + struct ipa_fltr_installed_notif_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc = 0; + + /* check if the filter rules from IPACM is valid */ + if (req->rule_id_len == 0) { + IPAWANDBG(" delete UL filter rule for pipe %d\n", + req->source_pipe_index); + } else if (req->rule_id_len > QMI_IPA_MAX_FILTERS_V01) { + IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n", + req->source_pipe_index, + req->rule_id_len); + return -EINVAL; + } + + if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) { + IPAWANERR(" UL filter rule for pipe %d install_status = %d\n", + req->source_pipe_index, req->install_status); + return -EINVAL; + } else if (req->rule_id_valid != 1) { + IPAWANERR(" UL filter rule for pipe %d rule_id_valid = %d\n", + req->source_pipe_index, req->rule_id_valid); + return -EINVAL; + } else if (req->source_pipe_index >= ipa3_ctx->ipa_num_pipes) { + IPAWANDBG( + "IPACM passes source pipe index not valid ID = %d\n", + req->source_pipe_index); + return -EINVAL; + } else if (((req->embedded_pipe_index_valid != true) || + (req->embedded_call_mux_id_valid != true)) && + ((req->embedded_pipe_index_valid != false) || + (req->embedded_call_mux_id_valid != false))) { + IPAWANERR( + "IPACM passes embedded pipe and mux valid not valid\n"); + return -EINVAL; + } else if (req->embedded_pipe_index >= ipa3_ctx->ipa_num_pipes) { + IPAWANERR("IPACM passes source pipe index not valid ID = %d\n", + req->source_pipe_index); + return -EINVAL; + } + + if (req->source_pipe_index == -1) { + IPAWANERR("Source pipe index invalid\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_qmi_lock); + if (ipa3_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[ + ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]), + req, + sizeof(struct ipa_fltr_installed_notif_req_msg_v01)); + ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++; + ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10; + } + mutex_unlock(&ipa3_qmi_lock); + + req_desc.max_msg_len = + QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01; + req_desc.ei_array = ipa3_fltr_installed_notif_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01; + resp_desc.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei; + + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = 
ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, &resp, + QMI_SEND_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("send Req %d failed, rc= %d\n", + QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, + rc); + return rc; + } + + return ipa3_check_qmi_response(rc, + QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_fltr_installed_notif_resp"); +} + +static void ipa3_q6_clnt_quota_reached_ind_cb(struct qmi_handle *handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *data) +{ + struct ipa_data_usage_quota_reached_ind_msg_v01 *qmi_ind; + + if (handle != ipa_q6_clnt) { + IPAWANERR("Wrong client\n"); + return; + } + + qmi_ind = (struct ipa_data_usage_quota_reached_ind_msg_v01 *) data; + + IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n", + qmi_ind->apn.mux_id, (unsigned long) qmi_ind->apn.num_Mbytes); + ipa3_broadcast_quota_reach_ind(qmi_ind->apn.mux_id, + IPA_UPSTEAM_MODEM); +} + +static void ipa3_q6_clnt_install_firewall_rules_ind_cb( + struct qmi_handle *handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *data) +{ + struct ipa_configure_ul_firewall_rules_ind_msg_v01 qmi_ul_firewall_ind; + + memset(&qmi_ul_firewall_ind, 0, sizeof( + struct ipa_configure_ul_firewall_rules_ind_msg_v01)); + memcpy(&qmi_ul_firewall_ind, data, sizeof( + struct ipa_configure_ul_firewall_rules_ind_msg_v01)); + + IPAWANDBG("UL firewall rules install indication on Q6"); + if (qmi_ul_firewall_ind.result.is_success == + QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01) { + IPAWANDBG(" : Success\n"); + IPAWANDBG + ("Mux ID : %d\n", qmi_ul_firewall_ind.result.mux_id); + } else if (qmi_ul_firewall_ind.result.is_success == + QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01) { + IPAWANERR(": Failure\n"); + } else { + IPAWANERR(": Unexpected Result"); + } +} + +static void ipa3_q6_clnt_svc_arrive(struct work_struct *work) +{ + int rc; + struct ipa_master_driver_init_complt_ind_msg_v01 ind; + + rc = kernel_connect(ipa_q6_clnt->sock, + (struct sockaddr *) &ipa3_qmi_ctx->server_sq, + sizeof(ipa3_qmi_ctx->server_sq), + 0); + + if (rc < 0) { + IPAWANERR("Couldnt connect Server\n"); + return; + } + + IPAWANDBG("Q6 QMI service available now\n"); + /* Initialize modem IPA-driver */ + IPAWANDBG("send ipa3_qmi_init_modem_send_sync_msg to modem\n"); + rc = ipa3_qmi_init_modem_send_sync_msg(); + if ((rc == -ENETRESET) || (rc == -ENODEV)) { + IPAWANERR( + "ipa3_qmi_init_modem_send_sync_msg failed due to SSR!\n"); + /* Cleanup will take place when ipa3_wwan_remove is called */ + vfree(ipa_q6_clnt); + ipa_q6_clnt = NULL; + return; + } + + if (rc != 0) { + IPAWANERR("ipa3_qmi_init_modem_send_sync_msg failed\n"); + /* + * Hardware not responding. + * This is a very unexpected scenario, which requires a kernel + * panic in order to force dumps for QMI/Q6 side analysis. 
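+	 * The ipa_assert() call below is what is meant to force that
+	 * panic, so the QMI/Q6 state is captured in the resulting dumps.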
+ */ + ipa_assert(); + } + + ipa3_qmi_modem_init_fin = true; + + /* got modem_init_cmplt_req already, load uc-related register */ + if (ipa3_modem_init_cmplt) { + IPAWANDBG("load uc related registers (%d)\n", + ipa3_modem_init_cmplt); + ipa3_uc_load_notify(); + } + + /* In cold-bootup, first_time_handshake = false */ + ipa3_q6_handshake_complete(first_time_handshake); + first_time_handshake = true; + IPAWANDBG("complete, ipa3_qmi_modem_init_fin : %d\n", + ipa3_qmi_modem_init_fin); + + if (ipa3_qmi_indication_fin) { + IPAWANDBG("send indication to modem (%d)\n", + ipa3_qmi_indication_fin); + memset(&ind, 0, sizeof(struct + ipa_master_driver_init_complt_ind_msg_v01)); + ind.master_driver_init_status.result = + IPA_QMI_RESULT_SUCCESS_V01; + + rc = qmi_send_indication(ipa3_svc_handle, + &ipa3_qmi_ctx->client_sq, + QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01, + QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01, + ipa3_master_driver_init_complt_ind_msg_data_v01_ei, + &ind); + + IPAWANDBG("ipa_qmi_service_client good\n"); + } else { + IPAWANERR("not send indication (%d)\n", + ipa3_qmi_indication_fin); + } +} + +static void ipa3_q6_clnt_svc_exit(struct work_struct *work) +{ + if (ipa3_qmi_ctx != NULL) { + ipa3_qmi_ctx->server_sq.sq_family = 0; + ipa3_qmi_ctx->server_sq.sq_node = 0; + ipa3_qmi_ctx->server_sq.sq_port = 0; + } +} + +static int ipa3_q6_clnt_svc_event_notify_svc_new(struct qmi_handle *qmi, + struct qmi_service *service) +{ + IPAWANDBG("QMI svc:%d vers:%d ins:%d node:%d port:%d\n", + service->service, service->version, service->instance, + service->node, service->port); + + if (ipa3_qmi_ctx != NULL) { + ipa3_qmi_ctx->server_sq.sq_family = AF_QIPCRTR; + ipa3_qmi_ctx->server_sq.sq_node = service->node; + ipa3_qmi_ctx->server_sq.sq_port = service->port; + } + if (!workqueues_stopped) { + queue_delayed_work(ipa_clnt_req_workqueue, + &ipa3_work_svc_arrive, 0); + } + return 0; +} + +static void ipa3_q6_clnt_svc_event_notify_net_reset(struct qmi_handle *qmi) +{ + if (!workqueues_stopped) + queue_delayed_work(ipa_clnt_req_workqueue, + &ipa3_work_svc_exit, 0); +} + +static void ipa3_q6_clnt_svc_event_notify_svc_exit(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + IPAWANDBG("QMI svc:%d vers:%d ins:%d node:%d port:%d\n", svc->service, + svc->version, svc->instance, svc->node, svc->port); + + if (!workqueues_stopped) + queue_delayed_work(ipa_clnt_req_workqueue, + &ipa3_work_svc_exit, 0); +} + +static struct qmi_ops server_ops = { + .del_client = ipa3_a5_svc_disconnect_cb, +}; + +static struct qmi_ops client_ops = { + .new_server = ipa3_q6_clnt_svc_event_notify_svc_new, + .del_server = ipa3_q6_clnt_svc_event_notify_svc_exit, + .net_reset = ipa3_q6_clnt_svc_event_notify_net_reset, +}; + +static struct qmi_msg_handler server_handlers[] = { + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01, + .ei = ipa3_indication_reg_req_msg_data_v01_ei, + .decoded_size = + QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01, + .fn = ipa3_handle_indication_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, + .ei = ipa3_install_fltr_rule_req_msg_data_v01_ei, + .decoded_size = + QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01, + .fn = ipa3_handle_install_filter_rule_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, + .ei = ipa3_fltr_installed_notif_req_msg_data_v01_ei, + .decoded_size = + QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01, + .fn = ipa3_handle_filter_installed_notify_req, + }, + { + .type = 
QMI_REQUEST, + .msg_id = QMI_IPA_CONFIG_REQ_V01, + .ei = ipa3_config_req_msg_data_v01_ei, + .decoded_size = QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01, + .fn = handle_ipa_config_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01, + .ei = ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei, + .decoded_size = + QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01, + .fn = ipa3_handle_modem_init_cmplt_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01, + .ei = ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei, + .decoded_size = + QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01, + .fn = ipa3_handle_modem_init_cmplt_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_MHI_ALLOC_CHANNEL_REQ_V01, + .ei = ipa_mhi_alloc_channel_req_msg_v01_ei, + .decoded_size = + IPA_MHI_ALLOC_CHANNEL_REQ_MSG_V01_MAX_MSG_LEN, + .fn = ipa3_handle_mhi_alloc_channel_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_MHI_CLK_VOTE_REQ_V01, + .ei = ipa_mhi_clk_vote_req_msg_v01_ei, + .decoded_size = + IPA_MHI_CLK_VOTE_REQ_MSG_V01_MAX_MSG_LEN, + .fn = ipa3_handle_mhi_vote_req, + }, + +}; + +/* clinet_handlers are client callbacks that will be called from QMI context + * when an indication from Q6 server arrives. + * In our case, client_handlers needs handling only for QMI_INDICATION, + * since the QMI_REQUEST/ QMI_RESPONSE are handled in a blocking fashion + * at the time of sending QMI_REQUESTs. + */ +static struct qmi_msg_handler client_handlers[] = { + { + .type = QMI_INDICATION, + .msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01, + .ei = ipa3_data_usage_quota_reached_ind_msg_data_v01_ei, + .decoded_size = + QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01, + .fn = ipa3_q6_clnt_quota_reached_ind_cb, + }, + { + .type = QMI_INDICATION, + .msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01, + .ei = ipa3_install_fltr_rule_req_msg_data_v01_ei, + .decoded_size = + QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01, + .fn = ipa3_q6_clnt_install_firewall_rules_ind_cb, + }, +}; + + +static void ipa3_qmi_service_init_worker(struct work_struct *work) +{ + int rc; + + /* Initialize QMI-service*/ + IPAWANDBG("IPA A7 QMI init OK :>>>>\n"); + + /* start the QMI msg cache */ + ipa3_qmi_ctx = vzalloc(sizeof(*ipa3_qmi_ctx)); + if (!ipa3_qmi_ctx) { + IPAWANERR("Failed to allocate the memory to ipa3_qmi_ctx\n"); + return; + } + + ipa3_qmi_ctx->modem_cfg_emb_pipe_flt = + ipa3_get_modem_cfg_emb_pipe_flt(); + + ipa3_svc_handle = vzalloc(sizeof(*ipa3_svc_handle)); + + if (!ipa3_svc_handle) + goto destroy_ipa_A7_svc_wq; + + rc = qmi_handle_init(ipa3_svc_handle, + QMI_IPA_MAX_MSG_LEN, + &server_ops, + server_handlers); + + if (rc < 0) { + IPAWANERR("Initializing ipa_a5 svc failed %d\n", rc); + goto destroy_qmi_handle; + } + + rc = qmi_add_server(ipa3_svc_handle, + IPA_A5_SERVICE_SVC_ID, + IPA_A5_SVC_VERS, + IPA_A5_SERVICE_INS_ID); + + if (rc < 0) { + IPAWANERR("Registering ipa_a5 svc failed %d\n", + rc); + goto deregister_qmi_srv; + } + + /* Initialize QMI-client */ + ipa_clnt_req_workqueue = create_singlethread_workqueue("clnt_req"); + if (!ipa_clnt_req_workqueue) { + IPAWANERR("Creating clnt_req workqueue failed\n"); + goto deregister_qmi_srv; + } + + /* Create a Local client port for QMI communication */ + ipa_q6_clnt = vzalloc(sizeof(*ipa_q6_clnt)); + + if (!ipa_q6_clnt) + goto destroy_clnt_req_wq; + + rc = qmi_handle_init(ipa_q6_clnt, + QMI_IPA_MAX_MSG_LEN, + &client_ops, + client_handlers); + + if (rc < 0) { + IPAWANERR("Creating clnt handle failed\n"); + 
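+		/*
+		 * The error labels below unwind in reverse order of setup:
+		 * each label releases one resource allocated earlier and
+		 * falls through to the labels that release everything set
+		 * up before it.
+		 */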
goto destroy_qmi_client_handle; + } + + rc = qmi_add_lookup(ipa_q6_clnt, + IPA_Q6_SERVICE_SVC_ID, + IPA_Q6_SVC_VERS, + IPA_Q6_SERVICE_INS_ID); + + if (rc < 0) { + IPAWANERR("Adding Q6 Svc failed\n"); + goto deregister_qmi_client; + } + + /* get Q6 service and start send modem-initial to Q6 */ + IPAWANDBG("wait service available\n"); + return; + +deregister_qmi_client: + qmi_handle_release(ipa_q6_clnt); +destroy_qmi_client_handle: + vfree(ipa_q6_clnt); + ipa_q6_clnt = NULL; +destroy_clnt_req_wq: + destroy_workqueue(ipa_clnt_req_workqueue); + ipa_clnt_req_workqueue = NULL; +deregister_qmi_srv: + qmi_handle_release(ipa3_svc_handle); +destroy_qmi_handle: + vfree(ipa3_qmi_ctx); +destroy_ipa_A7_svc_wq: + vfree(ipa3_svc_handle); + ipa3_qmi_ctx = NULL; + ipa3_svc_handle = NULL; +} + +int ipa3_qmi_service_init(uint32_t wan_platform_type) +{ + ipa_wan_platform = wan_platform_type; + ipa3_qmi_modem_init_fin = false; + ipa3_qmi_indication_fin = false; + ipa3_modem_init_cmplt = false; + workqueues_stopped = false; + + if (!ipa3_svc_handle) { + INIT_WORK(&ipa3_qmi_service_init_work, + ipa3_qmi_service_init_worker); + schedule_work(&ipa3_qmi_service_init_work); + } + return 0; +} + +void ipa3_qmi_service_exit(void) +{ + + workqueues_stopped = true; + + /* qmi-service */ + if (ipa3_svc_handle != NULL) { + qmi_handle_release(ipa3_svc_handle); + vfree(ipa3_svc_handle); + ipa3_svc_handle = NULL; + } + + /* qmi-client */ + + /* Release client handle */ + if (ipa_q6_clnt != NULL) { + qmi_handle_release(ipa_q6_clnt); + vfree(ipa_q6_clnt); + ipa_q6_clnt = NULL; + if (ipa_clnt_req_workqueue) { + destroy_workqueue(ipa_clnt_req_workqueue); + ipa_clnt_req_workqueue = NULL; + } + } + + /* clean the QMI msg cache */ + mutex_lock(&ipa3_qmi_lock); + if (ipa3_qmi_ctx != NULL) { + vfree(ipa3_qmi_ctx); + ipa3_qmi_ctx = NULL; + } + mutex_unlock(&ipa3_qmi_lock); + + ipa3_qmi_modem_init_fin = false; + ipa3_qmi_indication_fin = false; + ipa3_modem_init_cmplt = false; +} + +void ipa3_qmi_stop_workqueues(void) +{ + IPAWANDBG("Stopping all QMI workqueues\n"); + + /* Stopping all workqueues so new work won't be scheduled */ + workqueues_stopped = true; + + /* Making sure that the current scheduled work won't be executed */ + cancel_delayed_work(&ipa3_work_svc_arrive); + cancel_delayed_work(&ipa3_work_svc_exit); +} + +/* voting for bus BW to ipa_rm*/ +int ipa3_vote_for_bus_bw(uint32_t *bw_mbps) +{ + int ret; + + if (bw_mbps == NULL) { + IPAWANERR("Bus BW is invalid\n"); + return -EINVAL; + } + + ret = ipa3_wwan_set_modem_perf_profile(*bw_mbps); + if (ret) + IPAWANERR("Failed to set perf profile to BW %u\n", + *bw_mbps); + else + IPAWANDBG("Succeeded to set perf profile to BW %u\n", + *bw_mbps); + + return ret; +} + +int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, + struct ipa_get_data_stats_resp_msg_v01 *resp) +{ + struct ipa_msg_desc req_desc, resp_desc; + int rc; + + req_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_GET_DATA_STATS_REQ_V01; + req_desc.ei_array = ipa3_get_data_stats_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_GET_DATA_STATS_RESP_V01; + resp_desc.ei_array = ipa3_get_data_stats_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n"); + + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d 
failed, rc= %d\n", + QMI_IPA_GET_DATA_STATS_REQ_V01, + rc); + return rc; + } + + IPAWANDBG_LOW("QMI_IPA_GET_DATA_STATS_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_GET_DATA_STATS_REQ_V01, resp->resp.result, + resp->resp.error, "ipa_get_data_stats_resp_msg_v01"); +} + +int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req, + struct ipa_get_apn_data_stats_resp_msg_v01 *resp) +{ + struct ipa_msg_desc req_desc, resp_desc; + int rc; + + req_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_REQ_V01; + req_desc.ei_array = ipa3_get_apn_data_stats_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_RESP_V01; + resp_desc.ei_array = ipa3_get_apn_data_stats_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n"); + + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_GET_APN_DATA_STATS_REQ_V01, + rc); + return rc; + } + + IPAWANDBG_LOW("QMI_IPA_GET_APN_DATA_STATS_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_GET_APN_DATA_STATS_REQ_V01, resp->resp.result, + resp->resp.error, "ipa_get_apn_data_stats_req_msg_v01"); +} + +int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req) +{ + struct ipa_set_data_usage_quota_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc; + + memset(&resp, 0, sizeof(struct ipa_set_data_usage_quota_resp_msg_v01)); + + req_desc.max_msg_len = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01; + req_desc.ei_array = ipa3_set_data_usage_quota_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01; + resp_desc.ei_array = ipa3_set_data_usage_quota_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, &resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, + rc); + return rc; + } + + IPAWANDBG_LOW("QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_set_data_usage_quota_req_msg_v01"); +} + +int ipa3_qmi_stop_data_qouta(void) +{ + struct ipa_stop_data_usage_quota_req_msg_v01 req; + struct ipa_stop_data_usage_quota_resp_msg_v01 resp; + struct ipa_msg_desc req_desc, resp_desc; + int rc; + + memset(&req, 0, sizeof(struct ipa_stop_data_usage_quota_req_msg_v01)); + memset(&resp, 0, sizeof(struct ipa_stop_data_usage_quota_resp_msg_v01)); + + req_desc.max_msg_len = + QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01; + req_desc.ei_array = ipa3_stop_data_usage_quota_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01; + resp_desc.ei_array = ipa3_stop_data_usage_quota_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending 
QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, &req, + &resp_desc, &resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, + rc); + return rc; + } + + IPAWANDBG_LOW("QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01"); +} + +int ipa3_qmi_enable_per_client_stats( + struct ipa_enable_per_client_stats_req_msg_v01 *req, + struct ipa_enable_per_client_stats_resp_msg_v01 *resp) +{ + struct ipa_msg_desc req_desc, resp_desc; + int rc = 0; + + req_desc.max_msg_len = + QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = + QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01; + req_desc.ei_array = + ipa3_enable_per_client_stats_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = + QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01; + resp_desc.ei_array = + ipa3_enable_per_client_stats_resp_msg_data_v01_ei; + + IPAWANDBG("Sending QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01\n"); + + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("send Req %d failed, rc= %d\n", + QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01, + rc); + return rc; + } + + IPAWANDBG("QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01, resp->resp.result, + resp->resp.error, "ipa3_qmi_enable_per_client_stats"); +} + +int ipa3_qmi_get_per_client_packet_stats( + struct ipa_get_stats_per_client_req_msg_v01 *req, + struct ipa_get_stats_per_client_resp_msg_v01 *resp) +{ + struct ipa_msg_desc req_desc, resp_desc; + int rc; + + req_desc.max_msg_len = QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01; + req_desc.ei_array = ipa3_get_stats_per_client_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01; + resp_desc.ei_array = ipa3_get_stats_per_client_resp_msg_data_v01_ei; + + IPAWANDBG("Sending QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01\n"); + + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("send Req %d failed, rc= %d\n", + QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01, + rc); + return rc; + } + + IPAWANDBG("QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01, resp->resp.result, + resp->resp.error, + "struct ipa_get_stats_per_client_req_msg_v01"); +} + +int ipa3_qmi_send_mhi_ready_indication( + struct ipa_mhi_ready_indication_msg_v01 *req) +{ + IPAWANDBG("Sending QMI_IPA_MHI_READY_IND_V01\n"); + + if (unlikely(!ipa3_svc_handle)) + return -ETIMEDOUT; + + return qmi_send_indication(ipa3_svc_handle, + &ipa3_qmi_ctx->client_sq, + QMI_IPA_MHI_READY_IND_V01, + IPA_MHI_READY_INDICATION_MSG_V01_MAX_MSG_LEN, + ipa_mhi_ready_indication_msg_v01_ei, + req); +} + +int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req) +{ + + struct ipa_msg_desc req_desc, resp_desc; + struct ipa_mhi_cleanup_resp_msg_v01 resp; + int rc; + + memset(&resp, 
0, sizeof(resp)); + + IPAWANDBG("Sending QMI_IPA_MHI_CLEANUP_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + + req_desc.max_msg_len = IPA_MHI_CLK_VOTE_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_IPA_MHI_CLEANUP_REQ_V01; + req_desc.ei_array = ipa_mhi_cleanup_req_msg_v01_ei; + + resp_desc.max_msg_len = IPA_MHI_CLK_VOTE_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_IPA_MHI_CLEANUP_RESP_V01; + resp_desc.ei_array = ipa_mhi_cleanup_resp_msg_v01_ei; + + rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, req, + &resp_desc, &resp, + QMI_MHI_SEND_REQ_TIMEOUT_MS); + + IPAWANDBG("QMI_IPA_MHI_CLEANUP_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_MHI_CLEANUP_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_mhi_cleanup_req_msg"); +} + +void ipa3_qmi_init(void) +{ + mutex_init(&ipa3_qmi_lock); +} + +void ipa3_qmi_cleanup(void) +{ + mutex_destroy(&ipa3_qmi_lock); +} + diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h new file mode 100644 index 000000000000..7f5ddf094f5c --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h @@ -0,0 +1,470 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef IPA_QMI_SERVICE_H +#define IPA_QMI_SERVICE_H + +#include +#include +#include +#include +#include "ipa_i.h" +#include + +/** + * name of the DL wwan default routing tables for v4 and v6 + */ +#define IPA_A7_QMAP_HDR_NAME "ipa_qmap_hdr" +#define IPA_DFLT_WAN_RT_TBL_NAME "ipa_dflt_wan_rt" +#define MAX_NUM_Q6_RULE 35 +#define MAX_NUM_QMI_RULE_CACHE 10 +#define DEV_NAME "ipa-wan" +#define SUBSYS_MODEM "modem" + +#define IPAWANDBG(fmt, args...) \ + do { \ + pr_debug(DEV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +#define IPAWANDBG_LOW(fmt, args...) \ + do { \ + pr_debug(DEV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANERR(fmt, args...) \ + do { \ + pr_err(DEV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANERR_RL(fmt, args...) \ + do { \ + pr_err_ratelimited_ipa(DEV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANINFO(fmt, args...) 
\ + do { \ + pr_info(DEV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +extern struct ipa3_qmi_context *ipa3_qmi_ctx; + +struct ipa3_qmi_context { + struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE]; + u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE]; + int num_ipa_install_fltr_rule_req_msg; + struct ipa_install_fltr_rule_req_msg_v01 + ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE]; + int num_ipa_install_fltr_rule_req_ex_msg; + struct ipa_install_fltr_rule_req_ex_msg_v01 + ipa_install_fltr_rule_req_ex_msg_cache[MAX_NUM_QMI_RULE_CACHE]; + int num_ipa_fltr_installed_notif_req_msg; + struct ipa_fltr_installed_notif_req_msg_v01 + ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE]; + int num_ipa_configure_ul_firewall_rules_req_msg; + struct ipa_configure_ul_firewall_rules_req_msg_v01 + ipa_configure_ul_firewall_rules_req_msg_cache + [MAX_NUM_QMI_RULE_CACHE]; + bool modem_cfg_emb_pipe_flt; + struct sockaddr_qrtr client_sq; + struct sockaddr_qrtr server_sq; +}; + +struct ipa3_rmnet_mux_val { + uint32_t mux_id; + int8_t vchannel_name[IFNAMSIZ]; + bool mux_channel_set; + bool ul_flt_reg; + bool mux_hdr_set; + uint32_t hdr_hdl; +}; + +extern struct qmi_elem_info + ipa3_init_modem_driver_req_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_init_modem_driver_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_indication_reg_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_indication_reg_resp_msg_data_v01_ei[]; + +extern struct qmi_elem_info + ipa3_master_driver_init_complt_ind_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[]; + +extern struct qmi_elem_info + ipa3_fltr_installed_notif_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[]; + +extern struct qmi_elem_info ipa3_config_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_config_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_get_data_stats_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[]; + +extern struct qmi_elem_info + ipa3_set_data_usage_quota_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_stop_data_usage_quota_req_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[]; +extern struct qmi_elem_info + 
ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_ul_firewall_rule_type_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_ul_firewall_config_result_type_data_v01_ei[]; +extern struct + qmi_elem_info ipa3_per_client_stats_info_type_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_enable_per_client_stats_req_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_enable_per_client_stats_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_get_stats_per_client_req_msg_data_v01_ei[]; + +extern struct qmi_elem_info + ipa3_get_stats_per_client_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[]; + +extern struct qmi_elem_info ipa_mhi_ready_indication_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_mem_addr_info_type_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_tr_info_type_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_er_info_type_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_alloc_channel_req_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_ch_alloc_resp_type_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_alloc_channel_resp_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_clk_vote_req_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_clk_vote_resp_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_cleanup_req_msg_v01_ei[]; +extern struct qmi_elem_info ipa_mhi_cleanup_resp_msg_v01_ei[]; + +/** + * struct ipa3_rmnet_context - IPA rmnet context + * @ipa_rmnet_ssr: support modem SSR + * @polling_interval: Requested interval for polling tethered statistics + * @metered_mux_id: The mux ID on which quota has been set + */ +struct ipa3_rmnet_context { + bool ipa_rmnet_ssr; + u64 polling_interval; + u32 metered_mux_id; +}; + +extern struct ipa3_rmnet_context ipa3_rmnet_ctx; + +#ifdef CONFIG_RMNET_IPA3 + +int ipa3_qmi_service_init(uint32_t wan_platform_type); + +void ipa3_qmi_service_exit(void); + +/* sending filter-install-request to modem*/ +int ipa3_qmi_filter_request_send( + struct ipa_install_fltr_rule_req_msg_v01 *req); + +int ipa3_qmi_filter_request_ex_send( + struct ipa_install_fltr_rule_req_ex_msg_v01 *req); + +int ipa3_qmi_ul_filter_request_send( + struct ipa_configure_ul_firewall_rules_req_msg_v01 *req); + +/* sending filter-installed-notify-request to modem*/ +int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 + *req); + +/* voting for bus BW to ipa_rm*/ +int ipa3_vote_for_bus_bw(uint32_t *bw_mbps); + +int ipa3_qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req); + +int ipa3_qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req); + +int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01 + *rule_req); + +int ipa3_wwan_update_mux_channel_prop(void); + +int ipa3_wan_ioctl_init(void); + +void ipa3_wan_ioctl_stop_qmi_messages(void); + +void ipa3_wan_ioctl_enable_qmi_messages(void); + +void ipa3_wan_ioctl_deinit(void); + +void ipa3_qmi_stop_workqueues(void); + +int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats + *data); + +int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data); + +void ipa3_broadcast_quota_reach_ind(uint32_t mux_id, + enum ipa_upstream_type upstream_type); + +int rmnet_ipa3_set_tether_client_pipe(struct 
wan_ioctl_set_tether_client_pipe + *data); + +int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, + bool reset); + +int rmnet_ipa3_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data); + +int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data); +int rmnet_ipa3_set_lan_client_info(struct wan_ioctl_lan_client_info *data); + +int rmnet_ipa3_clear_lan_client_info(struct wan_ioctl_lan_client_info *data); + +int rmnet_ipa3_send_lan_client_msg(struct wan_ioctl_send_lan_client_msg *data); + +int rmnet_ipa3_enable_per_client_stats(bool *data); + +int rmnet_ipa3_query_per_client_stats( + struct wan_ioctl_query_per_client_stats *data); + +int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, + struct ipa_get_data_stats_resp_msg_v01 *resp); + +int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req, + struct ipa_get_apn_data_stats_resp_msg_v01 *resp); + +int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req); + +int ipa3_qmi_stop_data_qouta(void); + +void ipa3_q6_handshake_complete(bool ssr_bootup); + +int ipa3_wwan_set_modem_perf_profile(int throughput); + +int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state); +int ipa3_qmi_enable_per_client_stats( + struct ipa_enable_per_client_stats_req_msg_v01 *req, + struct ipa_enable_per_client_stats_resp_msg_v01 *resp); + +int ipa3_qmi_get_per_client_packet_stats( + struct ipa_get_stats_per_client_req_msg_v01 *req, + struct ipa_get_stats_per_client_resp_msg_v01 *resp); + +int ipa3_qmi_send_mhi_ready_indication( + struct ipa_mhi_ready_indication_msg_v01 *req); + +int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req); + +void ipa3_qmi_init(void); + +void ipa3_qmi_cleanup(void); + +#else /* CONFIG_RMNET_IPA3 */ + +static inline int ipa3_qmi_service_init(uint32_t wan_platform_type) +{ + return -EPERM; +} + +static inline void ipa3_qmi_service_exit(void) { } + +/* sending filter-install-request to modem*/ +static inline int ipa3_qmi_filter_request_send( + struct ipa_install_fltr_rule_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_qmi_ul_filter_request_send( + struct ipa_configure_ul_firewall_rules_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_qmi_filter_request_ex_send( + struct ipa_install_fltr_rule_req_ex_msg_v01 *req) +{ + return -EPERM; +} + +/* sending filter-installed-notify-request to modem*/ +static inline int ipa3_qmi_filter_notify_send( + struct ipa_fltr_installed_notif_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_copy_ul_filter_rule_to_ipa( + struct ipa_install_fltr_rule_req_msg_v01 *rule_req) +{ + return -EPERM; +} + +static inline int ipa3_wwan_update_mux_channel_prop(void) +{ + return -EPERM; +} + +static inline int ipa3_wan_ioctl_init(void) +{ + return -EPERM; +} + +static inline void ipa3_wan_ioctl_stop_qmi_messages(void) { } + +static inline void ipa3_wan_ioctl_enable_qmi_messages(void) { } + +static inline void ipa3_wan_ioctl_deinit(void) { } + +static inline void ipa3_qmi_stop_workqueues(void) { } + +static inline int ipa3_vote_for_bus_bw(uint32_t *bw_mbps) +{ + return -EPERM; +} + +static inline int 
rmnet_ipa3_poll_tethering_stats( + struct wan_ioctl_poll_tethering_stats *data) +{ + return -EPERM; +} + +static inline int rmnet_ipa3_set_data_quota( + struct wan_ioctl_set_data_quota *data) +{ + return -EPERM; +} + +static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id, + enum ipa_upstream_type upstream_type) { } + +static inline int ipa3_qmi_get_data_stats( + struct ipa_get_data_stats_req_msg_v01 *req, + struct ipa_get_data_stats_resp_msg_v01 *resp) +{ + return -EPERM; +} + +static inline int ipa3_qmi_get_network_stats( + struct ipa_get_apn_data_stats_req_msg_v01 *req, + struct ipa_get_apn_data_stats_resp_msg_v01 *resp) +{ + return -EPERM; +} + +static inline int ipa3_qmi_set_data_quota( + struct ipa_set_data_usage_quota_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_qmi_stop_data_qouta(void) +{ + return -EPERM; +} + +static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { } + +static inline int ipa3_qmi_send_mhi_ready_indication( + struct ipa_mhi_ready_indication_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_qmi_send_mhi_cleanup_request( + struct ipa_mhi_cleanup_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_wwan_set_modem_perf_profile( + int throughput) +{ + return -EPERM; +} +static inline int ipa3_qmi_enable_per_client_stats( + struct ipa_enable_per_client_stats_req_msg_v01 *req, + struct ipa_enable_per_client_stats_resp_msg_v01 *resp) +{ + return -EPERM; +} + +static inline int ipa3_qmi_get_per_client_packet_stats( + struct ipa_get_stats_per_client_req_msg_v01 *req, + struct ipa_get_stats_per_client_resp_msg_v01 *resp) +{ + return -EPERM; +} + +static inline void ipa3_qmi_init(void) +{ + +} + +static inline void ipa3_qmi_cleanup(void) +{ + +} + +#endif /* CONFIG_RMNET_IPA3 */ + +#endif /* IPA_QMI_SERVICE_H */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c new file mode 100644 index 000000000000..6901c672954d --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c @@ -0,0 +1,4044 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include + +#include + +#include "ipa_qmi_service.h" + +/* Type Definitions */ +static struct qmi_elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, + modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_route_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_route_tbl_info_type_v01, + route_tbl_start_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_route_tbl_info_type_v01, + num_indices), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_modem_mem_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_modem_mem_info_type_v01, + block_start_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_modem_mem_info_type_v01, + size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01, + modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_zip_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_zip_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_zip_tbl_info_type_v01, + modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_ipfltr_range_eq_16_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + 
struct ipa_ipfltr_range_eq_16_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_range_eq_16_type_v01, + range_low), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_range_eq_16_type_v01, + range_high), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_ipfltr_mask_eq_32_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + mask), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_ipfltr_eq_16_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_eq_16_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_16_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_ipfltr_eq_32_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_ipfltr_mask_eq_128_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 16, + .elem_size = sizeof(uint8_t), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + mask), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 16, + .elem_size = sizeof(uint8_t), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + 
.tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_filter_rule_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + rule_eq_bitmap), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + tos_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tos_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + protocol_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + protocol_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_ihl_offset_range_16), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01, + .elem_size = sizeof( + struct ipa_ipfltr_range_eq_16_type_v01), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_range_16), + .ei_array = ipa3_ipfltr_range_eq_16_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_offset_meq_32), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + offset_meq_32), + .ei_array = ipa3_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tc_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tc_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + flow_eq_present), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + flow_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_16_present), + }, 
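+	/*
+	 * As with the entries above, each optional equation in
+	 * struct ipa_filter_rule_type_v01 is described by a presence flag
+	 * (or element count) entry immediately followed by the entry or
+	 * fixed-size array that carries its value.
+	 */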
+ { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_ipfltr_eq_16_type_v01), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_16), + .ei_array = ipa3_ipfltr_eq_16_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_32_present), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_ipfltr_eq_32_type_v01), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_32), + .ei_array = ipa3_ipfltr_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_ihl_offset_meq_32), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_meq_32), + .ei_array = ipa3_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_offset_meq_128), + }, + { + .data_type = QMI_STRUCT, + .elem_len = + QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01, + .elem_size = sizeof( + struct ipa_ipfltr_mask_eq_128_type_v01), + .array_type = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + offset_meq_128), + .ei_array = ipa3_ipfltr_mask_eq_128_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + metadata_meq32_present), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + metadata_meq32), + .ei_array = ipa3_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ipv4_frag_eq_present), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_spec_identifier), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + ip_type), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_filter_rule_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 
QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_rule), + .ei_array = ipa3_filter_rule_type_data_v01_ei, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_action), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + is_routing_table_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + route_table_index), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + is_mux_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + mux_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_filter_spec_ex_type_data_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + ip_type), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_filter_rule_type_v01), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + filter_rule), + .ei_array = ipa3_filter_rule_type_data_v01_ei, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + filter_action), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + is_routing_table_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + route_table_index), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + is_mux_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + mux_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + rule_id), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = 
offsetof(struct ipa_filter_spec_ex_type_v01, + is_rule_hashable), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct +qmi_elem_info ipa3_filter_rule_identifier_to_handle_map_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_identifier_to_handle_map_v01, + filter_spec_identifier), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_identifier_to_handle_map_v01, + filter_handle), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_filter_handle_to_index_map_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_handle_to_index_map_v01, + filter_handle), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_handle_to_index_map_v01, + filter_index), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + platform_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + platform_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_hdr_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_tbl_info), + .ei_array = ipa3_hdr_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_route_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_route_tbl_info), + .ei_array = ipa3_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_route_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + 
v6_route_tbl_info), + .ei_array = ipa3_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_filter_tbl_start_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_filter_tbl_start_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_filter_tbl_start_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_filter_tbl_start_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + modem_mem_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_modem_mem_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + modem_mem_info), + .ei_array = ipa3_modem_mem_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + ctrl_comm_dest_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + ctrl_comm_dest_end_pt), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + is_ssr_bootup_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + is_ssr_bootup), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_proc_ctx_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_proc_ctx_tbl_info), + .ei_array = ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + zip_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_zip_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + zip_tbl_info), + .ei_array = ipa3_zip_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 
1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_hash_route_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_hash_route_tbl_info), + .ei_array = ipa3_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_hash_route_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_hash_route_tbl_info), + .ei_array = ipa3_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_hash_filter_tbl_start_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_hash_filter_tbl_start_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1E, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_hash_filter_tbl_start_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1E, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_hash_filter_tbl_start_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1F, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hw_stats_quota_base_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1F, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hw_stats_quota_base_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x20, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hw_stats_quota_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x20, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hw_stats_quota_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x21, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hw_drop_stats_base_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x21, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hw_drop_stats_base_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x22, + .offset = offsetof( + struct 
ipa_init_modem_driver_req_msg_v01, + hw_drop_stats_table_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x22, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hw_drop_stats_table_size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, + +}; + +struct qmi_elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + ctrl_comm_dest_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + ctrl_comm_dest_end_pt), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + default_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + default_end_pt), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + modem_driver_init_pending_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + modem_driver_init_pending), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_init_modem_driver_cmplt_req_msg_v01, + status), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_init_modem_driver_cmplt_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_indication_reg_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + master_driver_init_complete_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = 
offsetof( + struct ipa_indication_reg_req_msg_v01, + master_driver_init_complete), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + data_usage_quota_reached_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + data_usage_quota_reached), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + ipa_mhi_ready_ind_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + ipa_mhi_ready_ind), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_indication_reg_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_indication_reg_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + ipa_master_driver_init_complt_ind_msg_v01, + master_driver_init_status), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(struct ipa_filter_spec_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list), + .ei_array = ipa_filter_spec_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + source_pipe_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + source_pipe_index), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv4_filters_valid), + }, + { + .data_type = 
QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv4_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv6_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv6_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(uint32_t), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_ex_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_ex_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(struct ipa_filter_spec_ex_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_ex_list), + .ei_array = ipa_filter_spec_ex_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof( + struct ipa_filter_rule_identifier_to_handle_map_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list), + .ei_array = + ipa3_filter_rule_identifier_to_handle_map_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + 
.tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + rule_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + rule_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(uint32_t), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + rule_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + source_pipe_index), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + install_status), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + filter_index_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof( + struct ipa_filter_handle_to_index_map_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + filter_index_list), + .ei_array = ipa3_filter_handle_to_index_map_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_pipe_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_pipe_index), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + retain_header_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + retain_header), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_call_mux_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_call_mux_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv4_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( 
+ struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv4_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv6_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv6_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv4_filter_idx_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv4_filter_idx), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv6_filter_idx_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv6_filter_idx), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(uint32_t), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01, + .elem_size = sizeof(uint32_t), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_fltr_installed_notif_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, 
+ .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + source_pipe_bitmask), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + request_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + throttle_source_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + throttle_source), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_disable_force_clear_datapath_req_msg_v01, + request_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info + ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_disable_force_clear_datapath_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_config_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_deaggr_supported_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_deaggr_supported), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_config_req_msg_v01, + max_aggr_frame_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = 
sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_config_req_msg_v01, + max_aggr_frame_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ipa_ingress_pipe_mode_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ipa_ingress_pipe_mode), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_speed_info_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_speed_info), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_time_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_time_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_pkt_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_pkt_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_byte_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_byte_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_accumulation_time_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_accumulation_time_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_control_flags_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_control_flags), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_msi_event_threshold_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, 
+ .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_msi_event_threshold), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_msi_event_threshold_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_msi_event_threshold), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_fifo_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_fifo_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_fifo_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_fifo_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1E, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_buf_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1E, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_buf_size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_config_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_config_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_get_data_stats_req_msg_data_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + ipa_stats_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + reset_stats_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + reset_stats), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_pipe_stats_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + pipe_index), + }, 
+ { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv4_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv4_bytes), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv6_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv6_bytes), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_stats_type_filter_rule_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_stats_type_filter_rule_v01, + filter_rule_index), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_stats_type_filter_rule_v01, + num_packets), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ipa_stats_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ipa_stats_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_PIPES_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list), + .ei_array = ipa3_pipe_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = 
sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_PIPES_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list), + .ei_array = ipa3_pipe_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list), + .ei_array = ipa3_stats_type_filter_rule_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_apn_data_stats_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + mux_id), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_ul_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_ul_bytes), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_dl_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_dl_bytes), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(uint32_t), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list), + }, + { + .data_type = QMI_EOTI, + 
.array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(struct + ipa_apn_data_stats_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list), + .ei_array = ipa3_apn_data_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_data_usage_quota_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_data_usage_quota_info_type_v01, + mux_id), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_data_usage_quota_info_type_v01, + num_Mbytes), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(struct + ipa_data_usage_quota_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list), + .ei_array = ipa3_data_usage_quota_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_set_data_usage_quota_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct + 
ipa_data_usage_quota_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_data_usage_quota_reached_ind_msg_v01, + apn), + .ei_array = ipa3_data_usage_quota_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[] = { + /* ipa_stop_data_usage_quota_req_msg is empty */ + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_stop_data_usage_quota_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + filter_spec_ex_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + filter_spec_ex_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_EX_V01, + .elem_size = sizeof(struct + ipa_filter_spec_ex_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + filter_spec_ex_list), + .ei_array = ipa_filter_spec_ex_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + source_pipe_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + source_pipe_index), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + num_ipv4_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + num_ipv4_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + num_ipv6_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + num_ipv6_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + xlat_filter_indices_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + 
.elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + xlat_filter_indices_list_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_EX_V01, + .elem_size = sizeof(uint32_t), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + xlat_filter_indices_list), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_ex_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_ex_msg_v01, + rule_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_ex_msg_v01, + rule_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_EX_V01, + .elem_size = sizeof(uint32_t), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_ex_msg_v01, + rule_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_per_client_stats_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + client_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + src_pipe_id), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_ul_ipv4_bytes), + + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_ul_ipv6_bytes), + + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_dl_ipv4_bytes), + + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_dl_ipv6_bytes), + + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_ul_ipv4_pkts), + + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = 
sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_ul_ipv6_pkts), + + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_dl_ipv4_pkts), + + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_dl_ipv6_pkts), + + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_ul_firewall_rule_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ul_firewall_rule_type_v01, + ip_type), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_filter_rule_type_v01), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ul_firewall_rule_type_v01, + filter_rule), + .ei_array = ipa3_filter_rule_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_ul_firewall_config_result_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ul_firewall_config_result_type_v01, + is_success), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ul_firewall_config_result_type_v01, + mux_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_enable_per_client_stats_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + ipa_enable_per_client_stats_req_msg_v01, + enable_per_client_stats), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_enable_per_client_stats_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_enable_per_client_stats_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_get_stats_per_client_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_get_stats_per_client_req_msg_v01, + client_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_stats_per_client_req_msg_v01, + src_pipe_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len 
= 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_stats_per_client_req_msg_v01, + reset_stats_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_stats_per_client_req_msg_v01, + reset_stats), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_get_stats_per_client_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_stats_per_client_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_stats_per_client_resp_msg_v01, + per_client_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_stats_per_client_resp_msg_v01, + per_client_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_PER_CLIENTS_V01, + .elem_size = + sizeof(struct ipa_per_client_stats_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_stats_per_client_resp_msg_v01, + per_client_stats_list), + .ei_array = + ipa3_per_client_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x1, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + firewall_rules_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_UL_FIREWALL_RULES_V01, + .elem_size = sizeof(struct ipa_ul_firewall_rule_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x1, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + firewall_rules_list), + .ei_array = + ipa3_ul_firewall_rule_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x2, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + mux_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + disable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + disable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + are_blacklist_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct 
ipa_configure_ul_firewall_rules_req_msg_v01, + are_blacklist_filters), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof( + struct ipa_ul_firewall_config_result_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_ind_msg_v01, + result), + .ei_array = + ipa3_ul_firewall_config_result_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_mhi_ch_init_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_init_info_type_v01, + ch_id), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_init_info_type_v01, + er_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_init_info_type_v01, + ch_doorbell_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_init_info_type_v01, + er_doorbell_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_init_info_type_v01, + direction_type), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_mhi_smmu_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_smmu_info_type_v01, + iova_ctl_base_addr), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_smmu_info_type_v01, + iova_ctl_size), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_smmu_info_type_v01, + iova_data_base_addr), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_smmu_info_type_v01, + iova_data_size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + + +struct qmi_elem_info ipa_mhi_ready_indication_msg_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + 
.offset = offsetof(struct ipa_mhi_ready_indication_msg_v01, + ch_info_arr_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_ch_init_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ipa_mhi_ready_indication_msg_v01, + ch_info_arr), + .ei_array = ipa_mhi_ch_init_info_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_ready_indication_msg_v01, + smmu_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_mhi_smmu_info_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_ready_indication_msg_v01, + smmu_info), + .ei_array = ipa_mhi_smmu_info_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_mem_addr_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_mem_addr_info_type_v01, + pa), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_mem_addr_info_type_v01, + iova), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_mem_addr_info_type_v01, + size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_tr_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + ch_id), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + poll_cfg), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum ipa_mhi_brst_mode_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + brst_mode_type), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + ring_iova), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + ring_len), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + rp), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_tr_info_type_v01, + wp), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_er_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = 
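ipa_mhi_ready_indication_msg_v01_ei above shows the two composite encodings used throughout these tables: a variable-length array is a QMI_DATA_LEN element (the *_len counter) followed by a VAR_LEN_ARRAY element under the same tlv_type whose elem_len gives the maximum count, and an optional TLV is a QMI_OPT_FLAG element (the *_valid flag) followed by the value under the same tlv_type. A condensed sketch with a hypothetical demo message, not part of the driver:

struct demo_ind_msg_v01 {
	u8 vals_len;		/* must precede vals[] */
	u32 vals[4];
	u8 extra_valid;		/* must precede extra */
	u32 extra;
};

static struct qmi_elem_info demo_ind_msg_v01_ei[] = {
	{	/* counter of the variable-length array, TLV 0x01 */
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct demo_ind_msg_v01, vals_len),
	},
	{	/* the array shares TLV 0x01; elem_len is the max count */
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 4,
		.elem_size = sizeof(u32),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct demo_ind_msg_v01, vals),
	},
	{	/* presence flag of the optional TLV 0x10 */
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct demo_ind_msg_v01, extra_valid),
	},
	{	/* the optional value, same TLV id as its flag */
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct demo_ind_msg_v01, extra),
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};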
offsetof(struct ipa_mhi_er_info_type_v01, + er_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + intmod_cycles), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + intmod_count), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + msi_addr), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + ring_iova), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + ring_len), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + rp), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_er_info_type_v01, + wp), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_alloc_channel_req_msg_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + tr_info_arr_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_tr_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + tr_info_arr), + .ei_array = ipa_mhi_tr_info_type_v01_ei, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + er_info_arr_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_er_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + er_info_arr), + .ei_array = ipa_mhi_er_info_type_v01_ei, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + ctrl_addr_map_info_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_REMOTE_MHI_MEMORY_MAPPING_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_mem_addr_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + ctrl_addr_map_info), + .ei_array = ipa_mhi_mem_addr_info_type_v01_ei, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x04, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + data_addr_map_info_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 
QMI_IPA_REMOTE_MHI_MEMORY_MAPPING_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_mem_addr_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x04, + .offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01, + data_addr_map_info), + .ei_array = ipa_mhi_mem_addr_info_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_ch_alloc_resp_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_alloc_resp_type_v01, + ch_id), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct ipa_mhi_ch_alloc_resp_type_v01, + is_success), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_alloc_channel_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01, + alloc_resp_arr_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01, + alloc_resp_arr_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01, + .elem_size = sizeof(struct ipa_mhi_ch_alloc_resp_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01, + alloc_resp_arr), + .ei_array = ipa_mhi_ch_alloc_resp_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_clk_vote_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ipa_mhi_clk_vote_req_msg_v01, + mhi_vote), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_clk_vote_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ipa_mhi_clk_vote_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa_mhi_cleanup_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_cleanup_req_msg_v01, + cleanup_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ipa_mhi_cleanup_req_msg_v01, + cleanup), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info 
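These element-info tables are consumed by the kernel's generic QMI encoder/decoder. A rough caller sketch for the clock-vote exchange above, assuming the qmi_txn_init()/qmi_send_request()/qmi_txn_wait() helpers from include/linux/soc/qcom/qmi.h; DEMO_CLK_VOTE_MSG_ID, DEMO_CLK_VOTE_REQ_MAX_MSG_LEN and the 5 s timeout are placeholders, and the driver's real QMI client code lives elsewhere in this patch:

static int demo_ipa_mhi_clk_vote(struct qmi_handle *qmi,
	struct sockaddr_qrtr *sq, bool vote)
{
	struct ipa_mhi_clk_vote_req_msg_v01 req = { 0 };
	struct ipa_mhi_clk_vote_resp_msg_v01 resp = { 0 };
	struct qmi_txn txn;
	int rc;

	req.mhi_vote = vote;

	/* tell the core how to decode the response when it arrives */
	rc = qmi_txn_init(qmi, &txn, ipa_mhi_clk_vote_resp_msg_v01_ei, &resp);
	if (rc < 0)
		return rc;

	/* encode the request using its element-info table and send it */
	rc = qmi_send_request(qmi, sq, &txn, DEMO_CLK_VOTE_MSG_ID,
		DEMO_CLK_VOTE_REQ_MAX_MSG_LEN,
		ipa_mhi_clk_vote_req_msg_v01_ei, &req);
	if (rc < 0) {
		qmi_txn_cancel(&txn);
		return rc;
	}

	rc = qmi_txn_wait(&txn, msecs_to_jiffies(5000));
	if (rc < 0)
		return rc;

	return resp.resp.result == QMI_RESULT_SUCCESS_V01 ? 0 : -EIO;
}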
ipa_mhi_cleanup_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ipa_mhi_cleanup_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c new file mode 100644 index 000000000000..c893be77cc70 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -0,0 +1,2082 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include "ipa_i.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" + +#define IPA_RT_INDEX_BITMAP_SIZE (32) +#define IPA_RT_STATUS_OF_ADD_FAILED (-1) +#define IPA_RT_STATUS_OF_DEL_FAILED (-1) +#define IPA_RT_STATUS_OF_MDFY_FAILED (-1) + +#define IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC 5 + +#define IPA_RT_GET_RULE_TYPE(__entry) \ + ( \ + ((__entry)->rule.hashable) ? \ + (IPA_RULE_HASHABLE) : (IPA_RULE_NON_HASHABLE) \ + ) + +/** + * ipa_generate_rt_hw_rule() - Generated the RT H/W single rule + * This func will do the preparation core driver work and then calls + * the HAL layer for the real work. + * @ip: the ip address family type + * @entry: routing entry + * @buf: output buffer, buf == NULL means + * caller wants to know the size of the rule as seen + * by HW so they did not pass a valid buffer, we will use a + * scratch buffer instead. + * With this scheme we are going to + * generate the rule twice, once to know size using scratch + * buffer and second to write the rule to the actual caller + * supplied buffer which is of required size + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + */ +static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip, + struct ipa3_rt_entry *entry, u8 *buf) +{ + struct ipahal_rt_rule_gen_params gen_params; + struct ipa3_hdr_entry *hdr_entry; + struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry; + int res = 0; + + memset(&gen_params, 0, sizeof(gen_params)); + + if (entry->rule.hashable && + entry->rule.attrib.attrib_mask & IPA_FLT_IS_PURE_ACK) { + IPAERR_RL("PURE_ACK rule atrb used with hash rule\n"); + WARN_ON_RATELIMIT_IPA(1); + return -EPERM; + } + + gen_params.ipt = ip; + gen_params.dst_pipe_idx = ipa3_get_ep_mapping(entry->rule.dst); + if (gen_params.dst_pipe_idx == -1) { + IPAERR_RL("Wrong destination pipe specified in RT rule\n"); + WARN_ON_RATELIMIT_IPA(1); + return -EPERM; + } + if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) { + IPAERR_RL("No RT rule on IPA_client_producer pipe.\n"); + IPAERR_RL("pipe_idx: %d dst_pipe: %d\n", + gen_params.dst_pipe_idx, entry->rule.dst); + WARN_ON_RATELIMIT_IPA(1); + return -EPERM; + } + + /* Adding check to confirm still + * header entry present in header table or not + */ + + if (entry->hdr) { + hdr_entry = ipa3_id_find(entry->rule.hdr_hdl); + if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Header entry already deleted\n"); + return -EPERM; + } + } else if (entry->proc_ctx) { + hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl); + if (!hdr_proc_entry || + hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) { + IPAERR_RL("Proc header entry already deleted\n"); + return -EPERM; + } + } + + if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) { + struct 
ipa3_hdr_proc_ctx_entry *proc_ctx; + + proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx; + if ((proc_ctx == NULL) || + (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) { + gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE; + gen_params.hdr_ofst = 0; + } else { + gen_params.hdr_lcl = ipa3_ctx->hdr_proc_ctx_tbl_lcl; + gen_params.hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX; + gen_params.hdr_ofst = proc_ctx->offset_entry->offset + + ipa3_ctx->hdr_proc_ctx_tbl.start_offset; + } + } else if ((entry->hdr != NULL) && + (entry->hdr->cookie == IPA_HDR_COOKIE)) { + gen_params.hdr_lcl = ipa3_ctx->hdr_tbl_lcl; + gen_params.hdr_type = IPAHAL_RT_RULE_HDR_RAW; + gen_params.hdr_ofst = entry->hdr->offset_entry->offset; + } else { + gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE; + gen_params.hdr_ofst = 0; + } + + gen_params.priority = entry->prio; + gen_params.id = entry->rule_id; + gen_params.rule = (const struct ipa_rt_rule *)&entry->rule; + + res = ipahal_rt_generate_hw_rule(&gen_params, &entry->hw_len, buf); + if (res) + IPAERR("failed to generate rt h/w rule\n"); + + return res; +} + +/** + * ipa_translate_rt_tbl_to_hw_fmt() - translate the routing driver structures + * (rules and tables) to HW format and fill it in the given buffers + * @ip: the ip address family type + * @rlt: the type of the rules to translate (hashable or non-hashable) + * @base: the rules body buffer to be filled + * @hdr: the rules header (addresses/offsets) buffer to be filled + * @body_ofst: the offset of the rules body from the rules header at + * ipa sram (for local body usage) + * @apps_start_idx: the first rt table index of apps tables + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip, + enum ipa_rule_type rlt, u8 *base, u8 *hdr, + u32 body_ofst, u32 apps_start_idx) +{ + struct ipa3_rt_tbl_set *set; + struct ipa3_rt_tbl *tbl; + struct ipa_mem_buffer tbl_mem; + u8 *tbl_mem_buf; + struct ipa3_rt_entry *entry; + int res; + u64 offset; + u8 *body_i; + + set = &ipa3_ctx->rt_tbl_set[ip]; + body_i = base; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + if (tbl->sz[rlt] == 0) + continue; + if (tbl->in_sys[rlt]) { + /* only body (no header) */ + tbl_mem.size = tbl->sz[rlt] - + ipahal_get_hw_tbl_hdr_width(); + if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) { + IPAERR_RL("fail to alloc sys tbl of size %d\n", + tbl_mem.size); + goto err; + } + + if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base, + hdr, tbl->idx - apps_start_idx, true)) { + IPAERR_RL("fail to wrt sys tbl addr to hdr\n"); + goto hdr_update_fail; + } + + tbl_mem_buf = tbl_mem.base; + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_rt_rule_list, + link) { + if (IPA_RT_GET_RULE_TYPE(entry) != rlt) + continue; + res = ipa_generate_rt_hw_rule(ip, entry, + tbl_mem_buf); + if (res) { + IPAERR_RL("failed to gen HW RT rule\n"); + goto hdr_update_fail; + } + tbl_mem_buf += entry->hw_len; + } + + if (tbl->curr_mem[rlt].phys_base) { + WARN_ON(tbl->prev_mem[rlt].phys_base); + tbl->prev_mem[rlt] = tbl->curr_mem[rlt]; + } + tbl->curr_mem[rlt] = tbl_mem; + } else { + offset = body_i - base + body_ofst; + + /* update the hdr at the right index */ + if (ipahal_fltrt_write_addr_to_hdr(offset, hdr, + tbl->idx - apps_start_idx, true)) { + IPAERR_RL("fail to wrt lcl tbl ofst to hdr\n"); + goto hdr_update_fail; + } + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_rt_rule_list, + link) { + if 
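ipa_generate_rt_hw_rule() above accepts buf == NULL precisely so callers can learn each rule's hw_len before any table memory exists; ipa_prep_rt_tbl_for_cmt() below performs that sizing pass, and the translation step then re-runs the generator into the real header/body buffers. A self-contained sketch of the two-pass idiom with hypothetical demo_* helpers, not driver APIs:

struct demo_rule {
	u32 hw_len;
};

/* stand-in for ipa_generate_rt_hw_rule(): sizes on NULL, writes otherwise */
static int demo_gen_one_rule(struct demo_rule *rule, u8 *buf)
{
	rule->hw_len = 8;	/* pretend every rule is 8 bytes */
	if (buf)
		memset(buf, 0, rule->hw_len);
	return 0;
}

static int demo_build_table(struct demo_rule *rules, int n, u8 **out, u32 *out_sz)
{
	u32 total = 0;
	u8 *buf, *pos;
	int i, rc;

	/* pass 1: buf == NULL, only hw_len is updated */
	for (i = 0; i < n; i++) {
		rc = demo_gen_one_rule(&rules[i], NULL);
		if (rc)
			return rc;
		total += rules[i].hw_len;
	}

	buf = kzalloc(total, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* pass 2: same generator, now writing into the sized buffer */
	for (pos = buf, i = 0; i < n; i++) {
		rc = demo_gen_one_rule(&rules[i], pos);
		if (rc) {
			kfree(buf);
			return rc;
		}
		pos += rules[i].hw_len;
	}

	*out = buf;
	*out_sz = total;
	return 0;
}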
(IPA_RT_GET_RULE_TYPE(entry) != rlt) + continue; + res = ipa_generate_rt_hw_rule(ip, entry, + body_i); + if (res) { + IPAERR_RL("failed to gen HW RT rule\n"); + goto err; + } + body_i += entry->hw_len; + } + + /** + * advance body_i to next table alignment as local + * tables + * are order back-to-back + */ + body_i += ipahal_get_lcl_tbl_addr_alignment(); + body_i = (u8 *)((long)body_i & + ~ipahal_get_lcl_tbl_addr_alignment()); + } + } + + return 0; + +hdr_update_fail: + ipahal_free_dma_mem(&tbl_mem); +err: + return -EPERM; +} + +static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip) +{ + struct ipa3_rt_tbl *tbl; + struct ipa3_rt_tbl *next; + struct ipa3_rt_tbl_set *set; + int i; + + set = &ipa3_ctx->rt_tbl_set[ip]; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + for (i = 0; i < IPA_RULE_TYPE_MAX; i++) { + if (tbl->prev_mem[i].phys_base) { + IPADBG_LOW( + "reaping sys rt tbl name=%s ip=%d rlt=%d\n", + tbl->name, ip, i); + ipahal_free_dma_mem(&tbl->prev_mem[i]); + memset(&tbl->prev_mem[i], 0, + sizeof(tbl->prev_mem[i])); + } + } + } + + set = &ipa3_ctx->reap_rt_tbl_set[ip]; + list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) { + for (i = 0; i < IPA_RULE_TYPE_MAX; i++) { + WARN_ON(tbl->prev_mem[i].phys_base != 0); + if (tbl->curr_mem[i].phys_base) { + IPADBG_LOW( + "reaping sys rt tbl name=%s ip=%d rlt=%d\n", + tbl->name, ip, i); + ipahal_free_dma_mem(&tbl->curr_mem[i]); + } + } + list_del(&tbl->link); + kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl); + } +} + +/** + * ipa_prep_rt_tbl_for_cmt() - preparing the rt table for commit + * assign priorities to the rules, calculate their sizes and calculate + * the overall table size + * @ip: the ip address family type + * @tbl: the rt tbl to be prepared + * + * Return: 0 on success, negative on failure + */ +static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip, + struct ipa3_rt_tbl *tbl) +{ + struct ipa3_rt_entry *entry; + int prio_i; + int res; + int max_prio; + u32 hdr_width; + + tbl->sz[IPA_RULE_HASHABLE] = 0; + tbl->sz[IPA_RULE_NON_HASHABLE] = 0; + + max_prio = ipahal_get_rule_max_priority(); + + prio_i = max_prio; + list_for_each_entry(entry, &tbl->head_rt_rule_list, link) { + + if (entry->rule.max_prio) { + entry->prio = max_prio; + } else { + if (ipahal_rule_decrease_priority(&prio_i)) { + IPAERR("cannot rule decrease priority - %d\n", + prio_i); + return -EPERM; + } + entry->prio = prio_i; + } + + res = ipa_generate_rt_hw_rule(ip, entry, NULL); + if (res) { + IPAERR_RL("failed to calculate HW RT rule size\n"); + return -EPERM; + } + + IPADBG_LOW("RT rule id (handle) %d hw_len %u priority %u\n", + entry->id, entry->hw_len, entry->prio); + + if (entry->rule.hashable) + tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len; + else + tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len; + } + + if ((tbl->sz[IPA_RULE_HASHABLE] + + tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) { + WARN_ON_RATELIMIT_IPA(1); + IPAERR_RL("rt tbl %s is with zero total size\n", tbl->name); + } + + hdr_width = ipahal_get_hw_tbl_hdr_width(); + + if (tbl->sz[IPA_RULE_HASHABLE]) + tbl->sz[IPA_RULE_HASHABLE] += hdr_width; + if (tbl->sz[IPA_RULE_NON_HASHABLE]) + tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width; + + IPADBG("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx, + tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]); + + return 0; +} + +/** + * ipa_generate_rt_hw_tbl_img() - generates the rt hw tbls. 
+ * headers and bodies (sys bodies) are being created into buffers that will + * be filled into the local memory (sram) + * @ip: the ip address family type + * @alloc_params: IN/OUT parameters to hold info regard the tables headers + * and bodies on DDR (DMA buffers), and needed info for the allocation + * that the HAL needs + * + * Return: 0 on success, negative on failure + */ +static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip, + struct ipahal_fltrt_alloc_imgs_params *alloc_params) +{ + u32 hash_bdy_start_ofst, nhash_bdy_start_ofst; + u32 apps_start_idx; + int rc = 0; + + if (ip == IPA_IP_v4) { + nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_nhash_ofst) - + IPA_MEM_PART(v4_rt_nhash_ofst); + hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_hash_ofst) - + IPA_MEM_PART(v4_rt_hash_ofst); + apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo); + } else { + nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_nhash_ofst) - + IPA_MEM_PART(v6_rt_nhash_ofst); + hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_hash_ofst) - + IPA_MEM_PART(v6_rt_hash_ofst); + apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo); + } + + if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) { + IPAERR("fail to allocate RT HW TBL images. IP %d\n", ip); + rc = -ENOMEM; + goto allocate_fail; + } + + if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE, + alloc_params->hash_bdy.base, alloc_params->hash_hdr.base, + hash_bdy_start_ofst, apps_start_idx)) { + IPAERR("fail to translate hashable rt tbls to hw format\n"); + rc = -EPERM; + goto translate_fail; + } + if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE, + alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base, + nhash_bdy_start_ofst, apps_start_idx)) { + IPAERR("fail to translate non-hashable rt tbls to hw format\n"); + rc = -EPERM; + goto translate_fail; + } + + return rc; + +translate_fail: + if (alloc_params->hash_hdr.size) + ipahal_free_dma_mem(&alloc_params->hash_hdr); + ipahal_free_dma_mem(&alloc_params->nhash_hdr); + if (alloc_params->hash_bdy.size) + ipahal_free_dma_mem(&alloc_params->hash_bdy); + if (alloc_params->nhash_bdy.size) + ipahal_free_dma_mem(&alloc_params->nhash_bdy); +allocate_fail: + return rc; +} + +/** + * ipa_rt_valid_lcl_tbl_size() - validate if the space allocated for rt tbl + * bodies at the sram is enough for the commit + * @ipt: the ip address family type + * @rlt: the rule type (hashable or non-hashable) + * + * Return: true if enough space available or false in other cases + */ +static bool ipa_rt_valid_lcl_tbl_size(enum ipa_ip_type ipt, + enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy) +{ + u16 avail; + + if (ipt == IPA_IP_v4) + avail = (rlt == IPA_RULE_HASHABLE) ? + IPA_MEM_PART(apps_v4_rt_hash_size) : + IPA_MEM_PART(apps_v4_rt_nhash_size); + else + avail = (rlt == IPA_RULE_HASHABLE) ? 
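The *_start_ofst and apps_start_idx values computed above exist because the generated header image only covers the apps-owned tables: each table's slot is located by rebasing its absolute index, and local (SRAM) bodies are referenced by an offset rather than a DMA address. A small illustrative helper, not a driver API, with hdr_entry_width standing in for ipahal_get_hw_tbl_hdr_width():

static u8 *demo_rt_hdr_slot(u8 *hdr_base, u32 hdr_entry_width,
	u32 tbl_idx, u32 apps_start_idx)
{
	/* the header image starts at the first apps-owned table index */
	return hdr_base + (tbl_idx - apps_start_idx) * hdr_entry_width;
}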
+ IPA_MEM_PART(apps_v6_rt_hash_size) : + IPA_MEM_PART(apps_v6_rt_nhash_size); + + if (bdy->size <= avail) + return true; + + IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n", + bdy->size, avail, ipt, rlt); + return false; +} + +/** + * __ipa_commit_rt_v3() - commit rt tables to the hw + * commit the headers and the bodies if are local with internal cache flushing + * @ipt: the ip address family type + * + * Return: 0 on success, negative on failure + */ +int __ipa_commit_rt_v3(enum ipa_ip_type ip) +{ + struct ipa3_desc desc[IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC]; + struct ipahal_imm_cmd_register_write reg_write_cmd = {0}; + struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0}; + struct ipahal_imm_cmd_pyld + *cmd_pyld[IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC]; + int num_cmd = 0; + struct ipahal_fltrt_alloc_imgs_params alloc_params; + u32 num_modem_rt_index; + int rc = 0; + u32 lcl_hash_hdr, lcl_nhash_hdr; + u32 lcl_hash_bdy, lcl_nhash_bdy; + bool lcl_hash, lcl_nhash; + struct ipahal_reg_fltrt_hash_flush flush; + struct ipahal_reg_valmask valmask; + int i; + struct ipa3_rt_tbl_set *set; + struct ipa3_rt_tbl *tbl; + u32 tbl_hdr_width; + + tbl_hdr_width = ipahal_get_hw_tbl_hdr_width(); + memset(desc, 0, sizeof(desc)); + memset(cmd_pyld, 0, sizeof(cmd_pyld)); + memset(&alloc_params, 0, sizeof(alloc_params)); + alloc_params.ipt = ip; + + if (ip == IPA_IP_v4) { + num_modem_rt_index = + IPA_MEM_PART(v4_modem_rt_index_hi) - + IPA_MEM_PART(v4_modem_rt_index_lo) + 1; + lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_hash_ofst) + + num_modem_rt_index * tbl_hdr_width; + lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_nhash_ofst) + + num_modem_rt_index * tbl_hdr_width; + lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_rt_hash_ofst); + lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_rt_nhash_ofst); + lcl_hash = ipa3_ctx->ip4_rt_tbl_hash_lcl; + lcl_nhash = ipa3_ctx->ip4_rt_tbl_nhash_lcl; + alloc_params.tbls_num = IPA_MEM_PART(v4_apps_rt_index_hi) - + IPA_MEM_PART(v4_apps_rt_index_lo) + 1; + } else { + num_modem_rt_index = + IPA_MEM_PART(v6_modem_rt_index_hi) - + IPA_MEM_PART(v6_modem_rt_index_lo) + 1; + lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_hash_ofst) + + num_modem_rt_index * tbl_hdr_width; + lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_nhash_ofst) + + num_modem_rt_index * tbl_hdr_width; + lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_rt_hash_ofst); + lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_rt_nhash_ofst); + lcl_hash = ipa3_ctx->ip6_rt_tbl_hash_lcl; + lcl_nhash = ipa3_ctx->ip6_rt_tbl_nhash_lcl; + alloc_params.tbls_num = IPA_MEM_PART(v6_apps_rt_index_hi) - + IPA_MEM_PART(v6_apps_rt_index_lo) + 1; + } + + if (!ipa3_ctx->rt_idx_bitmap[ip]) { + IPAERR("no rt tbls present\n"); + rc = -EPERM; + goto no_rt_tbls; + } + + set = &ipa3_ctx->rt_tbl_set[ip]; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + if (ipa_prep_rt_tbl_for_cmt(ip, tbl)) { + rc = -EPERM; + goto no_rt_tbls; + } + if (!tbl->in_sys[IPA_RULE_HASHABLE] && + tbl->sz[IPA_RULE_HASHABLE]) { + alloc_params.num_lcl_hash_tbls++; + alloc_params.total_sz_lcl_hash_tbls += + tbl->sz[IPA_RULE_HASHABLE]; + alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width; + } + if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] && + tbl->sz[IPA_RULE_NON_HASHABLE]) { + alloc_params.num_lcl_nhash_tbls++; + alloc_params.total_sz_lcl_nhash_tbls += + 
tbl->sz[IPA_RULE_NON_HASHABLE]; + alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width; + } + } + + if (ipa_generate_rt_hw_tbl_img(ip, &alloc_params)) { + IPAERR("fail to generate RT HW TBL images. IP %d\n", ip); + rc = -EFAULT; + goto no_rt_tbls; + } + + if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE, + &alloc_params.hash_bdy)) { + rc = -EFAULT; + goto fail_size_valid; + } + if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE, + &alloc_params.nhash_bdy)) { + rc = -EFAULT; + goto fail_size_valid; + } + + /* + * SRAM memory not allocated to hash tables. Sending + * command to hash tables(filer/routing) operation not supported. + */ + if (!ipa3_ctx->ipa_fltrt_not_hashable) { + /* flushing ipa internal hashable rt rules cache */ + memset(&flush, 0, sizeof(flush)); + if (ip == IPA_IP_v4) + flush.v4_rt = true; + else + flush.v6_rt = true; + ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask); + reg_write_cmd.skip_pipeline_clear = false; + reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + reg_write_cmd.offset = ipahal_get_reg_ofst( + IPA_FILT_ROUT_HASH_FLUSH); + reg_write_cmd.value = valmask.val; + reg_write_cmd.value_mask = valmask.mask; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, ®_write_cmd, + false); + if (!cmd_pyld[num_cmd]) { + IPAERR( + "fail construct register_write imm cmd. IP %d\n", ip); + goto fail_size_valid; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + num_cmd++; + } + + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.nhash_hdr.size; + mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base; + mem_cmd.local_addr = lcl_nhash_hdr; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip); + goto fail_imm_cmd_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + num_cmd++; + + /* + * SRAM memory not allocated to hash tables. Sending + * command to hash tables(filer/routing) operation not supported. + */ + if (!ipa3_ctx->ipa_fltrt_not_hashable) { + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.hash_hdr.size; + mem_cmd.system_addr = alloc_params.hash_hdr.phys_base; + mem_cmd.local_addr = lcl_hash_hdr; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR( + "fail construct dma_shared_mem imm cmd. IP %d\n", ip); + goto fail_imm_cmd_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + num_cmd++; + } + + if (lcl_nhash) { + if (num_cmd >= IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC) { + IPAERR("number of commands is out of range: IP = %d\n", + ip); + rc = -ENOBUFS; + goto fail_imm_cmd_construct; + } + + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.nhash_bdy.size; + mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base; + mem_cmd.local_addr = lcl_nhash_bdy; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem cmd. 
IP %d\n", + ip); + goto fail_imm_cmd_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + num_cmd++; + } + if (lcl_hash) { + if (num_cmd >= IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC) { + IPAERR("number of commands is out of range: IP = %d\n", + ip); + rc = -ENOBUFS; + goto fail_imm_cmd_construct; + } + + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.hash_bdy.size; + mem_cmd.system_addr = alloc_params.hash_bdy.phys_base; + mem_cmd.local_addr = lcl_hash_bdy; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem cmd. IP %d\n", + ip); + goto fail_imm_cmd_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + num_cmd++; + } + + if (ipa3_send_cmd(num_cmd, desc)) { + IPAERR_RL("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_imm_cmd_construct; + } + + IPADBG_LOW("Hashable HEAD\n"); + IPA_DUMP_BUFF(alloc_params.hash_hdr.base, + alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size); + + IPADBG_LOW("Non-Hashable HEAD\n"); + IPA_DUMP_BUFF(alloc_params.nhash_hdr.base, + alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size); + + if (alloc_params.hash_bdy.size) { + IPADBG_LOW("Hashable BODY\n"); + IPA_DUMP_BUFF(alloc_params.hash_bdy.base, + alloc_params.hash_bdy.phys_base, + alloc_params.hash_bdy.size); + } + + if (alloc_params.nhash_bdy.size) { + IPADBG_LOW("Non-Hashable BODY\n"); + IPA_DUMP_BUFF(alloc_params.nhash_bdy.base, + alloc_params.nhash_bdy.phys_base, + alloc_params.nhash_bdy.size); + } + + __ipa_reap_sys_rt_tbls(ip); + +fail_imm_cmd_construct: + for (i = 0 ; i < num_cmd ; i++) + ipahal_destroy_imm_cmd(cmd_pyld[i]); +fail_size_valid: + if (alloc_params.hash_hdr.size) + ipahal_free_dma_mem(&alloc_params.hash_hdr); + ipahal_free_dma_mem(&alloc_params.nhash_hdr); + if (alloc_params.hash_bdy.size) + ipahal_free_dma_mem(&alloc_params.hash_bdy); + if (alloc_params.nhash_bdy.size) + ipahal_free_dma_mem(&alloc_params.nhash_bdy); + +no_rt_tbls: + return rc; +} + +/** + * __ipa3_find_rt_tbl() - find the routing table + * which name is given as parameter + * @ip: [in] the ip address family type of the wanted routing table + * @name: [in] the name of the wanted routing table + * + * Returns: the routing table which name is given as parameter, or NULL if it + * doesn't exist + */ +struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name) +{ + struct ipa3_rt_tbl *entry; + struct ipa3_rt_tbl_set *set; + + if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) { + IPAERR_RL("Name too long: %s\n", name); + return NULL; + } + + set = &ipa3_ctx->rt_tbl_set[ip]; + list_for_each_entry(entry, &set->head_rt_tbl_list, link) { + if (!strcmp(name, entry->name)) + return entry; + } + + return NULL; +} + +/** + * ipa3_query_rt_index() - find the routing table index + * which name and ip type are given as parameters + * @in: [out] the index of the wanted routing table + * + * Returns: the routing table which name is given as parameter, or NULL if it + * doesn't exist + */ +int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in) +{ + struct ipa3_rt_tbl *entry; + + if (in->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + in->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + /* check if this table exists */ + entry = __ipa3_find_rt_tbl(in->ip, in->name); + if 
(!entry) { + mutex_unlock(&ipa3_ctx->lock); + return -EFAULT; + } + in->idx = entry->idx; + mutex_unlock(&ipa3_ctx->lock); + return 0; +} + +static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, + const char *name) +{ + struct ipa3_rt_tbl *entry; + struct ipa3_rt_tbl_set *set; + int i; + int id; + int max_tbl_indx; + + if (name == NULL) { + IPAERR_RL("no tbl name\n"); + goto error; + } + + if (ip == IPA_IP_v4) { + max_tbl_indx = + max(IPA_MEM_PART(v4_modem_rt_index_hi), + IPA_MEM_PART(v4_apps_rt_index_hi)); + } else if (ip == IPA_IP_v6) { + max_tbl_indx = + max(IPA_MEM_PART(v6_modem_rt_index_hi), + IPA_MEM_PART(v6_apps_rt_index_hi)); + } else { + IPAERR_RL("bad ip family type\n"); + goto error; + } + + set = &ipa3_ctx->rt_tbl_set[ip]; + /* check if this table exists */ + entry = __ipa3_find_rt_tbl(ip, name); + if (!entry) { + entry = kmem_cache_zalloc(ipa3_ctx->rt_tbl_cache, GFP_KERNEL); + if (!entry) + goto error; + + /* find a routing tbl index */ + for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) { + if (!test_bit(i, &ipa3_ctx->rt_idx_bitmap[ip])) { + entry->idx = i; + set_bit(i, &ipa3_ctx->rt_idx_bitmap[ip]); + break; + } + } + if (i == IPA_RT_INDEX_BITMAP_SIZE) { + IPAERR("not free RT tbl indices left\n"); + goto fail_rt_idx_alloc; + } + if (i > max_tbl_indx) { + IPAERR("rt tbl index is above max\n"); + goto fail_rt_idx_alloc; + } + + INIT_LIST_HEAD(&entry->head_rt_rule_list); + INIT_LIST_HEAD(&entry->link); + strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX); + entry->set = set; + entry->cookie = IPA_RT_TBL_COOKIE; + entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ? + !ipa3_ctx->ip4_rt_tbl_hash_lcl : + !ipa3_ctx->ip6_rt_tbl_hash_lcl; + entry->in_sys[IPA_RULE_NON_HASHABLE] = (ip == IPA_IP_v4) ? + !ipa3_ctx->ip4_rt_tbl_nhash_lcl : + !ipa3_ctx->ip6_rt_tbl_nhash_lcl; + set->tbl_cnt++; + entry->rule_ids = &set->rule_ids; + list_add(&entry->link, &set->head_rt_tbl_list); + + IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx, + set->tbl_cnt, ip); + + id = ipa3_id_alloc(entry); + if (id < 0) { + IPAERR_RL("failed to add to tree\n"); + WARN_ON_RATELIMIT_IPA(1); + goto ipa_insert_failed; + } + entry->id = id; + } + + return entry; +ipa_insert_failed: + set->tbl_cnt--; + list_del(&entry->link); + idr_destroy(entry->rule_ids); +fail_rt_idx_alloc: + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry); +error: + return NULL; +} + +static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry) +{ + enum ipa_ip_type ip = IPA_IP_MAX; + u32 id; + struct ipa3_rt_tbl_set *rset; + + if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) { + IPAERR_RL("bad params\n"); + return -EINVAL; + } + id = entry->id; + if (ipa3_id_find(id) == NULL) { + IPAERR_RL("lookup failed\n"); + return -EPERM; + } + + if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4]) + ip = IPA_IP_v4; + else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6]) + ip = IPA_IP_v6; + else { + WARN_ON_RATELIMIT_IPA(1); + return -EPERM; + } + + rset = &ipa3_ctx->reap_rt_tbl_set[ip]; + + entry->rule_ids = NULL; + if (entry->in_sys[IPA_RULE_HASHABLE] || + entry->in_sys[IPA_RULE_NON_HASHABLE]) { + list_move(&entry->link, &rset->head_rt_tbl_list); + clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]); + entry->set->tbl_cnt--; + IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d ip=%d\n", + entry->idx, entry->set->tbl_cnt, ip); + } else { + list_del(&entry->link); + clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]); + entry->set->tbl_cnt--; + IPADBG("del rt tbl_idx=%d tbl_cnt=%d ip=%d\n", + entry->idx, entry->set->tbl_cnt, ip); 
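__ipa_add_rt_tbl() above hands out table indices from rt_idx_bitmap with test_bit()/set_bit(), rejecting a free index that lies above the modem/apps maximum, and __ipa_del_rt_tbl() returns indices with clear_bit(). A condensed sketch of the same allocator (demo_* names are illustrative):

static int demo_alloc_rt_idx(unsigned long *bitmap, int max_idx)
{
	int i;

	for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
		if (!test_bit(i, bitmap)) {
			if (i > max_idx)
				return -ENOSPC;	/* free, but above the HW limit */
			set_bit(i, bitmap);
			return i;
		}
	}
	return -ENOSPC;		/* bitmap exhausted */
}

static void demo_free_rt_idx(unsigned long *bitmap, int idx)
{
	clear_bit(idx, bitmap);
}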
+ kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry); + } + + /* remove the handle from the database */ + ipa3_id_remove(id); + return 0; +} + +static int __ipa_rt_validate_rule_id(u16 rule_id) +{ + if (!rule_id) + return 0; + + if ((rule_id < ipahal_get_rule_id_hi_bit()) || + (rule_id >= ((ipahal_get_rule_id_hi_bit()<<1)-1))) { + IPAERR_RL("Invalid rule_id provided 0x%x\n", + rule_id); + return -EPERM; + } + + return 0; +} +static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule, + struct ipa3_hdr_entry **hdr, + struct ipa3_hdr_proc_ctx_entry **proc_ctx) +{ + if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) { + IPAERR_RL("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n"); + return -EPERM; + } + + if (rule->hdr_hdl) { + *hdr = ipa3_id_find(rule->hdr_hdl); + if ((*hdr == NULL) || ((*hdr)->cookie != IPA_HDR_COOKIE)) { + IPAERR_RL("rt rule does not point to valid hdr\n"); + return -EPERM; + } + } else if (rule->hdr_proc_ctx_hdl) { + *proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl); + if ((*proc_ctx == NULL) || + ((*proc_ctx)->cookie != IPA_PROC_HDR_COOKIE)) { + + IPAERR_RL("rt rule does not point to valid proc ctx\n"); + return -EPERM; + } + } + + return 0; +} + +static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry, + const struct ipa_rt_rule *rule, + struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr, + struct ipa3_hdr_proc_ctx_entry *proc_ctx, + u16 rule_id, bool user) +{ + int id; + + *entry = kmem_cache_zalloc(ipa3_ctx->rt_rule_cache, GFP_KERNEL); + if (!*entry) + goto error; + + INIT_LIST_HEAD(&(*entry)->link); + (*(entry))->cookie = IPA_RT_RULE_COOKIE; + (*(entry))->rule = *rule; + (*(entry))->tbl = tbl; + (*(entry))->hdr = hdr; + (*(entry))->proc_ctx = proc_ctx; + if (rule_id) { + id = rule_id; + (*(entry))->rule_id_valid = 1; + } else { + id = ipa3_alloc_rule_id(tbl->rule_ids); + if (id < 0) { + IPAERR_RL("failed to allocate rule id\n"); + WARN_ON_RATELIMIT_IPA(1); + goto alloc_rule_id_fail; + } + } + (*(entry))->rule_id = id; + (*(entry))->ipacm_installed = user; + + return 0; + +alloc_rule_id_fail: + kmem_cache_free(ipa3_ctx->rt_rule_cache, *entry); +error: + return -EPERM; +} + +static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl, + struct ipa3_rt_tbl *tbl) +{ + int id; + + tbl->rule_cnt++; + if (entry->hdr) + entry->hdr->ref_cnt++; + else if (entry->proc_ctx) + entry->proc_ctx->ref_cnt++; + id = ipa3_id_alloc(entry); + if (id < 0) { + IPAERR_RL("failed to add to tree\n"); + WARN_ON_RATELIMIT_IPA(1); + goto ipa_insert_failed; + } + IPADBG("add rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n", + tbl->idx, tbl->rule_cnt, entry->rule_id); + *rule_hdl = id; + entry->id = id; + + return 0; + +ipa_insert_failed: + if (entry->hdr) + entry->hdr->ref_cnt--; + else if (entry->proc_ctx) + entry->proc_ctx->ref_cnt--; + idr_remove(tbl->rule_ids, entry->rule_id); + list_del(&entry->link); + kmem_cache_free(ipa3_ctx->rt_rule_cache, entry); + return -EPERM; +} + +static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, + const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl, + u16 rule_id, bool user) +{ + struct ipa3_rt_tbl *tbl; + struct ipa3_rt_entry *entry; + struct ipa3_hdr_entry *hdr = NULL; + struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL; + + if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx)) + goto error; + + if (__ipa_rt_validate_rule_id(rule_id)) + goto error; + + tbl = __ipa_add_rt_tbl(ip, name); + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { + IPAERR_RL("failed adding rt tbl name = %s\n", + name ? 
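When the caller does not force a rule_id, __ipa_create_rt_entry() below draws one from the table's IDR via ipa3_alloc_rule_id() and the failure/delete paths release it with idr_remove(). A generic sketch of that range-limited allocation; the low/high bounds are placeholders for the ipahal rule-id range:

static int demo_alloc_rule_id(struct idr *ids, int low, int high, void *entry)
{
	/* idr_alloc()'s end bound is exclusive, hence high + 1 */
	return idr_alloc(ids, entry, low, high + 1, GFP_KERNEL);
}

static void demo_free_rule_id(struct idr *ids, int id)
{
	idr_remove(ids, id);
}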
name : ""); + goto error; + } + /* + * do not allow any rule to be added at "default" routing + * table + */ + if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) && + (tbl->rule_cnt > 0)) { + IPAERR_RL("cannot add rules to default rt table\n"); + goto error; + } + + if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, + rule_id, user)) + goto error; + + if (at_rear) + list_add_tail(&entry->link, &tbl->head_rt_rule_list); + else + list_add(&entry->link, &tbl->head_rt_rule_list); + + if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl)) + goto error; + + return 0; + +error: + return -EPERM; +} + +static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl, + const struct ipa_rt_rule *rule, u32 *rule_hdl, + struct ipa3_rt_entry **add_after_entry) +{ + struct ipa3_rt_entry *entry; + struct ipa3_hdr_entry *hdr = NULL; + struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL; + + if (!*add_after_entry) + goto error; + + if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx)) + goto error; + + if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0, true)) + goto error; + + list_add(&entry->link, &((*add_after_entry)->link)); + + if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl)) + goto error; + + /* + * prepare for next insertion + */ + *add_after_entry = entry; + + return 0; + +error: + *add_after_entry = NULL; + return -EPERM; +} + +/** + * ipa3_add_rt_rule() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ + +int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) +{ + return ipa3_add_rt_rule_usr(rules, false); +} + +/** + * ipa3_add_rt_rule_usr() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * @user_only: [in] indicate installed by userspace module + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ + +int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only) +{ + int i; + int ret; + + if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < rules->num_rules; i++) { + rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + rules->rules[i].rule.hashable = false; + if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name, + &rules->rules[i].rule, + rules->rules[i].at_rear, + &rules->rules[i].rt_rule_hdl, + 0, + user_only)) { + IPAERR_RL("failed to add rt rule %d\n", i); + rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +/** + * ipa3_add_rt_rule_ext() - Add the specified routing rules to SW with rule id + * and optionally commit to IPA HW + * @rules: [inout] set of routing rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules) +{ + int i; + int ret; + + if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + 
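A caller of ipa3_add_rt_rule() builds an ipa_ioc_add_rt_rule with the IP family, table name and a trailing array of ipa_rt_rule_add entries, then checks each rule's status after the call. A minimal sketch; the table name and destination client are illustrative values:

static int demo_install_one_rt_rule(void)
{
	struct ipa_ioc_add_rt_rule *req;
	int rc;

	req = kzalloc(sizeof(*req) + sizeof(struct ipa_rt_rule_add),
		GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->ip = IPA_IP_v4;
	req->commit = 1;
	req->num_rules = 1;
	strlcpy(req->rt_tbl_name, "demo_rt_tbl", IPA_RESOURCE_NAME_MAX);

	req->rules[0].at_rear = 1;
	req->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
	/* real callers also fill rule.attrib and the header handles */

	rc = ipa3_add_rt_rule(req);
	if (!rc && req->rules[0].status)
		rc = -EPERM;	/* per-rule failure reported via status */

	kfree(req);
	return rc;
}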
mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < rules->num_rules; i++) { + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + rules->rules[i].rule.hashable = false; + if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name, + &rules->rules[i].rule, + rules->rules[i].at_rear, + &rules->rules[i].rt_rule_hdl, + rules->rules[i].rule_id, true)) { + IPAERR_RL("failed to add rt rule %d\n", i); + rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +/** + * ipa3_add_rt_rule_after() - Add the given routing rules after the + * specified rule to SW and optionally commit to IPA HW + * @rules: [inout] set of routing rules to add + handle where to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules) +{ + int i; + int ret = 0; + struct ipa3_rt_tbl *tbl = NULL; + struct ipa3_rt_entry *entry = NULL; + + if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name); + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { + IPAERR_RL("failed finding rt tbl name = %s\n", + rules->rt_tbl_name); + ret = -EINVAL; + goto bail; + } + + if (!tbl->rule_cnt) { + IPAERR_RL("tbl->rule_cnt == 0"); + ret = -EINVAL; + goto bail; + } + + entry = ipa3_id_find(rules->add_after_hdl); + if (!entry) { + IPAERR_RL("failed finding rule %d in rt tbls\n", + rules->add_after_hdl); + ret = -EINVAL; + goto bail; + } + + if (entry->cookie != IPA_RT_RULE_COOKIE) { + IPAERR_RL("Invalid cookie value = %u rule %d in rt tbls\n", + entry->cookie, rules->add_after_hdl); + ret = -EINVAL; + goto bail; + } + + if (entry->tbl != tbl) { + IPAERR_RL("given rt rule does not match the table\n"); + ret = -EINVAL; + goto bail; + } + + /* + * do not allow any rule to be added at "default" routing + * table + */ + if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) && + (tbl->rule_cnt > 0)) { + IPAERR_RL("cannot add rules to default rt table\n"); + ret = -EINVAL; + goto bail; + } + + /* + * we add all rules one after the other, if one insertion fails, it cuts + * the chain (all following will receive fail status) following calls to + * __ipa_add_rt_rule_after will fail (entry == NULL) + */ + + for (i = 0; i < rules->num_rules; i++) { + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + rules->rules[i].rule.hashable = false; + if (__ipa_add_rt_rule_after(tbl, + &rules->rules[i].rule, + &rules->rules[i].rt_rule_hdl, + &entry)) { + IPAERR_RL("failed to add rt rule %d\n", i); + rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) { + IPAERR_RL("failed to commit\n"); + ret = -EPERM; + goto bail; + } + + ret = 0; + goto bail; + +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +int __ipa3_del_rt_rule(u32 rule_hdl) +{ + struct ipa3_rt_entry *entry; + int id; + struct ipa3_hdr_entry *hdr_entry; + struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry; + + entry = ipa3_id_find(rule_hdl); + + 
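The comment in ipa3_add_rt_rule_after() above notes that one failed insertion "cuts the chain": __ipa_add_rt_rule_after() clears the anchor entry on failure, so every later rule in the same request is marked failed instead of being inserted out of order. A self-contained model of that status propagation (not driver code):

static void demo_chain_statuses(const bool insert_ok[], int statuses[], int n)
{
	bool have_anchor = true;
	int i;

	for (i = 0; i < n; i++) {
		if (have_anchor && insert_ok[i]) {
			statuses[i] = 0;	/* inserted after the previous rule */
		} else {
			statuses[i] = -1;	/* this insert failed or the chain was already cut */
			have_anchor = false;
		}
	}
}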
if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_RT_RULE_COOKIE) { + IPAERR_RL("bad params\n"); + return -EINVAL; + } + + if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) { + IPADBG("Deleting rule from default rt table idx=%u\n", + entry->tbl->idx); + if (entry->tbl->rule_cnt == 1) { + IPAERR_RL("Default tbl last rule cannot be deleted\n"); + return -EINVAL; + } + } + + /* Adding check to confirm still + * header entry present in header table or not + */ + + if (entry->hdr) { + hdr_entry = ipa3_id_find(entry->rule.hdr_hdl); + if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Header entry already deleted\n"); + return -EINVAL; + } + } else if (entry->proc_ctx) { + hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl); + if (!hdr_proc_entry || + hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) { + IPAERR_RL("Proc header entry already deleted\n"); + return -EINVAL; + } + } + + if (entry->hdr) + __ipa3_release_hdr(entry->hdr->id); + else if (entry->proc_ctx) + __ipa3_release_hdr_proc_ctx(entry->proc_ctx->id); + list_del(&entry->link); + entry->tbl->rule_cnt--; + IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n ref_cnt=%u", + entry->tbl->idx, entry->tbl->rule_cnt, + entry->rule_id, entry->tbl->ref_cnt); + /* if rule id was allocated from idr, remove it */ + if (!entry->rule_id_valid) + idr_remove(entry->tbl->rule_ids, entry->rule_id); + if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) { + if (__ipa_del_rt_tbl(entry->tbl)) + IPAERR_RL("fail to del RT tbl\n"); + } + entry->cookie = 0; + id = entry->id; + kmem_cache_free(ipa3_ctx->rt_rule_cache, entry); + + /* remove the handle from the database */ + ipa3_id_remove(id); + + return 0; +} + +/** + * ipa3_del_rt_rule() - Remove the specified routing rules to SW and optionally + * commit to IPA HW + * @hdls: [inout] set of routing rules to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls) +{ + int i; + int ret; + + if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa3_del_rt_rule(hdls->hdl[i].hdl)) { + IPAERR_RL("failed to del rt rule %i\n", i); + hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) + if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +/** + * ipa_commit_rt_rule() - Commit the current SW routing table of specified type + * to IPA HW + * @ip: The family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_commit_rt(enum ipa_ip_type ip) +{ + int ret; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + /* + * issue a commit on the filtering module of same IP type since + * filtering rules point to routing tables + */ + if (ipa3_commit_flt(ip)) + return -EPERM; + + mutex_lock(&ipa3_ctx->lock); + if (ipa3_ctx->ctrl->ipa3_commit_rt(ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +/** + * ipa3_reset_rt() - reset the current SW routing table of specified type + * (does not commit to HW) + * @ip: [in] The family of routing tables + * 
@user_only: [in] indicate delete rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only) +{ + struct ipa3_rt_tbl *tbl; + struct ipa3_rt_tbl *tbl_next; + struct ipa3_rt_tbl_set *set; + struct ipa3_rt_entry *rule; + struct ipa3_rt_entry *rule_next; + struct ipa3_rt_tbl_set *rset; + struct ipa3_hdr_entry *hdr_entry; + struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry; + u32 apps_start_idx; + int id; + bool tbl_user = false; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + if (ip == IPA_IP_v4) + apps_start_idx = + IPA_MEM_PART(v4_apps_rt_index_lo); + else + apps_start_idx = + IPA_MEM_PART(v6_apps_rt_index_lo); + + /* + * issue a reset on the filtering module of same IP type since + * filtering rules point to routing tables + */ + if (ipa3_reset_flt(ip, user_only)) + IPAERR_RL("fail to reset flt ip=%d\n", ip); + + set = &ipa3_ctx->rt_tbl_set[ip]; + rset = &ipa3_ctx->reap_rt_tbl_set[ip]; + mutex_lock(&ipa3_ctx->lock); + IPADBG("reset rt ip=%d\n", ip); + list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) { + tbl_user = false; + list_for_each_entry_safe(rule, rule_next, + &tbl->head_rt_rule_list, link) { + if (ipa3_id_find(rule->id) == NULL) { + WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa3_ctx->lock); + return -EFAULT; + } + + /* indicate if tbl used for user-specified rules*/ + if (rule->ipacm_installed) { + IPADBG("tbl_user %d, tbl-index %d\n", + tbl_user, tbl->id); + tbl_user = true; + } + /* + * for the "default" routing tbl, remove all but the + * last rule + */ + if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1) + continue; + + if (!user_only || + rule->ipacm_installed) { + list_del(&rule->link); + if (rule->hdr) { + hdr_entry = ipa3_id_find( + rule->rule.hdr_hdl); + if (!hdr_entry || + hdr_entry->cookie != IPA_HDR_COOKIE) { + mutex_unlock(&ipa3_ctx->lock); + IPAERR_RL( + "Header already deleted\n"); + return -EINVAL; + } + } else if (rule->proc_ctx) { + hdr_proc_entry = + ipa3_id_find( + rule->rule.hdr_proc_ctx_hdl); + if (!hdr_proc_entry || + hdr_proc_entry->cookie != + IPA_PROC_HDR_COOKIE) { + mutex_unlock(&ipa3_ctx->lock); + IPAERR_RL( + "Proc entry already deleted\n"); + return -EINVAL; + } + } + tbl->rule_cnt--; + if (rule->hdr) + __ipa3_release_hdr(rule->hdr->id); + else if (rule->proc_ctx) + __ipa3_release_hdr_proc_ctx( + rule->proc_ctx->id); + rule->cookie = 0; + if (!rule->rule_id_valid) + idr_remove(tbl->rule_ids, + rule->rule_id); + id = rule->id; + kmem_cache_free(ipa3_ctx->rt_rule_cache, rule); + + /* remove the handle from the database */ + ipa3_id_remove(id); + } + } + + if (ipa3_id_find(tbl->id) == NULL) { + WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa3_ctx->lock); + return -EFAULT; + } + id = tbl->id; + + /* do not remove the "default" routing tbl which has index 0 */ + if (tbl->idx != apps_start_idx) { + if (!user_only || tbl_user) { + tbl->rule_ids = NULL; + if (tbl->in_sys[IPA_RULE_HASHABLE] || + tbl->in_sys[IPA_RULE_NON_HASHABLE]) { + list_move(&tbl->link, + &rset->head_rt_tbl_list); + clear_bit(tbl->idx, + &ipa3_ctx->rt_idx_bitmap[ip]); + set->tbl_cnt--; + IPADBG("rst tbl_idx=%d cnt=%d\n", + tbl->idx, set->tbl_cnt); + } else { + list_del(&tbl->link); + set->tbl_cnt--; + clear_bit(tbl->idx, + &ipa3_ctx->rt_idx_bitmap[ip]); + IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n", + tbl->idx, set->tbl_cnt); + kmem_cache_free(ipa3_ctx->rt_tbl_cache, + tbl); + } + /* remove the handle from the 
database */ + ipa3_id_remove(id); + } + } + } + + /* commit the change to IPA-HW */ + if (ipa3_ctx->ctrl->ipa3_commit_rt(IPA_IP_v4) || + ipa3_ctx->ctrl->ipa3_commit_rt(IPA_IP_v6)) { + IPAERR("fail to commit rt-rule\n"); + WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa3_ctx->lock); + return -EPERM; + } + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +/** + * ipa3_get_rt_tbl() - lookup the specified routing table and return handle if + * it exists, if lookup succeeds the routing table ref cnt is increased + * @lookup: [inout] routing table to lookup and its handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + * Caller should call ipa3_put_rt_tbl later if this function succeeds + */ +int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) +{ + struct ipa3_rt_tbl *entry; + int result = -EFAULT; + + if (lookup == NULL || lookup->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + mutex_lock(&ipa3_ctx->lock); + lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name); + if (entry && entry->cookie == IPA_RT_TBL_COOKIE) { + if (entry->ref_cnt == U32_MAX) { + IPAERR_RL("fail: ref count crossed limit\n"); + goto ret; + } + entry->ref_cnt++; + lookup->hdl = entry->id; + + /* commit for get */ + if (ipa3_ctx->ctrl->ipa3_commit_rt(lookup->ip)) + IPAERR_RL("fail to commit RT tbl\n"); + + result = 0; + } + +ret: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_put_rt_tbl() - Release the specified routing table handle + * @rt_tbl_hdl: [in] the routing table handle to release + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_put_rt_tbl(u32 rt_tbl_hdl) +{ + struct ipa3_rt_tbl *entry; + enum ipa_ip_type ip = IPA_IP_MAX; + int result = 0; + + mutex_lock(&ipa3_ctx->lock); + entry = ipa3_id_find(rt_tbl_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + result = -EINVAL; + goto ret; + } + + if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) { + IPAERR_RL("bad params\n"); + result = -EINVAL; + goto ret; + } + + if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4]) + ip = IPA_IP_v4; + else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6]) + ip = IPA_IP_v6; + else { + WARN_ON_RATELIMIT_IPA(1); + result = -EINVAL; + goto ret; + } + + entry->ref_cnt--; + if (entry->ref_cnt == 0 && entry->rule_cnt == 0) { + IPADBG("zero ref_cnt, delete rt tbl (idx=%u)\n", + entry->idx); + if (__ipa_del_rt_tbl(entry)) + IPAERR_RL("fail to del RT tbl\n"); + /* commit for put */ + if (ipa3_ctx->ctrl->ipa3_commit_rt(ip)) + IPAERR_RL("fail to commit RT tbl\n"); + } + + result = 0; + +ret: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + + +static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) +{ + struct ipa3_rt_entry *entry; + struct ipa3_hdr_entry *hdr = NULL; + struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL; + struct ipa3_hdr_entry *hdr_entry; + struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry; + + if (rtrule->rule.hdr_hdl) { + hdr = ipa3_id_find(rtrule->rule.hdr_hdl); + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { + IPAERR_RL("rt rule does not point to valid hdr\n"); + goto error; + } + } else if (rtrule->rule.hdr_proc_ctx_hdl) { + proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl); + if ((proc_ctx == NULL) || + (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) { + IPAERR_RL("rt rule does not point to valid proc ctx\n"); + goto error; + } + } + + entry = 
ipa3_id_find(rtrule->rt_rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + goto error; + } + + if (entry->cookie != IPA_RT_RULE_COOKIE) { + IPAERR_RL("bad params\n"); + goto error; + } + + if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) { + IPAERR_RL("Default tbl rule cannot be modified\n"); + return -EINVAL; + } + /* Adding check to confirm still + * header entry present in header table or not + */ + + if (entry->hdr) { + hdr_entry = ipa3_id_find(entry->rule.hdr_hdl); + if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Header entry already deleted\n"); + return -EPERM; + } + } else if (entry->proc_ctx) { + hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl); + if (!hdr_proc_entry || + hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) { + IPAERR_RL("Proc header entry already deleted\n"); + return -EPERM; + } + } + + if (entry->hdr) + entry->hdr->ref_cnt--; + if (entry->proc_ctx) + entry->proc_ctx->ref_cnt--; + + entry->rule = rtrule->rule; + entry->hdr = hdr; + entry->proc_ctx = proc_ctx; + + if (entry->hdr) + entry->hdr->ref_cnt++; + if (entry->proc_ctx) + entry->proc_ctx->ref_cnt++; + + entry->hw_len = 0; + entry->prio = 0; + + return 0; + +error: + return -EPERM; +} + +/** + * ipa3_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally + * commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_rules; i++) { + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + hdls->rules[i].rule.hashable = false; + if (__ipa_mdfy_rt_rule(&hdls->rules[i])) { + IPAERR_RL("failed to mdfy rt rule %i\n", i); + hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED; + } else { + hdls->rules[i].status = 0; + } + } + + if (hdls->commit) + if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_set_rt_tuple_mask() - Sets the rt tuple masking for the given tbl + * table index must be for AP EP (not modem) + * updates the the routing masking values without changing the flt ones. 
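+ * (this is done by reading IPA_ENDP_FILTER_ROUTER_HSH_CFG_n for the given
+ * index, replacing only the rt part of the tuple and writing it back)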
+ * + * @tbl_idx: routing table index to configure the tuple masking + * @tuple: the tuple members masking + * Returns: 0 on success, negative on failure + * + */ +int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple) +{ + struct ipahal_reg_fltrt_hash_tuple fltrt_tuple; + + if (!tuple) { + IPAERR_RL("bad tuple\n"); + return -EINVAL; + } + + if (tbl_idx >= + max(IPA_MEM_PART(v6_rt_num_index), + IPA_MEM_PART(v4_rt_num_index)) || + tbl_idx < 0) { + IPAERR_RL("bad table index\n"); + return -EINVAL; + } + + if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) && + tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi)) { + IPAERR_RL("cannot configure modem v4 rt tuple by AP\n"); + return -EINVAL; + } + + if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) && + tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi)) { + IPAERR_RL("cannot configure modem v6 rt tuple by AP\n"); + return -EINVAL; + } + + ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n, + tbl_idx, &fltrt_tuple); + fltrt_tuple.rt = *tuple; + ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n, + tbl_idx, &fltrt_tuple); + + return 0; +} + +/** + * ipa3_rt_read_tbl_from_hw() -Read routing table from IPA HW + * @tbl_idx: routing table index + * @ip_type: IPv4 or IPv6 table + * @hashable: hashable or non-hashable table + * @entry: array to fill the table entries + * @num_entry: number of entries in entry array. set by the caller to indicate + * entry array size. Then set by this function as an output parameter to + * indicate the number of entries in the array + * + * This function reads the routing table from IPA SRAM and prepares an array + * of entries. This function is mainly used for debugging purposes. + * + * If empty table or Modem Apps table, zero entries will be returned. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type, + bool hashable, struct ipahal_rt_rule_entry entry[], int *num_entry) +{ + void *ipa_sram_mmio; + u64 hdr_base_ofst; + int res = 0; + u64 tbl_addr; + bool is_sys; + struct ipa_mem_buffer *sys_tbl_mem; + u8 *rule_addr; + int rule_idx; + + IPADBG_LOW("tbl_idx=%d ip_t=%d hash=%d entry=0x%pK num_entry=0x%pK\n", + tbl_idx, ip_type, hashable, entry, num_entry); + + /* + * SRAM memory not allocated to hash tables. 
Reading of hash table + * rules operation not supported + */ + if (hashable && ipa3_ctx->ipa_fltrt_not_hashable) { + IPADBG("Reading hashable rules not supported\n"); + *num_entry = 0; + return 0; + } + + if (ip_type == IPA_IP_v4 && tbl_idx >= IPA_MEM_PART(v4_rt_num_index)) { + IPAERR_RL("Invalid params\n"); + return -EFAULT; + } + + if (ip_type == IPA_IP_v6 && tbl_idx >= IPA_MEM_PART(v6_rt_num_index)) { + IPAERR_RL("Invalid params\n"); + return -EFAULT; + } + + /* map IPA SRAM */ + ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, + ipa3_ctx->smem_restricted_bytes / 4), + ipa3_ctx->smem_sz); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + + memset(entry, 0, sizeof(*entry) * (*num_entry)); + if (hashable) { + if (ip_type == IPA_IP_v4) + hdr_base_ofst = + IPA_MEM_PART(v4_rt_hash_ofst); + else + hdr_base_ofst = + IPA_MEM_PART(v6_rt_hash_ofst); + } else { + if (ip_type == IPA_IP_v4) + hdr_base_ofst = + IPA_MEM_PART(v4_rt_nhash_ofst); + else + hdr_base_ofst = + IPA_MEM_PART(v6_rt_nhash_ofst); + } + + IPADBG_LOW("hdr_base_ofst=0x%llx\n", hdr_base_ofst); + + res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst, + tbl_idx, &tbl_addr, &is_sys); + if (res) { + IPAERR("failed to read table address from header structure\n"); + goto bail; + } + IPADBG_LOW("rt tbl %d: tbl_addr=0x%llx is_sys=%d\n", + tbl_idx, tbl_addr, is_sys); + if (!tbl_addr) { + IPAERR("invalid rt tbl addr\n"); + res = -EFAULT; + goto bail; + } + + /* for tables which reside in DDR access it from the virtual memory */ + if (is_sys) { + struct ipa3_rt_tbl_set *set; + struct ipa3_rt_tbl *tbl; + + set = &ipa3_ctx->rt_tbl_set[ip_type]; + rule_addr = NULL; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + if (tbl->idx == tbl_idx) { + sys_tbl_mem = &(tbl->curr_mem[hashable ? + IPA_RULE_HASHABLE : + IPA_RULE_NON_HASHABLE]); + if (sys_tbl_mem->phys_base && + sys_tbl_mem->phys_base != tbl_addr) { + IPAERR("mismatch:parsed=%llx sw=%pad\n" + , tbl_addr, + &sys_tbl_mem->phys_base); + } + if (sys_tbl_mem->phys_base) + rule_addr = sys_tbl_mem->base; + else + rule_addr = NULL; + } + } + } else { + rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr; + } + + IPADBG_LOW("First rule addr 0x%pK\n", rule_addr); + + if (!rule_addr) { + /* Modem table in system memory or empty table */ + *num_entry = 0; + goto bail; + } + + rule_idx = 0; + while (rule_idx < *num_entry) { + res = ipahal_rt_parse_hw_rule(rule_addr, &entry[rule_idx]); + if (res) { + IPAERR("failed parsing rt rule\n"); + goto bail; + } + + IPADBG_LOW("rule_size=%d\n", entry[rule_idx].rule_size); + if (!entry[rule_idx].rule_size) + break; + + rule_addr += entry[rule_idx].rule_size; + rule_idx++; + } + *num_entry = rule_idx; +bail: + iounmap(ipa_sram_mmio); + return res; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h new file mode 100644 index 000000000000..7effbb4c7d15 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ipa +#define TRACE_INCLUDE_FILE ipa_trace + +#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _IPA_TRACE_H + +#include + +TRACE_EVENT( + intr_to_poll3, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + poll_to_intr3, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + idle_sleep_enter3, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + idle_sleep_exit3, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + rmnet_ipa_netifni3, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt) +); + +TRACE_EVENT( + rmnet_ipa_netifrx3, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt) +); + +TRACE_EVENT( + rmnet_ipa_netif_rcv_skb3, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt) +); + +TRACE_EVENT( + ipa3_rx_poll_num, + + TP_PROTO(int poll_num), + + TP_ARGS(poll_num), + + TP_STRUCT__entry( + __field(int, poll_num) + ), + + TP_fast_assign( + __entry->poll_num = poll_num; + ), + + TP_printk("each_poll_aggr_pkt_num=%d", __entry->poll_num) +); + +TRACE_EVENT( + ipa3_rx_poll_cnt, + + TP_PROTO(int poll_num), + + TP_ARGS(poll_num), + + TP_STRUCT__entry( + __field(int, poll_num) + ), + + TP_fast_assign( + __entry->poll_num = poll_num; + ), + + TP_printk("napi_overall_poll_pkt_cnt=%d", __entry->poll_num) +); + + +#endif /* _IPA_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c new file mode 100644 index 000000000000..f3fad51125fc --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c @@ -0,0 +1,982 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#include "ipa_i.h" +#include + +#define IPA_RAM_UC_SMEM_SIZE 128 +#define IPA_HW_INTERFACE_VERSION 0x2000 +#define IPA_PKT_FLUSH_TO_US 100 +#define IPA_UC_POLL_SLEEP_USEC 100 +#define IPA_UC_POLL_MAX_RETRY 10000 + +/** + * Mailbox register to Interrupt HWP for CPU cmd + * Usage of IPA_UC_MAILBOX_m_n doorbell instead of IPA_IRQ_EE_UC_0 + * due to HW limitation. 
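+ * For reference, the callers below ring this doorbell with
+ * ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n, IPA_CPU_2_HW_CMD_MBOX_m,
+ * IPA_CPU_2_HW_CMD_MBOX_n, 0x1) when apply_rg10_wa is set, and use
+ * ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1) otherwise.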
+ * + */ +#define IPA_CPU_2_HW_CMD_MBOX_m 0 +#define IPA_CPU_2_HW_CMD_MBOX_n 23 + +/** + * enum ipa3_cpu_2_hw_commands - Values that represent the commands from the CPU + * IPA_CPU_2_HW_CMD_NO_OP : No operation is required. + * IPA_CPU_2_HW_CMD_UPDATE_FLAGS : Update SW flags which defines the behavior + * of HW. + * IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST : Launch predefined test over HW. + * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO : Read HW internal debug information. + * IPA_CPU_2_HW_CMD_ERR_FATAL : CPU instructs HW to perform error fatal + * handling. + * IPA_CPU_2_HW_CMD_CLK_GATE : CPU instructs HW to goto Clock Gated state. + * IPA_CPU_2_HW_CMD_CLK_UNGATE : CPU instructs HW to goto Clock Ungated state. + * IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB. + * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug. + * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness. + * IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO: Command to store remote IPA Info + */ +enum ipa3_cpu_2_hw_commands { + IPA_CPU_2_HW_CMD_NO_OP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_CPU_2_HW_CMD_UPDATE_FLAGS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), + IPA_CPU_2_HW_CMD_DEBUG_GET_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3), + IPA_CPU_2_HW_CMD_ERR_FATAL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4), + IPA_CPU_2_HW_CMD_CLK_GATE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5), + IPA_CPU_2_HW_CMD_CLK_UNGATE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6), + IPA_CPU_2_HW_CMD_MEMCPY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7), + IPA_CPU_2_HW_CMD_RESET_PIPE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8), + IPA_CPU_2_HW_CMD_REG_WRITE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9), + IPA_CPU_2_HW_CMD_GSI_CH_EMPTY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10), + IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 11), +}; + +/** + * enum ipa3_hw_2_cpu_responses - Values that represent common HW responses + * to CPU commands. + * @IPA_HW_2_CPU_RESPONSE_NO_OP : No operation response + * @IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED : HW shall send this command once + * boot sequence is completed and HW is ready to serve commands from CPU + * @IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED: Response to CPU commands + * @IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO : Response to + * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO command + */ +enum ipa3_hw_2_cpu_responses { + IPA_HW_2_CPU_RESPONSE_NO_OP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), + IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3), +}; + +/** + * struct IpaHwMemCopyData_t - Structure holding the parameters + * for IPA_CPU_2_HW_CMD_MEMCPY command. + * + * The parameters are passed as immediate params in the shared memory + */ +struct IpaHwMemCopyData_t { + u32 destination_addr; + u32 source_addr; + u32 dest_buffer_size; + u32 source_buffer_size; +}; + +/** + * struct IpaHwRegWriteCmdData_t - holds the parameters for + * IPA_CPU_2_HW_CMD_REG_WRITE command. Parameters are + * sent as 64b immediate parameters. 
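+ * (ipa3_uc_rg10_write_reg() below builds this command from the register
+ * physical address and the value to be written)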
+ * @RegisterAddress: RG10 register address where the value needs to be written + * @RegisterValue: 32-Bit value to be written into the register + */ +struct IpaHwRegWriteCmdData_t { + u32 RegisterAddress; + u32 RegisterValue; +}; + +/** + * union IpaHwCpuCmdCompletedResponseData_t - Structure holding the parameters + * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response. + * @originalCmdOp : The original command opcode + * @status : 0 for success indication, otherwise failure + * @reserved : Reserved + * + * Parameters are sent as 32b immediate parameters. + */ +union IpaHwCpuCmdCompletedResponseData_t { + struct IpaHwCpuCmdCompletedResponseParams_t { + u32 originalCmdOp:8; + u32 status:8; + u32 reserved:16; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_UPDATE_FLAGS command + * @newFlags: SW flags defined the behavior of HW. + * This field is expected to be used as bitmask for enum ipa3_hw_flags + */ +union IpaHwUpdateFlagsCmdData_t { + struct IpaHwUpdateFlagsCmdParams_t { + u32 newFlags; + } params; + u32 raw32b; +}; + +/** + * union IpaHwChkChEmptyCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY command. Parameters are sent as 32b + * immediate parameters. + * @ee_n : EE owner of the channel + * @vir_ch_id : GSI virtual channel ID of the channel to checked of emptiness + * @reserved_02_04 : Reserved + */ +union IpaHwChkChEmptyCmdData_t { + struct IpaHwChkChEmptyCmdParams_t { + u8 ee_n; + u8 vir_ch_id; + u16 reserved_02_04; + } __packed params; + u32 raw32b; +} __packed; + + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO + * command. + * @remoteIPAAddr: 5G IPA address : uC proxies Q6 doorbell to this address + * @mboxN: mbox on which Q6 will interrupt uC + */ +struct IpaHwDbAddrInfo_t { + u32 remoteIPAAddr; + uint32_t mboxN; +} __packed; + + +/** + * When resource group 10 limitation mitigation is enabled, uC send + * cmd should be able to run in interrupt context, so using spin lock + * instead of mutex. 
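+ * The IPA3_UC_LOCK()/IPA3_UC_UNLOCK() macros below take the spinlock when
+ * ipa3_ctx->apply_rg10_wa is set and fall back to the mutex otherwise.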
+ */ +#define IPA3_UC_LOCK(flags) \ +do { \ + if (ipa3_ctx->apply_rg10_wa) \ + spin_lock_irqsave(&ipa3_ctx->uc_ctx.uc_spinlock, flags); \ + else \ + mutex_lock(&ipa3_ctx->uc_ctx.uc_lock); \ +} while (0) + +#define IPA3_UC_UNLOCK(flags) \ +do { \ + if (ipa3_ctx->apply_rg10_wa) \ + spin_unlock_irqrestore(&ipa3_ctx->uc_ctx.uc_spinlock, flags); \ + else \ + mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock); \ +} while (0) + +struct ipa3_uc_hdlrs ipa3_uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } }; + +const char *ipa_hw_error_str(enum ipa3_hw_errors err_type) +{ + const char *str; + + switch (err_type) { + case IPA_HW_ERROR_NONE: + str = "IPA_HW_ERROR_NONE"; + break; + case IPA_HW_INVALID_DOORBELL_ERROR: + str = "IPA_HW_INVALID_DOORBELL_ERROR"; + break; + case IPA_HW_DMA_ERROR: + str = "IPA_HW_DMA_ERROR"; + break; + case IPA_HW_FATAL_SYSTEM_ERROR: + str = "IPA_HW_FATAL_SYSTEM_ERROR"; + break; + case IPA_HW_INVALID_OPCODE: + str = "IPA_HW_INVALID_OPCODE"; + break; + case IPA_HW_INVALID_PARAMS: + str = "IPA_HW_INVALID_PARAMS"; + break; + case IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE: + str = "IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE"; + break; + case IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE: + str = "IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE"; + break; + case IPA_HW_GSI_CH_NOT_EMPTY_FAILURE: + str = "IPA_HW_GSI_CH_NOT_EMPTY_FAILURE"; + break; + default: + str = "INVALID ipa_hw_errors type"; + } + + return str; +} + +static void ipa3_log_evt_hdlr(void) +{ + int i; + + if (!ipa3_ctx->uc_ctx.uc_event_top_ofst) { + ipa3_ctx->uc_ctx.uc_event_top_ofst = + ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams; + if (ipa3_ctx->uc_ctx.uc_event_top_ofst + + sizeof(struct IpaHwEventLogInfoData_t) >= + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst( + IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) + + ipa3_ctx->smem_sz) { + IPAERR("uc_top 0x%x outside SRAM\n", + ipa3_ctx->uc_ctx.uc_event_top_ofst); + goto bad_uc_top_ofst; + } + + ipa3_ctx->uc_ctx.uc_event_top_mmio = ioremap( + ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->uc_ctx.uc_event_top_ofst, + sizeof(struct IpaHwEventLogInfoData_t)); + if (!ipa3_ctx->uc_ctx.uc_event_top_mmio) { + IPAERR("fail to ioremap uc top\n"); + goto bad_uc_top_ofst; + } + + for (i = 0; i < IPA_HW_NUM_FEATURES; i++) { + if (ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr) + ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr + (ipa3_ctx->uc_ctx.uc_event_top_mmio); + } + } else { + + if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams != + ipa3_ctx->uc_ctx.uc_event_top_ofst) { + IPAERR("uc top ofst changed new=%u cur=%u\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams, + ipa3_ctx->uc_ctx.uc_event_top_ofst); + } + } + + return; + +bad_uc_top_ofst: + ipa3_ctx->uc_ctx.uc_event_top_ofst = 0; +} + +/** + * ipa3_uc_state_check() - Check the status of the uC interface + * + * Return value: 0 if the uC is loaded, interface is initialized + * and there was no recent failure in one of the commands. + * A negative value is returned otherwise. 
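+ * (every failure path currently returns -EFAULT)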
+ */ +int ipa3_uc_state_check(void) +{ + if (!ipa3_ctx->uc_ctx.uc_inited) { + IPAERR("uC interface not initialized\n"); + return -EFAULT; + } + + if (!ipa3_ctx->uc_ctx.uc_loaded) { + IPAERR("uC is not loaded\n"); + return -EFAULT; + } + + if (ipa3_ctx->uc_ctx.uc_failed) { + IPAERR("uC has failed its last command\n"); + return -EFAULT; + } + + return 0; +} + +/** + * ipa3_uc_loaded_check() - Check the uC has been loaded + * + * Return value: 1 if the uC is loaded, 0 otherwise + */ +int ipa3_uc_loaded_check(void) +{ + return ipa3_ctx->uc_ctx.uc_loaded; +} +EXPORT_SYMBOL(ipa3_uc_loaded_check); + +static void ipa3_uc_event_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + union IpaHwErrorEventData_t evt; + u8 feature; + + WARN_ON(private_data != ipa3_ctx); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + IPADBG("uC evt opcode=%u\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp); + + + feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp); + + if (feature >= IPA_HW_FEATURE_MAX) { + IPAERR("Invalid feature %u for event %u\n", + feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + /* Feature specific handling */ + if (ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr) + ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr + (ipa3_ctx->uc_ctx.uc_sram_mmio); + + /* General handling */ + if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_ERROR) { + evt.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams; + IPAERR("uC Error, evt errorType = %s\n", + ipa_hw_error_str(evt.params.errorType)); + ipa3_ctx->uc_ctx.uc_failed = true; + ipa3_ctx->uc_ctx.uc_error_type = evt.params.errorType; + ipa3_ctx->uc_ctx.uc_error_timestamp = + ipahal_read_reg(IPA_TAG_TIMER); + /* Unexpected UC hardware state */ + ipa_assert(); + } else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_LOG_INFO) { + IPADBG("uC evt log info ofst=0x%x\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams); + ipa3_log_evt_hdlr(); + } else { + IPADBG("unsupported uC evt opcode=%u\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp); + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + +} + +int ipa3_uc_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + int result = 0; + struct ipa_active_client_logging_info log_info; + + IPADBG("this=%pK evt=%lu ptr=%pK\n", this, event, ptr); + + result = ipa3_uc_state_check(); + if (result) + goto fail; + + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); + if (ipa3_inc_client_enable_clks_no_block(&log_info)) + goto fail; + + ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp = + IPA_CPU_2_HW_CMD_ERR_FATAL; + ipa3_ctx->uc_ctx.pending_cmd = ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp; + /* ensure write to shared memory is done before triggering uc */ + wmb(); + + if (ipa3_ctx->apply_rg10_wa) + ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n, + IPA_CPU_2_HW_CMD_MBOX_m, + IPA_CPU_2_HW_CMD_MBOX_n, 0x1); + else + ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1); + + /* give uc enough time to save state */ + udelay(IPA_PKT_FLUSH_TO_US); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG("err_fatal issued\n"); + +fail: + return NOTIFY_DONE; +} + +static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + union IpaHwCpuCmdCompletedResponseData_t uc_rsp; + u8 feature; + int res; + int i; + + WARN_ON(private_data != ipa3_ctx); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + IPADBG("uC rsp opcode=%u\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp); + + feature = 
EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp); + + if (feature >= IPA_HW_FEATURE_MAX) { + IPAERR("Invalid feature %u for event %u\n", + feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + + /* Feature specific handling */ + if (ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr) { + res = ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr( + ipa3_ctx->uc_ctx.uc_sram_mmio, + &ipa3_ctx->uc_ctx.uc_status); + if (res == 0) { + IPADBG("feature %d specific response handler\n", + feature); + complete_all(&ipa3_ctx->uc_ctx.uc_completion); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + } + + /* General handling */ + if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp == + IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) { + ipa3_ctx->uc_ctx.uc_loaded = true; + + IPADBG("IPA uC loaded\n"); + /* + * The proxy vote is held until uC is loaded to ensure that + * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED is received. + */ + ipa3_proxy_clk_unvote(); + + for (i = 0; i < IPA_HW_NUM_FEATURES; i++) { + if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr) + ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr(); + } + } else if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp == + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) { + uc_rsp.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams; + IPADBG("uC cmd response opcode=%u status=%u\n", + uc_rsp.params.originalCmdOp, + uc_rsp.params.status); + if (uc_rsp.params.originalCmdOp == + ipa3_ctx->uc_ctx.pending_cmd) { + ipa3_ctx->uc_ctx.uc_status = uc_rsp.params.status; + complete_all(&ipa3_ctx->uc_ctx.uc_completion); + } else { + IPAERR("Expected cmd=%u rcvd cmd=%u\n", + ipa3_ctx->uc_ctx.pending_cmd, + uc_rsp.params.originalCmdOp); + } + } else { + IPAERR("Unsupported uC rsp opcode = %u\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp); + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode, + u32 expected_status, bool polling_mode, unsigned long timeout_jiffies) +{ + int index; + union IpaHwCpuCmdCompletedResponseData_t uc_rsp; + unsigned long flags = 0; + int retries = 0; + u32 uc_error_type; + +send_cmd_lock: + IPA3_UC_LOCK(flags); + + if (ipa3_uc_state_check()) { + IPADBG("uC send command aborted\n"); + IPA3_UC_UNLOCK(flags); + return -EBADF; + } +send_cmd: + if (ipa3_ctx->apply_rg10_wa) { + if (!polling_mode) + IPADBG("Overriding mode to polling mode\n"); + polling_mode = true; + } else { + init_completion(&ipa3_ctx->uc_ctx.uc_completion); + } + + ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd_lo; + ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams_hi = cmd_hi; + ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp = opcode; + ipa3_ctx->uc_ctx.pending_cmd = opcode; + ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp = 0; + ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams = 0; + + ipa3_ctx->uc_ctx.uc_status = 0; + + /* ensure write to shared memory is done before triggering uc */ + wmb(); + + if (ipa3_ctx->apply_rg10_wa) + ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n, + IPA_CPU_2_HW_CMD_MBOX_m, + IPA_CPU_2_HW_CMD_MBOX_n, 0x1); + else + ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1); + + if (polling_mode) { + struct IpaHwSharedMemCommonMapping_t *uc_sram_ptr = + ipa3_ctx->uc_ctx.uc_sram_mmio; + for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) { + if (uc_sram_ptr->responseOp == + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) { + uc_rsp.raw32b = uc_sram_ptr->responseParams; + if (uc_rsp.params.originalCmdOp == + ipa3_ctx->uc_ctx.pending_cmd) { + ipa3_ctx->uc_ctx.uc_status = + uc_rsp.params.status; + break; + } + } + if (ipa3_ctx->apply_rg10_wa) + 
udelay(IPA_UC_POLL_SLEEP_USEC); + else + usleep_range(IPA_UC_POLL_SLEEP_USEC, + IPA_UC_POLL_SLEEP_USEC); + } + + if (index == IPA_UC_POLL_MAX_RETRY) { + IPAERR("uC max polling retries reached\n"); + if (ipa3_ctx->uc_ctx.uc_failed) { + uc_error_type = ipa3_ctx->uc_ctx.uc_error_type; + IPAERR("uC reported on Error, errorType = %s\n", + ipa_hw_error_str(uc_error_type)); + } + IPA3_UC_UNLOCK(flags); + /* Unexpected UC hardware state */ + ipa_assert(); + } + } else { + if (wait_for_completion_timeout(&ipa3_ctx->uc_ctx.uc_completion, + timeout_jiffies) == 0) { + IPAERR("uC timed out\n"); + if (ipa3_ctx->uc_ctx.uc_failed) { + uc_error_type = ipa3_ctx->uc_ctx.uc_error_type; + IPAERR("uC reported on Error, errorType = %s\n", + ipa_hw_error_str(uc_error_type)); + } + IPA3_UC_UNLOCK(flags); + /* Unexpected UC hardware state */ + ipa_assert(); + } + } + + if (ipa3_ctx->uc_ctx.uc_status != expected_status) { + if (ipa3_ctx->uc_ctx.uc_status == + IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE || + ipa3_ctx->uc_ctx.uc_status == + IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE || + ipa3_ctx->uc_ctx.uc_status == + IPA_HW_CONS_STOP_FAILURE || + ipa3_ctx->uc_ctx.uc_status == + IPA_HW_PROD_STOP_FAILURE) { + retries++; + if (retries == IPA_GSI_CHANNEL_STOP_MAX_RETRY) { + IPAERR("Failed after %d tries\n", retries); + IPA3_UC_UNLOCK(flags); + /* Unexpected UC hardware state */ + ipa_assert(); + } + IPA3_UC_UNLOCK(flags); + if (ipa3_ctx->uc_ctx.uc_status == + IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE) + ipa3_inject_dma_task_for_gsi(); + /* sleep for short period to flush IPA */ + usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC, + IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC); + goto send_cmd_lock; + } + + if (ipa3_ctx->uc_ctx.uc_status == + IPA_HW_GSI_CH_NOT_EMPTY_FAILURE) { + retries++; + if (retries >= IPA_GSI_CHANNEL_EMPTY_MAX_RETRY) { + IPAERR("Failed after %d tries\n", retries); + IPA3_UC_UNLOCK(flags); + return -EFAULT; + } + if (ipa3_ctx->apply_rg10_wa) + udelay( + IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC / 2 + + IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC / 2); + else + usleep_range( + IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC, + IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC); + goto send_cmd; + } + + IPAERR("Recevied status %u, Expected status %u\n", + ipa3_ctx->uc_ctx.uc_status, expected_status); + IPA3_UC_UNLOCK(flags); + return -EFAULT; + } + + IPA3_UC_UNLOCK(flags); + + IPADBG("uC cmd %u send succeeded\n", opcode); + + return 0; +} + +/** + * ipa3_uc_interface_init() - Initialize the interface with the uC + * + * Return value: 0 on success, negative value otherwise + */ +int ipa3_uc_interface_init(void) +{ + int result; + unsigned long phys_addr; + + if (ipa3_ctx->uc_ctx.uc_inited) { + IPADBG("uC interface already initialized\n"); + return 0; + } + + mutex_init(&ipa3_ctx->uc_ctx.uc_lock); + spin_lock_init(&ipa3_ctx->uc_ctx.uc_spinlock); + + phys_addr = ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0); + ipa3_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr, + IPA_RAM_UC_SMEM_SIZE); + if (!ipa3_ctx->uc_ctx.uc_sram_mmio) { + IPAERR("Fail to ioremap IPA uC SRAM\n"); + result = -ENOMEM; + goto remap_fail; + } + + if (!ipa3_ctx->apply_rg10_wa) { + result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0, + ipa3_uc_event_handler, true, + ipa3_ctx); + if (result) { + IPAERR("Fail to register for UC_IRQ0 rsp interrupt\n"); + result = -EFAULT; + goto irq_fail0; + } + + result = ipa3_add_interrupt_handler(IPA_UC_IRQ_1, + ipa3_uc_response_hdlr, true, + ipa3_ctx); + if (result) { + 
IPAERR("fail to register for UC_IRQ1 rsp interrupt\n"); + result = -EFAULT; + goto irq_fail1; + } + } + + ipa3_ctx->uc_ctx.uc_inited = true; + + IPADBG("IPA uC interface is initialized\n"); + return 0; + +irq_fail1: + ipa3_remove_interrupt_handler(IPA_UC_IRQ_0); +irq_fail0: + iounmap(ipa3_ctx->uc_ctx.uc_sram_mmio); +remap_fail: + return result; +} + +/** + * ipa3_uc_load_notify() - Notification about uC loading + * + * This function should be called when IPA uC interface layer cannot + * determine by itself about uC loading by waits for external notification. + * Example is resource group 10 limitation were ipa driver does not get uC + * interrupts. + * The function should perform actions that were not done at init due to uC + * not being loaded then. + */ +void ipa3_uc_load_notify(void) +{ + int i; + int result; + + if (!ipa3_ctx->apply_rg10_wa) + return; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipa3_ctx->uc_ctx.uc_loaded = true; + IPADBG("IPA uC loaded\n"); + + ipa3_proxy_clk_unvote(); + + ipa3_init_interrupts(); + + result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0, + ipa3_uc_event_handler, true, + ipa3_ctx); + if (result) + IPAERR("Fail to register for UC_IRQ0 rsp interrupt.\n"); + + for (i = 0; i < IPA_HW_NUM_FEATURES; i++) { + if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr) + ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr(); + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} +EXPORT_SYMBOL(ipa3_uc_load_notify); + +/** + * ipa3_uc_send_cmd() - Send a command to the uC + * + * Note1: This function sends command with 32bit parameter and do not + * use the higher 32bit of the command parameter (set to zero). + * + * Note2: In case the operation times out (No response from the uC) or + * polling maximal amount of retries has reached, the logic + * considers it as an invalid state of the uC/IPA, and + * issues a kernel panic. + * + * Returns: 0 on success. + * -EINVAL in case of invalid input. + * -EBADF in case uC interface is not initialized / + * or the uC has failed previously. + * -EFAULT in case the received status doesn't match + * the expected. + */ +int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status, + bool polling_mode, unsigned long timeout_jiffies) +{ + return ipa3_uc_send_cmd_64b_param(cmd, 0, opcode, + expected_status, polling_mode, timeout_jiffies); +} + +/** + * ipa3_uc_register_handlers() - Registers event, response and log event + * handlers for a specific feature.Please note + * that currently only one handler can be + * registered per feature. 
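+ * Registering again for the same feature simply overwrites the previously
+ * registered handlers; the update is performed under IPA3_UC_LOCK.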
+ * + * Return value: None + */ +void ipa3_uc_register_handlers(enum ipa3_hw_features feature, + struct ipa3_uc_hdlrs *hdlrs) +{ + unsigned long flags = 0; + + if (0 > feature || IPA_HW_FEATURE_MAX <= feature) { + IPAERR("Feature %u is invalid, not registering hdlrs\n", + feature); + return; + } + + IPA3_UC_LOCK(flags); + ipa3_uc_hdlrs[feature] = *hdlrs; + IPA3_UC_UNLOCK(flags); + + IPADBG("uC handlers registered for feature %u\n", feature); +} + +int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client) +{ + const struct ipa_gsi_ep_config *gsi_ep_info; + union IpaHwChkChEmptyCmdData_t cmd; + int ret; + + gsi_ep_info = ipa3_get_gsi_ep_info(ipa_client); + if (!gsi_ep_info) { + IPAERR("Failed getting GSI EP info for client=%d\n", + ipa_client); + return 0; + } + + if (ipa3_uc_state_check()) { + IPADBG("uC cannot be used to validate ch emptiness clnt=%d\n" + , ipa_client); + return 0; + } + + cmd.params.ee_n = gsi_ep_info->ee; + cmd.params.vir_ch_id = gsi_ep_info->ipa_gsi_chan_num; + + IPADBG("uC emptiness check for IPA GSI Channel %d\n", + gsi_ep_info->ipa_gsi_chan_num); + + ret = ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_GSI_CH_EMPTY, 0, + false, 10*HZ); + + return ret; +} + + +/** + * ipa3_uc_notify_clk_state() - notify to uC of clock enable / disable + * @enabled: true if clock are enabled + * + * The function uses the uC interface in order to notify uC before IPA clocks + * are disabled to make sure uC is not in the middle of operation. + * Also after clocks are enabled ned to notify uC to start processing. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_uc_notify_clk_state(bool enabled) +{ + u32 opcode; + + if (ipa3_ctx->ipa_hw_type > IPA_HW_v4_0) { + IPADBG_LOW("not supported past IPA v4.0\n"); + return 0; + } + + /* + * If the uC interface has not been initialized yet, + * don't notify the uC on the enable/disable + */ + if (ipa3_uc_state_check()) { + IPADBG("uC interface will not notify the UC on clock state\n"); + return 0; + } + + IPADBG("uC clock %s notification\n", (enabled) ? "UNGATE" : "GATE"); + + opcode = (enabled) ? IPA_CPU_2_HW_CMD_CLK_UNGATE : + IPA_CPU_2_HW_CMD_CLK_GATE; + + return ipa3_uc_send_cmd(0, opcode, 0, true, 0); +} + +/** + * ipa3_uc_update_hw_flags() - send uC the HW flags to be used + * @flags: This field is expected to be used as bitmask for enum ipa3_hw_flags + * + * Returns: 0 on success, negative on failure + */ +int ipa3_uc_update_hw_flags(u32 flags) +{ + union IpaHwUpdateFlagsCmdData_t cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.newFlags = flags; + return ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_UPDATE_FLAGS, 0, + false, HZ); +} + +/** + * ipa3_uc_rg10_write_reg() - write to register possibly via uC + * + * if the RG10 limitation workaround is enabled, then writing + * to a register will be proxied by the uC due to H/W limitation. 
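+ * (the write is proxied by sending IPA_CPU_2_HW_CMD_REG_WRITE with the
+ * register physical address and value as a 64b immediate parameter)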
+ * This func should be called for RG10 registers only + * + * @Parameters: Like ipahal_write_reg_n() parameters + * + */ +void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val) +{ + int ret; + u32 paddr; + + if (!ipa3_ctx->apply_rg10_wa) + return ipahal_write_reg_n(reg, n, val); + + + /* calculate register physical address */ + paddr = ipa3_ctx->ipa_wrapper_base + ipa3_ctx->ctrl->ipa_reg_base_ofst; + paddr += ipahal_get_reg_n_ofst(reg, n); + + IPADBG("Sending uC cmd to reg write: addr=0x%x val=0x%x\n", + paddr, val); + ret = ipa3_uc_send_cmd_64b_param(paddr, val, + IPA_CPU_2_HW_CMD_REG_WRITE, 0, true, 0); + if (ret) { + IPAERR("failed to send cmd to uC for reg write\n"); + /* Unexpected UC hardware state */ + ipa_assert(); + } +} + +/** + * ipa3_uc_memcpy() - Perform a memcpy action using IPA uC + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) +{ + int res; + struct ipa_mem_buffer mem; + struct IpaHwMemCopyData_t *cmd; + + IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len); + mem.size = sizeof(cmd); + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + cmd = (struct IpaHwMemCopyData_t *)mem.base; + memset(cmd, 0, sizeof(*cmd)); + cmd->destination_addr = dest; + cmd->dest_buffer_size = len; + cmd->source_addr = src; + cmd->source_buffer_size = len; + res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MEMCPY, 0, + true, 10 * HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto free_coherent; + } + + res = 0; +free_coherent: + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + return res; +} + +int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n) +{ + int res; + struct ipa_mem_buffer cmd; + struct IpaHwDbAddrInfo_t *uc_info; + + cmd.size = sizeof(*uc_info); + cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) + return -ENOMEM; + + uc_info = (struct IpaHwDbAddrInfo_t *) cmd.base; + uc_info->remoteIPAAddr = remote_addr; + uc_info->mboxN = mbox_n; + + res = ipa3_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO, 0, + false, 10 * HZ); + + if (res) { + IPAERR("fail to map 0x%x to mbox %d\n", + uc_info->remoteIPAAddr, + uc_info->mboxN); + goto free_coherent; + } + + res = 0; +free_coherent: + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + return res; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c new file mode 100644 index 000000000000..639c2aa813d3 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c @@ -0,0 +1,956 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include "ipa_i.h" + +/* MHI uC interface definitions */ +#define IPA_HW_INTERFACE_MHI_VERSION 0x0004 + +#define IPA_HW_MAX_NUMBER_OF_CHANNELS 2 +#define IPA_HW_MAX_NUMBER_OF_EVENTRINGS 2 +#define IPA_HW_MAX_CHANNEL_HANDLE (IPA_HW_MAX_NUMBER_OF_CHANNELS-1) + +/** + * Values that represent the MHI commands from CPU to IPA HW. + * @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing. 
+ * Once operation was completed HW shall respond with + * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED. + * @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize specific channel to be ready + * to serve MHI transfers. Once initialization was completed HW shall + * respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE. + * IPA_HW_MHI_CHANNEL_STATE_ENABLE + * @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupts data. + * Once operation was completed HW shall respond with + * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED. + * @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change specific channel + * processing state following host request. Once operation was completed + * HW shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE. + * @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL UL syncronization. + * @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing. + */ +enum ipa_cpu_2_hw_mhi_commands { + IPA_CPU_2_HW_CMD_MHI_INIT + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), + IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), + IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2), + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3), + IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4), + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5) +}; + +/** + * Values that represent MHI related HW responses to CPU commands. + * @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to + * IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or + * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands. + */ +enum ipa_hw_2_cpu_mhi_responses { + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), +}; + +/** + * Values that represent MHI related HW event to be sent to CPU. + * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specify the device detected an + * error in an element from the transfer ring associated with the channel + * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specify a transport + * interrupt was asserted when MHI engine is suspended + */ +enum ipa_hw_2_cpu_mhi_events { + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), +}; + +/** + * Channel error types. + * @IPA_HW_CHANNEL_ERROR_NONE: No error persists. + * @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected + */ +enum ipa_hw_channel_errors { + IPA_HW_CHANNEL_ERROR_NONE, + IPA_HW_CHANNEL_INVALID_RE_ERROR +}; + +/** + * MHI error types. 
+ * @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space + * @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array + * @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array + * @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on + * secondary event ring + * @IPA_HW_LINK_ERROR: Link error + */ +enum ipa_hw_mhi_errors { + IPA_HW_INVALID_MMIO_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), + IPA_HW_INVALID_CHANNEL_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), + IPA_HW_INVALID_EVENT_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2), + IPA_HW_NO_ED_IN_RING_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4), + IPA_HW_LINK_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5), +}; + + +/** + * Structure referring to the common and MHI section of 128B shared memory + * located in offset zero of SW Partition in IPA SRAM. + * The shared memory is used for communication between IPA HW and CPU. + * @common: common section in IPA SRAM + * @interfaceVersionMhi: The MHI interface version as reported by HW + * @mhiState: Overall MHI state + * @reserved_2B: reserved + * @mhiCnl0State: State of MHI channel 0. + * The state carries information regarding the error type. + * See IPA_HW_MHI_CHANNEL_STATES. + * @mhiCnl0State: State of MHI channel 1. + * @mhiCnl0State: State of MHI channel 2. + * @mhiCnl0State: State of MHI channel 3 + * @mhiCnl0State: State of MHI channel 4. + * @mhiCnl0State: State of MHI channel 5. + * @mhiCnl0State: State of MHI channel 6. + * @mhiCnl0State: State of MHI channel 7. + * @reserved_37_34: reserved + * @reserved_3B_38: reserved + * @reserved_3F_3C: reserved + */ +struct IpaHwSharedMemMhiMapping_t { + struct IpaHwSharedMemCommonMapping_t common; + u16 interfaceVersionMhi; + u8 mhiState; + u8 reserved_2B; + u8 mhiCnl0State; + u8 mhiCnl1State; + u8 mhiCnl2State; + u8 mhiCnl3State; + u8 mhiCnl4State; + u8 mhiCnl5State; + u8 mhiCnl6State; + u8 mhiCnl7State; + u32 reserved_37_34; + u32 reserved_3B_38; + u32 reserved_3F_3C; +}; + + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command. + * Parameters are sent as pointer thus should be reside in address accessible + * to HW. + * @msiAddress: The MSI base (in device space) used for asserting the interrupt + * (MSI) associated with the event ring + * mmioBaseAddress: The address (in device space) of MMIO structure in + * host space + * deviceMhiCtrlBaseAddress: Base address of the memory region in the device + * address space where the MHI control data structures are allocated by + * the host, including channel context array, event context array, + * and rings. This value is used for host/device address translation. + * deviceMhiDataBaseAddress: Base address of the memory region in the device + * address space where the MHI data buffers are allocated by the host. + * This value is used for host/device address translation. + * firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel + * firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this + * event ring. + */ +struct IpaHwMhiInitCmdData_t { + u32 msiAddress; + u32 mmioBaseAddress; + u32 deviceMhiCtrlBaseAddress; + u32 deviceMhiDataBaseAddress; + u32 firstChannelIndex; + u32 firstEventRingIndex; +}; + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL + * command. Parameters are sent as 32b immediate parameters. + * @hannelHandle: The channel identifier as allocated by driver. 
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @contexArrayIndex: Unique index for channels, between 0 and 255. The index is + * used as an index in channel context array structures. + * @bamPipeId: The IPA pipe number for pipe dedicated for this channel + * @channelDirection: The direction of the channel as defined in the channel + * type field (CHTYPE) in the channel context data structure. + * @reserved: reserved. + */ +union IpaHwMhiInitChannelCmdData_t { + struct IpaHwMhiInitChannelCmdParams_t { + u32 channelHandle:8; + u32 contexArrayIndex:8; + u32 bamPipeId:6; + u32 channelDirection:2; + u32 reserved:8; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command. + * @msiAddress_low: The MSI lower base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring. + * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring. + * @msiMask: Mask indicating number of messages assigned by the host to device + * @msiData: Data Pattern to use when generating the MSI + */ +struct IpaHwMhiMsiCmdData_t { + u32 msiAddress_low; + u32 msiAddress_hi; + u32 msiMask; + u32 msiData; +}; + +/** + * Structure holding the parameters for + * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command. + * Parameters are sent as 32b immediate parameters. + * @requestedState: The requested channel state as was indicated from Host. + * Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state + * @channelHandle: The channel identifier as allocated by driver. + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @LPTransitionRejected: Indication that low power state transition was + * rejected + * @reserved: reserved + */ +union IpaHwMhiChangeChannelStateCmdData_t { + struct IpaHwMhiChangeChannelStateCmdParams_t { + u32 requestedState:8; + u32 channelHandle:8; + u32 LPTransitionRejected:8; + u32 reserved:8; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command. + * Parameters are sent as 32b immediate parameters. + * @channelHandle: The channel identifier as allocated by driver. + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @reserved: reserved + */ +union IpaHwMhiStopEventUpdateData_t { + struct IpaHwMhiStopEventUpdateDataParams_t { + u32 channelHandle:8; + u32 reserved:24; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response. + * Parameters are sent as 32b immediate parameters. + * @state: The new channel state. In case state is not as requested this is + * error indication for the last command + * @channelHandle: The channel identifier + * @additonalParams: For stop: the number of pending transport descriptors + * currently queued + */ +union IpaHwMhiChangeChannelStateResponseData_t { + struct IpaHwMhiChangeChannelStateResponseParams_t { + u32 state:8; + u32 channelHandle:8; + u32 additonalParams:16; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event. + * Parameters are sent as 32b immediate parameters. + * @errorType: Type of error - IPA_HW_CHANNEL_ERRORS + * @channelHandle: The channel identifier as allocated by driver. 
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @reserved: reserved + */ +union IpaHwMhiChannelErrorEventData_t { + struct IpaHwMhiChannelErrorEventParams_t { + u32 errorType:8; + u32 channelHandle:8; + u32 reserved:16; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event. + * Parameters are sent as 32b immediate parameters. + * @channelHandle: The channel identifier as allocated by driver. + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @reserved: reserved + */ +union IpaHwMhiChannelWakeupEventData_t { + struct IpaHwMhiChannelWakeupEventParams_t { + u32 channelHandle:8; + u32 reserved:24; + } params; + u32 raw32b; +}; + +/** + * Structure holding the MHI Common statistics + * @numULDLSync: Number of times UL activity trigged due to DL activity + * @numULTimerExpired: Number of times UL Accm Timer expired + */ +struct IpaHwStatsMhiCmnInfoData_t { + u32 numULDLSync; + u32 numULTimerExpired; + u32 numChEvCtxWpRead; + u32 reserved; +}; + +/** + * Structure holding the MHI Channel statistics + * @doorbellInt: The number of doorbell int + * @reProccesed: The number of ring elements processed + * @bamFifoFull: Number of times Bam Fifo got full + * @bamFifoEmpty: Number of times Bam Fifo got empty + * @bamFifoUsageHigh: Number of times Bam fifo usage went above 75% + * @bamFifoUsageLow: Number of times Bam fifo usage went below 25% + * @bamInt: Number of BAM Interrupts + * @ringFull: Number of times Transfer Ring got full + * @ringEmpty: umber of times Transfer Ring got empty + * @ringUsageHigh: Number of times Transfer Ring usage went above 75% + * @ringUsageLow: Number of times Transfer Ring usage went below 25% + * @delayedMsi: Number of times device triggered MSI to host after + * Interrupt Moderation Timer expiry + * @immediateMsi: Number of times device triggered MSI to host immediately + * @thresholdMsi: Number of times device triggered MSI due to max pending + * events threshold reached + * @numSuspend: Number of times channel was suspended + * @numResume: Number of times channel was suspended + * @num_OOB: Number of times we indicated that we are OOB + * @num_OOB_timer_expiry: Number of times we indicated that we are OOB + * after timer expiry + * @num_OOB_moderation_timer_start: Number of times we started timer after + * sending OOB and hitting OOB again before we processed threshold + * number of packets + * @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode + */ +struct IpaHwStatsMhiCnlInfoData_t { + u32 doorbellInt; + u32 reProccesed; + u32 bamFifoFull; + u32 bamFifoEmpty; + u32 bamFifoUsageHigh; + u32 bamFifoUsageLow; + u32 bamInt; + u32 ringFull; + u32 ringEmpty; + u32 ringUsageHigh; + u32 ringUsageLow; + u32 delayedMsi; + u32 immediateMsi; + u32 thresholdMsi; + u32 numSuspend; + u32 numResume; + u32 num_OOB; + u32 num_OOB_timer_expiry; + u32 num_OOB_moderation_timer_start; + u32 num_db_mode_evt; +}; + +/** + * Structure holding the MHI statistics + * @mhiCmnStats: Stats pertaining to MHI + * @mhiCnlStats: Stats pertaining to each channel + */ +struct IpaHwStatsMhiInfoData_t { + struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats; + struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[ + IPA_HW_MAX_NUMBER_OF_CHANNELS]; +}; + +/** + * Structure holding the MHI Common Config info + * @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled + * @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is + * enabled + * 
@ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events + * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events + */ +struct IpaHwConfigMhiCmnInfoData_t { + u8 isDlUlSyncEnabled; + u8 UlAccmVal; + u8 ulMsiEventThreshold; + u8 dlMsiEventThreshold; +}; + +/** + * Structure holding the parameters for MSI info data + * @msiAddress_low: The MSI lower base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring. + * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring. + * @msiMask: Mask indicating number of messages assigned by the host to device + * @msiData: Data Pattern to use when generating the MSI + */ +struct IpaHwConfigMhiMsiInfoData_t { + u32 msiAddress_low; + u32 msiAddress_hi; + u32 msiMask; + u32 msiData; +}; + +/** + * Structure holding the MHI Channel Config info + * @transferRingSize: The Transfer Ring size in terms of Ring Elements + * @transferRingIndex: The Transfer Ring channel number as defined by host + * @eventRingIndex: The Event Ring Index associated with this Transfer Ring + * @bamPipeIndex: The BAM Pipe associated with this channel + * @isOutChannel: Indication for the direction of channel + * @reserved_0: Reserved byte for maintaining 4byte alignment + * @reserved_1: Reserved byte for maintaining 4byte alignment + */ +struct IpaHwConfigMhiCnlInfoData_t { + u16 transferRingSize; + u8 transferRingIndex; + u8 eventRingIndex; + u8 bamPipeIndex; + u8 isOutChannel; + u8 reserved_0; + u8 reserved_1; +}; + +/** + * Structure holding the MHI Event Config info + * @msiVec: msi vector to invoke MSI interrupt + * @intmodtValue: Interrupt moderation timer (in milliseconds) + * @eventRingSize: The Event Ring size in terms of Ring Elements + * @eventRingIndex: The Event Ring number as defined by host + * @reserved_0: Reserved byte for maintaining 4byte alignment + * @reserved_1: Reserved byte for maintaining 4byte alignment + * @reserved_2: Reserved byte for maintaining 4byte alignment + */ +struct IpaHwConfigMhiEventInfoData_t { + u32 msiVec; + u16 intmodtValue; + u16 eventRingSize; + u8 eventRingIndex; + u8 reserved_0; + u8 reserved_1; + u8 reserved_2; +}; + +/** + * Structure holding the MHI Config info + * @mhiCmnCfg: Common Config pertaining to MHI + * @mhiMsiCfg: Config pertaining to MSI config + * @mhiCnlCfg: Config pertaining to each channel + * @mhiEvtCfg: Config pertaining to each event Ring + */ +struct IpaHwConfigMhiInfoData_t { + struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg; + struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg; + struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[ + IPA_HW_MAX_NUMBER_OF_CHANNELS]; + struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[ + IPA_HW_MAX_NUMBER_OF_EVENTRINGS]; +}; + + +struct ipa3_uc_mhi_ctx { + u8 expected_responseOp; + u32 expected_responseParams; + void (*ready_cb)(void); + void (*wakeup_request_cb)(void); + u32 mhi_uc_stats_ofst; + struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio; +}; + +#define PRINT_COMMON_STATS(x) \ + (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \ + #x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x)) + +#define PRINT_CHANNEL_STATS(ch, x) \ + (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \ + #x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x)) + +struct ipa3_uc_mhi_ctx *ipa3_uc_mhi_ctx; + +static int ipa3_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio, u32 *uc_status) +{ + 
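+ /*
+  * The response is handled here only when both the opcode and the immediate
+  * parameters match what was armed in expected_responseOp and
+  * expected_responseParams before the MHI command was sent; returning
+  * -EINVAL lets the generic handling in ipa3_uc_response_hdlr() proceed.
+  */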
IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp); + if (uc_sram_mmio->responseOp == ipa3_uc_mhi_ctx->expected_responseOp && + uc_sram_mmio->responseParams == + ipa3_uc_mhi_ctx->expected_responseParams) { + *uc_status = 0; + return 0; + } + return -EINVAL; +} + +static void ipa3_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio) +{ + if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) { + union IpaHwMhiChannelErrorEventData_t evt; + + IPAERR("Channel error\n"); + evt.raw32b = uc_sram_mmio->eventParams; + IPAERR("errorType=%d channelHandle=%d reserved=%d\n", + evt.params.errorType, evt.params.channelHandle, + evt.params.reserved); + } else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) { + union IpaHwMhiChannelWakeupEventData_t evt; + + IPADBG("WakeUp channel request\n"); + evt.raw32b = uc_sram_mmio->eventParams; + IPADBG("channelHandle=%d reserved=%d\n", + evt.params.channelHandle, evt.params.reserved); + ipa3_uc_mhi_ctx->wakeup_request_cb(); + } +} + +static void ipa3_uc_mhi_event_log_info_hdlr( + struct IpaHwEventLogInfoData_t *uc_event_top_mmio) +{ + struct Ipa3HwEventInfoData_t *evt_info_ptr; + u32 size; + + if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_MHI)) == 0) { + IPAERR("MHI feature missing 0x%x\n", + uc_event_top_mmio->featureMask); + return; + } + + evt_info_ptr = &uc_event_top_mmio->statsInfo; + size = evt_info_ptr->featureInfo[IPA_HW_FEATURE_MHI].params.size; + if (size != sizeof(struct IpaHwStatsMhiInfoData_t)) { + IPAERR("mhi stats sz invalid exp=%zu is=%u\n", + sizeof(struct IpaHwStatsMhiInfoData_t), + size); + return; + } + + ipa3_uc_mhi_ctx->mhi_uc_stats_ofst = + evt_info_ptr->baseAddrOffset + + evt_info_ptr->featureInfo[IPA_HW_FEATURE_MHI].params.offset; + IPAERR("MHI stats ofst=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_ofst); + if (ipa3_uc_mhi_ctx->mhi_uc_stats_ofst + + sizeof(struct IpaHwStatsMhiInfoData_t) >= + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) + + ipa3_ctx->smem_sz) { + IPAERR("uc_mhi_stats 0x%x outside SRAM\n", + ipa3_uc_mhi_ctx->mhi_uc_stats_ofst); + return; + } + + ipa3_uc_mhi_ctx->mhi_uc_stats_mmio = + ioremap(ipa3_ctx->ipa_wrapper_base + + ipa3_uc_mhi_ctx->mhi_uc_stats_ofst, + sizeof(struct IpaHwStatsMhiInfoData_t)); + if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) { + IPAERR("fail to ioremap uc mhi stats\n"); + return; + } +} + +int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void)) +{ + struct ipa3_uc_hdlrs hdlrs; + + if (ipa3_uc_mhi_ctx) { + IPAERR("Already initialized\n"); + return -EFAULT; + } + + ipa3_uc_mhi_ctx = kzalloc(sizeof(*ipa3_uc_mhi_ctx), GFP_KERNEL); + if (!ipa3_uc_mhi_ctx) { + IPAERR("no mem\n"); + return -ENOMEM; + } + + ipa3_uc_mhi_ctx->ready_cb = ready_cb; + ipa3_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb; + + memset(&hdlrs, 0, sizeof(hdlrs)); + hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_mhi_ctx->ready_cb; + hdlrs.ipa3_uc_response_hdlr = ipa3_uc_mhi_response_hdlr; + hdlrs.ipa_uc_event_hdlr = ipa3_uc_mhi_event_hdlr; + hdlrs.ipa_uc_event_log_info_hdlr = ipa3_uc_mhi_event_log_info_hdlr; + ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs); + + IPADBG("Done\n"); + return 0; +} + +void ipa3_uc_mhi_cleanup(void) +{ + struct ipa3_uc_hdlrs null_hdlrs = { 0 }; + + IPADBG("Enter\n"); + + if (!ipa3_uc_mhi_ctx) { + IPAERR("ipa3_uc_mhi_ctx is not initialized\n"); + return; + } + ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs); + 
kfree(ipa3_uc_mhi_ctx); + ipa3_uc_mhi_ctx = NULL; + + IPADBG("Done\n"); +} + +int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr, + u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx, + u32 first_evt_idx) +{ + int res; + struct ipa_mem_buffer mem; + struct IpaHwMhiInitCmdData_t *init_cmd_data; + struct IpaHwMhiMsiCmdData_t *msi_cmd; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + res = ipa3_uc_update_hw_flags(0); + if (res) { + IPAERR("ipa3_uc_update_hw_flags failed %d\n", res); + goto disable_clks; + } + + mem.size = sizeof(*init_cmd_data); + mem.base = dma_zalloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + res = -ENOMEM; + goto disable_clks; + } + init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base; + init_cmd_data->msiAddress = msi->addr_low; + init_cmd_data->mmioBaseAddress = mmio_addr; + init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr; + init_cmd_data->deviceMhiDataBaseAddress = host_data_addr; + init_cmd_data->firstChannelIndex = first_ch_idx; + init_cmd_data->firstEventRingIndex = first_evt_idx; + res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0, + false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, + mem.phys_base); + goto disable_clks; + } + + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + + mem.size = sizeof(*msi_cmd); + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + res = -ENOMEM; + goto disable_clks; + } + + msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base; + msi_cmd->msiAddress_hi = msi->addr_hi; + msi_cmd->msiAddress_low = msi->addr_low; + msi_cmd->msiData = msi->data; + msi_cmd->msiMask = msi->mask; + res = ipa3_uc_send_cmd((u32)mem.phys_base, + IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, + mem.phys_base); + goto disable_clks; + } + + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; + +} + +int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle, + int contexArrayIndex, int channelDirection) + +{ + int res; + union IpaHwMhiInitChannelCmdData_t init_cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + if (ipa_ep_idx < 0 || ipa_ep_idx >= ipa3_ctx->ipa_num_pipes) { + IPAERR("Invalid ipa_ep_idx.\n"); + return -EINVAL; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; + uc_rsp.params.channelHandle = channelHandle; + ipa3_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&init_cmd, 0, sizeof(init_cmd)); + init_cmd.params.channelHandle = channelHandle; + init_cmd.params.contexArrayIndex = contexArrayIndex; + init_cmd.params.bamPipeId = ipa_ep_idx; + init_cmd.params.channelDirection = channelDirection; + + res = ipa3_uc_send_cmd(init_cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ); + if (res) { + 
IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + + +int ipa3_uc_mhi_reset_channel(int channelHandle) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE; + uc_rsp.params.channelHandle = channelHandle; + ipa3_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE; + cmd.params.channelHandle = channelHandle; + res = ipa3_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa3_uc_mhi_suspend_channel(int channelHandle) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND; + uc_rsp.params.channelHandle = channelHandle; + ipa3_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND; + cmd.params.channelHandle = channelHandle; + res = ipa3_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; + uc_rsp.params.channelHandle = channelHandle; + ipa3_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN; + cmd.params.channelHandle = channelHandle; + cmd.params.LPTransitionRejected = LPTransitionRejected; + res = ipa3_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa3_uc_mhi_stop_event_update_channel(int channelHandle) +{ + union IpaHwMhiStopEventUpdateData_t cmd; + int res; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.channelHandle = channelHandle; + + 
ipa3_uc_mhi_ctx->expected_responseOp = + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE; + ipa3_uc_mhi_ctx->expected_responseParams = cmd.raw32b; + + res = ipa3_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd) +{ + int res; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n", + cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal); + IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n", + cmd->params.ulMsiEventThreshold, + cmd->params.dlMsiEventThreshold); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + res = ipa3_uc_send_cmd(cmd->raw32b, + IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa3_uc_mhi_print_stats(char *dbg_buff, int size) +{ + int nBytes = 0; + int i; + + if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) { + IPAERR("MHI uc stats is not valid\n"); + return 0; + } + + nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, + "Common Stats:\n"); + PRINT_COMMON_STATS(numULDLSync); + PRINT_COMMON_STATS(numULTimerExpired); + PRINT_COMMON_STATS(numChEvCtxWpRead); + + for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) { + nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, + "Channel %d Stats:\n", i); + PRINT_CHANNEL_STATS(i, doorbellInt); + PRINT_CHANNEL_STATS(i, reProccesed); + PRINT_CHANNEL_STATS(i, bamFifoFull); + PRINT_CHANNEL_STATS(i, bamFifoEmpty); + PRINT_CHANNEL_STATS(i, bamFifoUsageHigh); + PRINT_CHANNEL_STATS(i, bamFifoUsageLow); + PRINT_CHANNEL_STATS(i, bamInt); + PRINT_CHANNEL_STATS(i, ringFull); + PRINT_CHANNEL_STATS(i, ringEmpty); + PRINT_CHANNEL_STATS(i, ringUsageHigh); + PRINT_CHANNEL_STATS(i, ringUsageLow); + PRINT_CHANNEL_STATS(i, delayedMsi); + PRINT_CHANNEL_STATS(i, immediateMsi); + PRINT_CHANNEL_STATS(i, thresholdMsi); + PRINT_CHANNEL_STATS(i, numSuspend); + PRINT_CHANNEL_STATS(i, numResume); + PRINT_CHANNEL_STATS(i, num_OOB); + PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry); + PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start); + PRINT_CHANNEL_STATS(i, num_db_mode_evt); + } + + return nBytes; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c new file mode 100644 index 000000000000..348df1940b4c --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c @@ -0,0 +1,625 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */
+
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
+static void ipa3_uc_ntn_event_handler(struct IpaHwSharedMemCommonMapping_t
+	*uc_sram_mmio)
+
+{
+	union Ipa3HwNTNErrorEventData_t ntn_evt;
+
+	if (uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+		ntn_evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+			ntn_evt.params.ntn_error_type,
+			ntn_evt.params.ipa_pipe_number,
+			ntn_evt.params.ntn_ch_err_type);
+	}
+}
+
+static void ipa3_uc_ntn_event_log_info_handler(
+struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+	struct Ipa3HwEventInfoData_t *statsPtr = &uc_event_top_mmio->statsInfo;
+
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+		IPAERR("NTN feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	if (statsPtr->featureInfo[IPA_HW_FEATURE_NTN].params.size !=
+		sizeof(struct Ipa3HwStatsNTNInfoData_t)) {
+		IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct Ipa3HwStatsNTNInfoData_t),
+			statsPtr->featureInfo[IPA_HW_FEATURE_NTN].params.size);
+		return;
+	}
+
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst =
+		uc_event_top_mmio->statsInfo.baseAddrOffset +
+		statsPtr->featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+	IPAERR("NTN stats ofst=0x%x\n", ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+	if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+		sizeof(struct Ipa3HwStatsNTNInfoData_t) >=
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) +
+		ipa3_ctx->smem_sz) {
+		IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+			ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+		return;
+	}
+
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+		ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+		sizeof(struct Ipa3HwStatsNTNInfoData_t));
+	if (!ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc ntn stats\n");
+		return;
+	}
+}
+
+/**
+ * ipa3_get_ntn_stats() - Query NTN statistics from uc
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+	if (unlikely(!ipa3_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!stats || !ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("bad parms stats=%pK ntn_stats=%pK\n",
+			stats,
+			ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	TX_STATS(num_pkts_processed);
+	TX_STATS(ring_stats.ringFull);
+	TX_STATS(ring_stats.ringEmpty);
+	TX_STATS(ring_stats.ringUsageHigh);
+	TX_STATS(ring_stats.ringUsageLow);
+	TX_STATS(ring_stats.RingUtilCount);
+	TX_STATS(gsi_stats.bamFifoFull);
+	TX_STATS(gsi_stats.bamFifoEmpty);
+	TX_STATS(gsi_stats.bamFifoUsageHigh);
+	TX_STATS(gsi_stats.bamFifoUsageLow);
+	TX_STATS(gsi_stats.bamUtilCount);
+	TX_STATS(num_db);
+	TX_STATS(num_qmb_int_handled);
+	TX_STATS(ipa_pipe_number);
+
+	RX_STATS(num_pkts_processed);
+	RX_STATS(ring_stats.ringFull);
+	RX_STATS(ring_stats.ringEmpty);
+	RX_STATS(ring_stats.ringUsageHigh);
+	RX_STATS(ring_stats.ringUsageLow);
+	RX_STATS(ring_stats.RingUtilCount);
+	RX_STATS(gsi_stats.bamFifoFull);
+
RX_STATS(gsi_stats.bamFifoEmpty); + RX_STATS(gsi_stats.bamFifoUsageHigh); + RX_STATS(gsi_stats.bamFifoUsageLow); + RX_STATS(gsi_stats.bamUtilCount); + RX_STATS(num_db); + RX_STATS(num_qmb_int_handled); + RX_STATS(ipa_pipe_number); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + + +int ipa3_ntn_uc_reg_rdyCB(void (*ipa_ready_cb)(void *), void *user_data) +{ + int ret; + + if (!ipa3_ctx) { + IPAERR("IPA ctx is null\n"); + return -ENXIO; + } + + ret = ipa3_uc_state_check(); + if (ret) { + ipa3_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb; + ipa3_ctx->uc_ntn_ctx.priv = user_data; + return 0; + } + + return -EEXIST; +} + +void ipa3_ntn_uc_dereg_rdyCB(void) +{ + ipa3_ctx->uc_ntn_ctx.uc_ready_cb = NULL; + ipa3_ctx->uc_ntn_ctx.priv = NULL; +} + +static void ipa3_uc_ntn_loaded_handler(void) +{ + if (!ipa3_ctx) { + IPAERR("IPA ctx is null\n"); + return; + } + + if (ipa3_ctx->uc_ntn_ctx.uc_ready_cb) { + ipa3_ctx->uc_ntn_ctx.uc_ready_cb( + ipa3_ctx->uc_ntn_ctx.priv); + + ipa3_ctx->uc_ntn_ctx.uc_ready_cb = + NULL; + ipa3_ctx->uc_ntn_ctx.priv = NULL; + } +} + +int ipa3_ntn_init(void) +{ + struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 }; + + uc_ntn_cbs.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler; + uc_ntn_cbs.ipa_uc_event_log_info_hdlr = + ipa3_uc_ntn_event_log_info_handler; + uc_ntn_cbs.ipa_uc_loaded_hdlr = + ipa3_uc_ntn_loaded_handler; + + ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs); + + return 0; +} + +static int ipa3_uc_send_ntn_setup_pipe_cmd( + struct ipa_ntn_setup_info *ntn_info, u8 dir) +{ + int ipa_ep_idx; + int result = 0; + struct ipa_mem_buffer cmd; + struct Ipa3HwNtnSetUpCmdData_t *Ntn_params; + struct IpaHwOffloadSetUpCmdData_t *cmd_data; + struct IpaHwOffloadSetUpCmdData_t_v4_0 *cmd_data_v4_0; + + if (ntn_info == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to get ep idx.\n"); + return -EFAULT; + } + + IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx); + + IPADBG("ring_base_pa = 0x%pa\n", + &ntn_info->ring_base_pa); + IPADBG("ring_base_iova = 0x%pa\n", + &ntn_info->ring_base_iova); + IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size); + IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa); + IPADBG("buff_pool_base_iova = 0x%pa\n", &ntn_info->buff_pool_base_iova); + IPADBG("num_buffers = %d\n", ntn_info->num_buffers); + IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size); + IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + cmd.size = sizeof(*cmd_data_v4_0); + else + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + cmd_data_v4_0 = (struct IpaHwOffloadSetUpCmdData_t_v4_0 *) + cmd.base; + cmd_data_v4_0->protocol = IPA_HW_FEATURE_NTN; + Ntn_params = &cmd_data_v4_0->SetupCh_params.NtnSetupCh_params; + } else { + cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_NTN; + Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params; + } + + if (ntn_info->smmu_enabled) { + Ntn_params->ring_base_pa = (u32)ntn_info->ring_base_iova; + Ntn_params->buff_pool_base_pa = + (u32)ntn_info->buff_pool_base_iova; + } else { + Ntn_params->ring_base_pa = ntn_info->ring_base_pa; + Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa; + } + + 
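+	/* The remaining channel parameters are common to the SMMU and
+	 * non-SMMU cases.
+	 */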
Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size; + Ntn_params->num_buffers = ntn_info->num_buffers; + Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa; + Ntn_params->data_buff_size = ntn_info->data_buff_size; + Ntn_params->ipa_pipe_number = ipa_ep_idx; + Ntn_params->dir = dir; + + result = ipa3_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) + result = -EFAULT; + + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + return result; +} + +static int ipa3_smmu_map_uc_ntn_pipes(struct ipa_ntn_setup_info *params, + bool map) +{ + struct iommu_domain *smmu_domain; + int result; + int i; + u64 iova; + phys_addr_t pa; + u64 iova_p; + phys_addr_t pa_p; + u32 size_p; + + if (params->data_buff_size > PAGE_SIZE) { + IPAERR("invalid data buff size\n"); + return -EINVAL; + } + + result = ipa3_smmu_map_peer_reg(rounddown(params->ntn_reg_base_ptr_pa, + PAGE_SIZE), map, IPA_SMMU_CB_UC); + if (result) { + IPAERR("failed to %s uC regs %d\n", + map ? "map" : "unmap", result); + goto fail; + } + + if (params->smmu_enabled) { + IPADBG("smmu is enabled on EMAC\n"); + result = ipa3_smmu_map_peer_buff((u64)params->ring_base_iova, + params->ntn_ring_size, map, params->ring_base_sgt, + IPA_SMMU_CB_UC); + if (result) { + IPAERR("failed to %s ntn ring %d\n", + map ? "map" : "unmap", result); + goto fail_map_ring; + } + result = ipa3_smmu_map_peer_buff( + (u64)params->buff_pool_base_iova, + params->num_buffers * 4, map, + params->buff_pool_base_sgt, IPA_SMMU_CB_UC); + if (result) { + IPAERR("failed to %s pool buffs %d\n", + map ? "map" : "unmap", result); + goto fail_map_buffer_smmu_enabled; + } + } else { + IPADBG("smmu is disabled on EMAC\n"); + result = ipa3_smmu_map_peer_buff((u64)params->ring_base_pa, + params->ntn_ring_size, map, NULL, IPA_SMMU_CB_UC); + if (result) { + IPAERR("failed to %s ntn ring %d\n", + map ? "map" : "unmap", result); + goto fail_map_ring; + } + result = ipa3_smmu_map_peer_buff(params->buff_pool_base_pa, + params->num_buffers * 4, map, NULL, IPA_SMMU_CB_UC); + if (result) { + IPAERR("failed to %s pool buffs %d\n", + map ? "map" : "unmap", result); + goto fail_map_buffer_smmu_disabled; + } + } + + if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) { + IPADBG("AP SMMU is set to s1 bypass\n"); + return 0; + } + + smmu_domain = ipa3_get_smmu_domain(); + if (!smmu_domain) { + IPAERR("invalid smmu domain\n"); + return -EINVAL; + } + + for (i = 0; i < params->num_buffers; i++) { + iova = (u64)params->data_buff_list[i].iova; + pa = (phys_addr_t)params->data_buff_list[i].pa; + IPA_SMMU_ROUND_TO_PAGE(iova, pa, params->data_buff_size, iova_p, + pa_p, size_p); + IPADBG("%s 0x%llx to 0x%pa size %d\n", map ? 
"mapping" : + "unmapping", iova_p, &pa_p, size_p); + if (map) { + result = ipa3_iommu_map(smmu_domain, iova_p, pa_p, + size_p, IOMMU_READ | IOMMU_WRITE); + if (result) + IPAERR("Fail to map 0x%llx\n", iova); + } else { + result = iommu_unmap(smmu_domain, iova_p, size_p); + if (result != params->data_buff_size) + IPAERR("Fail to unmap 0x%llx\n", iova); + } + if (result) { + if (params->smmu_enabled) + goto fail_map_data_buff_smmu_enabled; + else + goto fail_map_data_buff_smmu_disabled; + } + } + return 0; + +fail_map_data_buff_smmu_enabled: + ipa3_smmu_map_peer_buff((u64)params->buff_pool_base_iova, + params->num_buffers * 4, !map, NULL, IPA_SMMU_CB_UC); + goto fail_map_buffer_smmu_enabled; +fail_map_data_buff_smmu_disabled: + ipa3_smmu_map_peer_buff(params->buff_pool_base_pa, + params->num_buffers * 4, !map, NULL, IPA_SMMU_CB_UC); + goto fail_map_buffer_smmu_disabled; +fail_map_buffer_smmu_enabled: + ipa3_smmu_map_peer_buff((u64)params->ring_base_iova, + params->ntn_ring_size, !map, params->ring_base_sgt, + IPA_SMMU_CB_UC); + goto fail_map_ring; +fail_map_buffer_smmu_disabled: + ipa3_smmu_map_peer_buff((u64)params->ring_base_pa, + params->ntn_ring_size, !map, NULL, IPA_SMMU_CB_UC); +fail_map_ring: + ipa3_smmu_map_peer_reg(rounddown(params->ntn_reg_base_ptr_pa, + PAGE_SIZE), !map, IPA_SMMU_CB_UC); +fail: + return result; +} + +/** + * ipa3_setup_uc_ntn_pipes() - setup uc offload pipes + */ +int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp) +{ + struct ipa3_ep_context *ep_ul; + struct ipa3_ep_context *ep_dl; + int ipa_ep_idx_ul; + int ipa_ep_idx_dl; + int result = 0; + + if (in == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client); + ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client); + if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) { + IPAERR("fail to alloc EP.\n"); + return -EFAULT; + } + + ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul]; + ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl]; + + if (ep_ul->valid || ep_dl->valid) { + IPAERR("EP already allocated ul:%d dl:%d\n", + ep_ul->valid, ep_dl->valid); + return -EFAULT; + } + + memset(ep_ul, 0, offsetof(struct ipa3_ep_context, sys)); + memset(ep_dl, 0, offsetof(struct ipa3_ep_context, sys)); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + /* setup ul ep cfg */ + ep_ul->valid = 1; + ep_ul->client = in->ul.client; + ep_ul->client_notify = notify; + ep_ul->priv = priv; + + memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg)); + ep_ul->cfg.nat.nat_en = IPA_SRC_NAT; + ep_ul->cfg.hdr.hdr_len = hdr_len; + ep_ul->cfg.mode.mode = IPA_BASIC; + + if (ipa3_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) { + IPAERR("fail to setup ul pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + result = ipa3_smmu_map_uc_ntn_pipes(&in->ul, true); + if (result) { + IPAERR("failed to map SMMU for UL %d\n", result); + goto fail; + } + + if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) { + IPAERR("fail to send cmd to uc for ul pipe\n"); + result = -EFAULT; + goto fail_smmu_map_ul; + } + ipa3_install_dflt_flt_rules(ipa_ep_idx_ul); + outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX; + ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED; + IPADBG("client %d (ep: %d) connected\n", in->ul.client, + ipa_ep_idx_ul); + + /* setup dl ep cfg */ + ep_dl->valid = 1; + ep_dl->client = in->dl.client; + memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg)); + ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT; + ep_dl->cfg.hdr.hdr_len = hdr_len; + ep_dl->cfg.mode.mode = IPA_BASIC; + + 
if (ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) { + IPAERR("fail to setup dl pipe cfg\n"); + result = -EFAULT; + goto fail_smmu_map_ul; + } + + result = ipa3_smmu_map_uc_ntn_pipes(&in->dl, true); + if (result) { + IPAERR("failed to map SMMU for DL %d\n", result); + goto fail_smmu_map_ul; + } + + if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) { + IPAERR("fail to send cmd to uc for dl pipe\n"); + result = -EFAULT; + goto fail_smmu_map_dl; + } + outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX; + ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED; + + result = ipa3_enable_data_path(ipa_ep_idx_dl); + if (result) { + IPAERR("Enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_dl); + result = -EFAULT; + goto fail_smmu_map_dl; + } + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG("client %d (ep: %d) connected\n", in->dl.client, + ipa_ep_idx_dl); + + return 0; + +fail_smmu_map_dl: + ipa3_smmu_map_uc_ntn_pipes(&in->dl, false); +fail_smmu_map_ul: + ipa3_smmu_map_uc_ntn_pipes(&in->ul, false); +fail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +/** + * ipa3_tear_down_uc_offload_pipes() - tear down uc offload pipes + */ + +int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, + int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params) +{ + struct ipa_mem_buffer cmd; + struct ipa3_ep_context *ep_ul, *ep_dl; + struct IpaHwOffloadCommonChCmdData_t *cmd_data; + struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd_data_v4_0; + union Ipa3HwNtnCommonChCmdData_t *tear; + int result = 0; + + IPADBG("ep_ul = %d\n", ipa_ep_idx_ul); + IPADBG("ep_dl = %d\n", ipa_ep_idx_dl); + + ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul]; + ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl]; + + if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED || + ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) { + IPAERR("channel bad state: ul %d dl %d\n", + ep_ul->uc_offload_state, ep_dl->uc_offload_state); + return -EFAULT; + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + cmd.size = sizeof(*cmd_data_v4_0); + else + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + cmd_data_v4_0 = (struct IpaHwOffloadCommonChCmdData_t_v4_0 *) + cmd.base; + cmd_data_v4_0->protocol = IPA_HW_FEATURE_NTN; + tear = &cmd_data_v4_0->CommonCh_params.NtnCommonCh_params; + } else { + cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_NTN; + tear = &cmd_data->CommonCh_params.NtnCommonCh_params; + } + + /* teardown the DL pipe */ + ipa3_disable_data_path(ipa_ep_idx_dl); + /* + * Reset ep before sending cmd otherwise disconnect + * during data transfer will result into + * enormous suspend interrupts + */ + memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context)); + IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl); + tear->params.ipa_pipe_number = ipa_ep_idx_dl; + result = ipa3_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + IPAERR("fail to tear down dl pipe\n"); + result = -EFAULT; + goto fail; + } + + /* unmap the DL pipe */ + result = ipa3_smmu_map_uc_ntn_pipes(¶ms->dl, false); + if (result) { + IPAERR("failed to unmap SMMU for DL %d\n", result); + goto fail; + } + + /* teardown the UL pipe */ + tear->params.ipa_pipe_number = ipa_ep_idx_ul; + 
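+	/*
+	 * The same DMA command buffer is reused for the UL teardown; only the
+	 * pipe number in the payload changes.
+	 */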
result = ipa3_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + IPAERR("fail to tear down ul pipe\n"); + result = -EFAULT; + goto fail; + } + + /* unmap the UL pipe */ + result = ipa3_smmu_map_uc_ntn_pipes(¶ms->ul, false); + if (result) { + IPAERR("failed to unmap SMMU for UL %d\n", result); + goto fail; + } + + ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul); + memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context)); + IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul); + +fail: + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h new file mode 100644 index 000000000000..7f32952cd98a --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h @@ -0,0 +1,625 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_UC_OFFLOAD_I_H_ +#define _IPA_UC_OFFLOAD_I_H_ + +#include +#include "ipa_i.h" + +/* + * Neutrino protocol related data structures + */ + +#define IPA_UC_MAX_NTN_TX_CHANNELS 1 +#define IPA_UC_MAX_NTN_RX_CHANNELS 1 + +#define IPA_NTN_TX_DIR 1 +#define IPA_NTN_RX_DIR 2 + +#define IPA_WDI3_TX_DIR 1 +#define IPA_WDI3_RX_DIR 2 + +/** + * @brief Enum value determined based on the feature it + * corresponds to + * +----------------+----------------+ + * | 3 bits | 5 bits | + * +----------------+----------------+ + * | HW_FEATURE | OPCODE | + * +----------------+----------------+ + * + */ +#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode) +#define EXTRACT_UC_FEATURE(value) (value >> 5) + +#define IPA_HW_NUM_FEATURES 0x8 + +/** + * enum ipa3_hw_features - Values that represent the features supported + * in IPA HW + * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW + * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW + * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse + * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW + * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW + * @IPA_HW_FEATURE_OFFLOAD : Feature related to NTN operation in IPA HW + * @IPA_HW_FEATURE_WDI3 : Feature related to WDI operation in IPA HW + */ +enum ipa3_hw_features { + IPA_HW_FEATURE_COMMON = 0x0, + IPA_HW_FEATURE_MHI = 0x1, + IPA_HW_FEATURE_POWER_COLLAPSE = 0x2, + IPA_HW_FEATURE_WDI = 0x3, + IPA_HW_FEATURE_ZIP = 0x4, + IPA_HW_FEATURE_NTN = 0x5, + IPA_HW_FEATURE_OFFLOAD = 0x6, + IPA_HW_FEATURE_WDI3 = 0x7, + IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES +}; + +/** + * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU. + * @IPA_HW_2_CPU_EVENT_NO_OP : No event present + * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the + * device + * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information + */ +enum ipa3_hw_2_cpu_events { + IPA_HW_2_CPU_EVENT_NO_OP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_HW_2_CPU_EVENT_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_2_CPU_EVENT_LOG_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), +}; + +/** + * enum ipa3_hw_errors - Common error types. 
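+ * All values below are built with FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, n);
+ * since IPA_HW_FEATURE_COMMON is 0x0, an opcode of e.g. 3 encodes to
+ * (0x0 << 5) | 3 = 0x03.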
+ * @IPA_HW_ERROR_NONE : No error persists + * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell + * @IPA_HW_DMA_ERROR : Unexpected DMA error + * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset. + * @IPA_HW_INVALID_OPCODE : Invalid opcode sent + * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command + * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed + * @IPA_HW_CONS_STOP_FAILURE : NTN/ETH CONS stop failed + * @IPA_HW_PROD_STOP_FAILURE : NTN/ETH PROD stop failed + */ +enum ipa3_hw_errors { + IPA_HW_ERROR_NONE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_HW_INVALID_DOORBELL_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_DMA_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), + IPA_HW_FATAL_SYSTEM_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3), + IPA_HW_INVALID_OPCODE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4), + IPA_HW_INVALID_PARAMS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5), + IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6), + IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7), + IPA_HW_GSI_CH_NOT_EMPTY_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8), + IPA_HW_CONS_STOP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9), + IPA_HW_PROD_STOP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10) +}; + +/** + * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common + * section in 128B shared memory located in offset zero of SW Partition in IPA + * SRAM. + * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS + * @cmdParams : CPU->HW command parameter lower 32bit. + * @cmdParams_hi : CPU->HW command parameter higher 32bit. + * of parameters (immediate parameters) and point on structure in system memory + * (in such case the address must be accessible for HW) + * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES + * @responseParams : HW->CPU response parameter. The parameter filed can hold 32 + * bits of parameters (immediate parameters) and point on structure in system + * memory + * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS + * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 + * bits of parameters (immediate parameters) and point on + * structure in system memory + * @firstErrorAddress : Contains the address of first error-source on SNOC + * @hwState : State of HW. The state carries information regarding the + * error type. + * @warningCounter : The warnings counter. The counter carries information + * regarding non fatal errors in HW + * @interfaceVersionCommon : The Common interface version as reported by HW + * + * The shared memory is used for communication between IPA HW and CPU. 
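+ *
+ * As a reading aid: the reserved_* field names encode byte offsets within
+ * this packed 128B mapping, e.g. responseOp sits at offset 0x0C (hence the
+ * following reserved_0D) and eventOp at offset 0x14 (followed by reserved_15).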
+ */ +struct IpaHwSharedMemCommonMapping_t { + u8 cmdOp; + u8 reserved_01; + u16 reserved_03_02; + u32 cmdParams; + u32 cmdParams_hi; + u8 responseOp; + u8 reserved_0D; + u16 reserved_0F_0E; + u32 responseParams; + u8 eventOp; + u8 reserved_15; + u16 reserved_17_16; + u32 eventParams; + u32 firstErrorAddress; + u8 hwState; + u8 warningCounter; + u16 reserved_23_22; + u16 interfaceVersionCommon; + u16 reserved_27_26; +} __packed; + +/** + * union Ipa3HwFeatureInfoData_t - parameters for stats/config blob + * + * @offset : Location of a feature within the EventInfoData + * @size : Size of the feature + */ +union Ipa3HwFeatureInfoData_t { + struct IpaHwFeatureInfoParams_t { + u32 offset:16; + u32 size:16; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwErrorEventData_t - HW->CPU Common Events + * @errorType : Entered when a system error is detected by the HW. Type of + * error is specified by IPA_HW_ERRORS + * @reserved : Reserved + */ +union IpaHwErrorEventData_t { + struct IpaHwErrorEventParams_t { + u32 errorType:8; + u32 reserved:24; + } __packed params; + u32 raw32b; +} __packed; + +/** + * struct Ipa3HwEventInfoData_t - Structure holding the parameters for + * statistics and config info + * + * @baseAddrOffset : Base Address Offset of the statistics or config + * structure from IPA_WRAPPER_BASE + * @Ipa3HwFeatureInfoData_t : Location and size of each feature within + * the statistics or config structure + * + * @note Information about each feature in the featureInfo[] + * array is populated at predefined indices per the IPA_HW_FEATURES + * enum definition + */ +struct Ipa3HwEventInfoData_t { + u32 baseAddrOffset; + union Ipa3HwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES]; +} __packed; + +/** + * struct IpaHwEventLogInfoData_t - Structure holding the parameters for + * IPA_HW_2_CPU_EVENT_LOG_INFO Event + * + * @featureMask : Mask indicating the features enabled in HW. + * Refer IPA_HW_FEATURE_MASK + * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event + * Log Buffer structure + * @statsInfo : Statistics related information + * @configInfo : Configuration related information + * + * @note The offset location of this structure from IPA_WRAPPER_BASE + * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO + * Event + */ +struct IpaHwEventLogInfoData_t { + u32 featureMask; + u32 circBuffBaseAddrOffset; + struct Ipa3HwEventInfoData_t statsInfo; + struct Ipa3HwEventInfoData_t configInfo; + +} __packed; + +/** + * struct ipa3_uc_ntn_ctx + * @ntn_uc_stats_ofst: Neutrino stats offset + * @ntn_uc_stats_mmio: Neutrino stats + * @priv: private data of client + * @uc_ready_cb: uc Ready cb + */ +struct ipa3_uc_ntn_ctx { + u32 ntn_uc_stats_ofst; + struct Ipa3HwStatsNTNInfoData_t *ntn_uc_stats_mmio; + void *priv; + ipa_uc_ready_cb uc_ready_cb; +}; + +/** + * enum ipa3_hw_2_cpu_ntn_events - Values that represent HW event + * to be sent to CPU + * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW + * detected an error in NTN + * + */ +enum ipa3_hw_2_cpu_ntn_events { + IPA_HW_2_CPU_EVENT_NTN_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0), +}; + + +/** + * enum ipa3_hw_ntn_errors - NTN specific error types. + * @IPA_HW_NTN_ERROR_NONE : No error persists + * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel + */ +enum ipa3_hw_ntn_errors { + IPA_HW_NTN_ERROR_NONE = 0, + IPA_HW_NTN_CHANNEL_ERROR = 1 +}; + +/** + * enum ipa3_hw_ntn_channel_states - Values that represent NTN + * channel state machine. 
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is + * initialized but disabled + * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running. + * Entered after SET_UP_COMMAND is processed successfully + * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not + * be in use in operational scenario + * + * These states apply to both Tx and Rx paths. These do not reflect the + * sub-state the state machine may be in. + */ +enum ipa3_hw_ntn_channel_states { + IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1, + IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2, + IPA_HW_NTN_CHANNEL_STATE_ERROR = 3, + IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF +}; + +/** + * enum ipa3_hw_ntn_channel_errors - List of NTN Channel error + * types. This is present in the event param + * @IPA_HW_NTN_CH_ERR_NONE: No error persists + * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine + * transition + * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating + * num RE to bring + * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update + * failed in Rx ring + * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine + * transition + * @IPA_HW_NTN_RX_CACHE_NON_EMPTY: + * @IPA_HW_NTN_CH_ERR_RESERVED: + * + * These states apply to both Tx and Rx paths. These do not + * reflect the sub-state the state machine may be in. + */ +enum ipa3_hw_ntn_channel_errors { + IPA_HW_NTN_CH_ERR_NONE = 0, + IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1, + IPA_HW_NTN_TX_FSM_ERROR = 2, + IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3, + IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4, + IPA_HW_NTN_RX_FSM_ERROR = 5, + IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6, + IPA_HW_NTN_CH_ERR_RESERVED = 0xFF +}; + + +/** + * struct Ipa3HwNtnSetUpCmdData_t - Ntn setup command data + * @ring_base_pa: physical address of the base of the Tx/Rx NTN + * ring + * @buff_pool_base_pa: physical address of the base of the Tx/Rx + * buffer pool + * @ntn_ring_size: size of the Tx/Rx NTN ring + * @num_buffers: Rx/tx buffer pool size + * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN + * Ring's tail pointer + * @ipa_pipe_number: IPA pipe number that has to be used for the + * Tx/Rx path + * @dir: Tx/Rx Direction + * @data_buff_size: size of the each data buffer allocated in + * DDR + */ +struct Ipa3HwNtnSetUpCmdData_t { + u32 ring_base_pa; + u32 buff_pool_base_pa; + u16 ntn_ring_size; + u16 num_buffers; + u32 ntn_reg_base_ptr_pa; + u8 ipa_pipe_number; + u8 dir; + u16 data_buff_size; + +} __packed; + +struct IpaHwWdi3SetUpCmdData_t { + u32 transfer_ring_base_pa; + u32 transfer_ring_base_pa_hi; + + u32 transfer_ring_size; + + u32 transfer_ring_doorbell_pa; + u32 transfer_ring_doorbell_pa_hi; + + u32 event_ring_base_pa; + u32 event_ring_base_pa_hi; + + u32 event_ring_size; + + u32 event_ring_doorbell_pa; + u32 event_ring_doorbell_pa_hi; + + u16 num_pkt_buffers; + u8 ipa_pipe_number; + u8 dir; + + u16 pkt_offset; + u16 reserved0; + + u32 desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE]; +} __packed; + +/** + * struct Ipa3HwNtnCommonChCmdData_t - Structure holding the + * parameters for Ntn Tear down command data params + * + *@ipa_pipe_number: IPA pipe number. 
This could be Tx or an Rx pipe + */ +union Ipa3HwNtnCommonChCmdData_t { + struct IpaHwNtnCommonChCmdParams_t { + u32 ipa_pipe_number :8; + u32 reserved :24; + } __packed params; + uint32_t raw32b; +} __packed; + +union IpaHwWdi3CommonChCmdData_t { + struct IpaHwWdi3CommonChCmdParams_t { + u32 ipa_pipe_number :8; + u32 reserved :24; + } __packed params; + u32 raw32b; +} __packed; + +/** + * struct Ipa3HwNTNErrorEventData_t - Structure holding the + * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed + * as immediate params in the shared memory + * + *@ntn_error_type: type of NTN error (ipa3_hw_ntn_errors) + *@ipa_pipe_number: IPA pipe number on which error has happened + * Applicable only if error type indicates channel error + *@ntn_ch_err_type: Information about the channel error (if + * available) + */ +union Ipa3HwNTNErrorEventData_t { + struct IpaHwNTNErrorEventParams_t { + u32 ntn_error_type :8; + u32 reserved :8; + u32 ipa_pipe_number :8; + u32 ntn_ch_err_type :8; + } __packed params; + uint32_t raw32b; +} __packed; + +/** + * struct NTN3RxInfoData_t - NTN Structure holding the Rx pipe + * information + * + *@num_pkts_processed: Number of packets processed - cumulative + * + *@ring_stats: + *@gsi_stats: + *@num_db: Number of times the doorbell was rung + *@num_qmb_int_handled: Number of QMB interrupts handled + *@ipa_pipe_number: The IPA Rx/Tx pipe number. + */ +struct NTN3RxInfoData_t { + u32 num_pkts_processed; + struct IpaHwRingStats_t ring_stats; + struct IpaHwBamStats_t gsi_stats; + u32 num_db; + u32 num_qmb_int_handled; + u32 ipa_pipe_number; +} __packed; + + +/** + * struct NTN3TxInfoData_t - Structure holding the NTN Tx channel + * Ensure that this is always word aligned + * + *@num_pkts_processed: Number of packets processed - cumulative + *@tail_ptr_val: Latest value of doorbell written to copy engine + *@num_db_fired: Number of DB from uC FW to Copy engine + * + *@tx_comp_ring_stats: + *@bam_stats: + *@num_db: Number of times the doorbell was rung + *@num_qmb_int_handled: Number of QMB interrupts handled + */ +struct NTN3TxInfoData_t { + u32 num_pkts_processed; + struct IpaHwRingStats_t ring_stats; + struct IpaHwBamStats_t gsi_stats; + u32 num_db; + u32 num_qmb_int_handled; + u32 ipa_pipe_number; +} __packed; + + +/** + * struct Ipa3HwStatsNTNInfoData_t - Structure holding the NTN Tx + * channel Ensure that this is always word aligned + * + */ +struct Ipa3HwStatsNTNInfoData_t { + struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS]; + struct NTN3TxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS]; +} __packed; + + +/* + * uC offload related data structures + */ +#define IPA_UC_OFFLOAD_CONNECTED BIT(0) +#define IPA_UC_OFFLOAD_ENABLED BIT(1) +#define IPA_UC_OFFLOAD_RESUMED BIT(2) + +/** + * enum ipa_cpu_2_hw_offload_commands - Values that represent + * the offload commands from CPU + * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up + * Offload protocol's Tx/Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down + * Offload protocol's Tx/ Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE : Command to enable + * Offload protocol's Tx/Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE : Command to disable + * Offload protocol's Tx/ Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND : Command to suspend + * Offload protocol's Tx/Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_RESUME : Command to resume + * Offload protocol's Tx/ Rx Path + */ +enum ipa_cpu_2_hw_offload_commands { + IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP = + 
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2), + IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3), + IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4), + IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5), + IPA_CPU_2_HW_CMD_OFFLOAD_RESUME = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6), +}; + + +/** + * enum ipa3_hw_offload_channel_states - Values that represent + * offload channel state machine. + * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is + * initialized but disabled + * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running. + * Entered after SET_UP_COMMAND is processed successfully + * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not + * be in use in operational scenario + * + * These states apply to both Tx and Rx paths. These do not + * reflect the sub-state the state machine may be in + */ +enum ipa3_hw_offload_channel_states { + IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1, + IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2, + IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3, + IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF +}; + + +/** + * enum ipa3_hw_2_cpu_cmd_resp_status - Values that represent + * offload related command response status to be sent to CPU. + */ +enum ipa3_hw_2_cpu_offload_cmd_resp_status { + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0), + IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1), + IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2), + IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3), + IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4), + IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5), + IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6), + IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7), + IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8), + IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9), + IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10), + IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11), + IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12), + IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13), + IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14), +}; + +/** + * struct IpaHwSetUpCmd - + * + * + */ +union IpaHwSetUpCmd { + struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params; + struct IpaHwWdi3SetUpCmdData_t Wdi3SetupCh_params; +} __packed; + +/** + * struct IpaHwOffloadSetUpCmdData_t - + * + * + */ +struct IpaHwOffloadSetUpCmdData_t { + u8 protocol; + union IpaHwSetUpCmd SetupCh_params; +} __packed; + +/** + * struct IpaHwOffloadSetUpCmdData_t_v4_0 - + * + * + */ +struct IpaHwOffloadSetUpCmdData_t_v4_0 { + u32 protocol; + union IpaHwSetUpCmd SetupCh_params; +} __packed; + +/** + * struct IpaHwCommonChCmd - Structure holding the parameters + * for 
IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN + * + * + */ +union IpaHwCommonChCmd { + union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params; + union IpaHwWdi3CommonChCmdData_t Wdi3CommonCh_params; +} __packed; + +struct IpaHwOffloadCommonChCmdData_t { + u8 protocol; + union IpaHwCommonChCmd CommonCh_params; +} __packed; + +struct IpaHwOffloadCommonChCmdData_t_v4_0 { + u32 protocol; + union IpaHwCommonChCmd CommonCh_params; +} __packed; + + +#endif /* _IPA_UC_OFFLOAD_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c new file mode 100644 index 000000000000..8892068dcfe8 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c @@ -0,0 +1,2788 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#include "ipa_i.h" +#include +#include +#include +#include "ipa_qmi_service.h" + +#define IPA_HOLB_TMR_DIS 0x0 + +#define IPA_HW_INTERFACE_WDI_VERSION 0x0001 +#define IPA_HW_WDI_RX_MBOX_START_INDEX 48 +#define IPA_HW_WDI_TX_MBOX_START_INDEX 50 +#define IPA_WDI_RING_ALIGNMENT 8 + +#define IPA_GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */ + +#define IPA_AGGR_PKT_LIMIT 1 +#define IPA_AGGR_HARD_BYTE_LIMIT 2 /*2 Kbytes Agger hard byte limit*/ +#define UPDATE_RI_MODERATION_THRESHOLD 8 + + +#define IPA_WDI_CONNECTED BIT(0) +#define IPA_WDI_ENABLED BIT(1) +#define IPA_WDI_RESUMED BIT(2) +#define IPA_UC_POLL_SLEEP_USEC 100 + +#define GSI_STOP_MAX_RETRY_CNT 10 + +struct ipa_wdi_res { + struct ipa_wdi_buffer_info *res; + unsigned int nents; + bool valid; +}; + +static struct ipa_wdi_res wdi_res[IPA_WDI_MAX_RES]; + +static void ipa3_uc_wdi_loaded_handler(void); + +/** + * enum ipa_hw_2_cpu_wdi_events - Values that represent HW event to be sent to + * CPU. + * @IPA_HW_2_CPU_EVENT_WDI_ERROR : Event to specify that HW detected an error + * in WDI + */ +enum ipa_hw_2_cpu_wdi_events { + IPA_HW_2_CPU_EVENT_WDI_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0), +}; + +/** + * enum ipa_hw_wdi_channel_states - Values that represent WDI channel state + * machine. + * @IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED : Channel is initialized but + * disabled + * @IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND : Channel is enabled but in + * suspended state + * @IPA_HW_WDI_CHANNEL_STATE_RUNNING : Channel is running. Entered after + * SET_UP_COMMAND is processed successfully + * @IPA_HW_WDI_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_WDI_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use in + * operational scenario + * + * These states apply to both Tx and Rx paths. These do not reflect the + * sub-state the state machine may be in. 
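+ *
+ * The current Tx/Rx channel state is visible to the CPU through the
+ * wdi_tx_ch_0_state and wdi_rx_ch_0_state fields of
+ * struct IpaHwSharedMemWdiMapping_t (logged by ipa3_uc_wdi_event_handler).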
+ */
+enum ipa_hw_wdi_channel_states {
+	IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND = 2,
+	IPA_HW_WDI_CHANNEL_STATE_RUNNING = 3,
+	IPA_HW_WDI_CHANNEL_STATE_ERROR = 4,
+	IPA_HW_WDI_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * enum ipa_cpu_2_hw_wdi_commands - Values that represent the WDI commands from
+ * CPU
+ * @IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : Command to set up WDI Tx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_SET_UP : Command to set up WDI Rx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG : Provide extended config info for Rx path
+ * @IPA_CPU_2_HW_CMD_WDI_CH_ENABLE : Command to enable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_DISABLE : Command to disable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND : Command to suspend a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_RESUME : Command to resume a channel
+ * @IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN : Command to tear down WDI Tx/ Rx Path
+ */
+enum ipa_cpu_2_hw_wdi_commands {
+	IPA_CPU_2_HW_CMD_WDI_TX_SET_UP =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_CPU_2_HW_CMD_WDI_RX_SET_UP =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_CPU_2_HW_CMD_WDI_CH_ENABLE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_CPU_2_HW_CMD_WDI_CH_DISABLE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_CPU_2_HW_CMD_WDI_CH_RESUME =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+};
+
+/**
+ * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent WDI related
+ * command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_cmd_resp_status {
+	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
+	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
+	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
+	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
+	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
+	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
+	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
+	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
+	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
+};
+
+/**
+ * enum ipa_hw_wdi_errors - WDI specific error types.
+ * @IPA_HW_WDI_ERROR_NONE : No error persists
+ * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa_hw_wdi_errors {
+	IPA_HW_WDI_ERROR_NONE = 0,
+	IPA_HW_WDI_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_wdi_ch_errors - List of WDI Channel error types. This is present
+ * in the event param.
+ * @IPA_HW_WDI_CH_ERR_NONE : No error persists
+ * @IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL : Write pointer update failed in Tx
+ * Completion ring
+ * @IPA_HW_WDI_TX_FSM_ERROR : Error in the state machine transition
+ * @IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL : Error while calculating num RE to bring
+ * @IPA_HW_WDI_CH_ERR_RESERVED : Reserved - Not available for CPU to use
+ */
+enum ipa_hw_wdi_ch_errors {
+	IPA_HW_WDI_CH_ERR_NONE = 0,
+	IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL = 1,
+	IPA_HW_WDI_TX_FSM_ERROR = 2,
+	IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL = 3,
+	IPA_HW_WDI_CH_ERR_RESERVED = 0xFF
+};
+
+/**
+ * struct IpaHwSharedMemWdiMapping_t - Structure referring to the common and
+ * WDI section of 128B shared memory located in offset zero of SW Partition in
+ * IPA SRAM.
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemWdiMapping_t {
+	struct IpaHwSharedMemCommonMapping_t common;
+	u32 reserved_2B_28;
+	u32 reserved_2F_2C;
+	u32 reserved_33_30;
+	u32 reserved_37_34;
+	u32 reserved_3B_38;
+	u32 reserved_3F_3C;
+	u16 interfaceVersionWdi;
+	u16 reserved_43_42;
+	u8 wdi_tx_ch_0_state;
+	u8 wdi_rx_ch_0_state;
+	u16 reserved_47_46;
+} __packed;
+
+/**
+ * struct IpaHwWdiTxSetUpCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TX_SET_UP command.
+ * @comp_ring_base_pa : This is the physical address of the base of the Tx
+ * completion ring
+ * @comp_ring_size : This is the size of the Tx completion ring
+ * @reserved_comp_ring : Reserved field for expansion of Completion ring params
+ * @ce_ring_base_pa : This is the physical address of the base of the Copy
+ * Engine Source Ring
+ * @ce_ring_size : Copy Engine Ring size
+ * @reserved_ce_ring : Reserved field for expansion of CE ring params
+ * @ce_ring_doorbell_pa : This is the physical address of the doorbell that the
+ * IPA uC has to write into to trigger the copy engine
+ * @num_tx_buffers : Number of pkt buffers allocated. The size of the CE ring
+ * and the Tx completion ring has to be at least (num_tx_buffers + 1)
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Tx path
+ * @reserved : Reserved field
+ *
+ * Parameters are sent as a pointer and thus should reside in an address
+ * accessible to HW
+ */
+struct IpaHwWdiTxSetUpCmdData_t {
+	u32 comp_ring_base_pa;
+	u16 comp_ring_size;
+	u16 reserved_comp_ring;
+	u32 ce_ring_base_pa;
+	u16 ce_ring_size;
+	u16 reserved_ce_ring;
+	u32 ce_ring_doorbell_pa;
+	u16 num_tx_buffers;
+	u8 ipa_pipe_number;
+	u8 reserved;
+} __packed;
+
+struct IpaHwWdi2TxSetUpCmdData_t {
+	u32 comp_ring_base_pa;
+	u32 comp_ring_base_pa_hi;
+	u16 comp_ring_size;
+	u16 reserved_comp_ring;
+	u32 ce_ring_base_pa;
+	u32 ce_ring_base_pa_hi;
+	u16 ce_ring_size;
+	u16 reserved_ce_ring;
+	u32 ce_ring_doorbell_pa;
+	u32 ce_ring_doorbell_pa_hi;
+	u16 num_tx_buffers;
+	u8 ipa_pipe_number;
+	u8 reserved;
+} __packed;
+/**
+ * struct IpaHwWdiRxSetUpCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command.
+ * @rx_ring_base_pa : This is the physical address of the base of the Rx ring
+ * (containing Rx buffers)
+ * @rx_ring_size : This is the size of the Rx ring
+ * @rx_ring_rp_pa : This is the physical address of the location through which
+ * the IPA uC is expected to communicate the read pointer into the Rx ring
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Rx path
+ *
+ * Parameters are sent as a pointer and thus should reside in an address
+ * accessible to the HW
+ */
+struct IpaHwWdiRxSetUpCmdData_t {
+	u32 rx_ring_base_pa;
+	u32 rx_ring_size;
+	u32 rx_ring_rp_pa;
+	u8 ipa_pipe_number;
+} __packed;
+
+struct IpaHwWdi2RxSetUpCmdData_t {
+	u32 rx_ring_base_pa;
+	u32 rx_ring_base_pa_hi;
+	u32 rx_ring_size;
+	u32 rx_ring_rp_pa;
+	u32 rx_ring_rp_pa_hi;
+	u32 rx_comp_ring_base_pa;
+	u32 rx_comp_ring_base_pa_hi;
+	u32 rx_comp_ring_size;
+	u32 rx_comp_ring_wp_pa;
+	u32 rx_comp_ring_wp_pa_hi;
+	u8 ipa_pipe_number;
+} __packed;
+/**
+ * union IpaHwWdiRxExtCfgCmdData_t - Union holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command.
+ * @ipa_pipe_number : The IPA pipe number for which this config is passed
+ * @qmap_id : QMAP ID to be set in the metadata register
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiRxExtCfgCmdData_t {
+	struct IpaHwWdiRxExtCfgCmdParams_t {
+		u32 ipa_pipe_number:8;
+		u32 qmap_id:8;
+		u32 reserved:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiCommonChCmdData_t - Union holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+ * IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ * IPA_CPU_2_HW_CMD_WDI_CH_RESUME command.
+ * @ipa_pipe_number : The IPA pipe number. This could be Tx or an Rx pipe
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiCommonChCmdData_t {
+	struct IpaHwWdiCommonChCmdParams_t {
+		u32 ipa_pipe_number:8;
+		u32 reserved:24;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiErrorEventData_t - parameters for IPA_HW_2_CPU_EVENT_WDI_ERROR
+ * event.
+ * @wdi_error_type : The WDI error type (see enum ipa_hw_wdi_errors)
+ * @reserved : Reserved
+ * @ipa_pipe_number : IPA pipe number on which error has happened.
Applicable + * only if error type indicates channel error + * @wdi_ch_err_type : Information about the channel error (if available) + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwWdiErrorEventData_t { + struct IpaHwWdiErrorEventParams_t { + u32 wdi_error_type:8; + u32 reserved:8; + u32 ipa_pipe_number:8; + u32 wdi_ch_err_type:8; + } __packed params; + u32 raw32b; +} __packed; + +static void ipa3_uc_wdi_event_log_info_handler( +struct IpaHwEventLogInfoData_t *uc_event_top_mmio) + +{ + struct Ipa3HwEventInfoData_t *stats_ptr = &uc_event_top_mmio->statsInfo; + + if ((uc_event_top_mmio->featureMask & + (1 << IPA_HW_FEATURE_WDI)) == 0) { + IPAERR("WDI feature missing 0x%x\n", + uc_event_top_mmio->featureMask); + return; + } + + if (stats_ptr->featureInfo[IPA_HW_FEATURE_WDI].params.size != + sizeof(struct IpaHwStatsWDIInfoData_t)) { + IPAERR("wdi stats sz invalid exp=%zu is=%u\n", + sizeof(struct IpaHwStatsWDIInfoData_t), + stats_ptr->featureInfo[IPA_HW_FEATURE_WDI].params.size); + return; + } + + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst = + stats_ptr->baseAddrOffset + + stats_ptr->featureInfo[IPA_HW_FEATURE_WDI].params.offset; + IPAERR("WDI stats ofst=0x%x\n", ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst); + if (ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst + + sizeof(struct IpaHwStatsWDIInfoData_t) >= + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) + + ipa3_ctx->smem_sz) { + IPAERR("uc_wdi_stats 0x%x outside SRAM\n", + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst); + return; + } + + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio = + ioremap(ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst, + sizeof(struct IpaHwStatsWDIInfoData_t)); + if (!ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) { + IPAERR("fail to ioremap uc wdi stats\n"); + return; + } +} + +static void ipa3_uc_wdi_event_handler(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio) + +{ + union IpaHwWdiErrorEventData_t wdi_evt; + struct IpaHwSharedMemWdiMapping_t *wdi_sram_mmio_ext; + + if (uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_WDI_ERROR) { + wdi_evt.raw32b = uc_sram_mmio->eventParams; + IPADBG("uC WDI evt errType=%u pipe=%d cherrType=%u\n", + wdi_evt.params.wdi_error_type, + wdi_evt.params.ipa_pipe_number, + wdi_evt.params.wdi_ch_err_type); + wdi_sram_mmio_ext = + (struct IpaHwSharedMemWdiMapping_t *) + uc_sram_mmio; + IPADBG("tx_ch_state=%u rx_ch_state=%u\n", + wdi_sram_mmio_ext->wdi_tx_ch_0_state, + wdi_sram_mmio_ext->wdi_rx_ch_0_state); + } +} + +/** + * ipa3_get_wdi_stats() - Query WDI statistics from uc + * @stats: [inout] stats blob from client populated by driver + * + * Returns: 0 on success, negative on failure + * + * @note Cannot be called from atomic context + * + */ +int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ +#define TX_STATS(y) stats->tx_ch_stats.y = \ + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y +#define RX_STATS(y) stats->rx_ch_stats.y = \ + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->rx_ch_stats.y + + if (!stats || !ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) { + IPAERR("bad parms stats=%pK wdi_stats=%pK\n", + stats, + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio); + return -EINVAL; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + TX_STATS(num_pkts_processed); + TX_STATS(copy_engine_doorbell_value); + TX_STATS(num_db_fired); + TX_STATS(tx_comp_ring_stats.ringFull); + TX_STATS(tx_comp_ring_stats.ringEmpty); + TX_STATS(tx_comp_ring_stats.ringUsageHigh); + TX_STATS(tx_comp_ring_stats.ringUsageLow); + 
TX_STATS(tx_comp_ring_stats.RingUtilCount); + TX_STATS(bam_stats.bamFifoFull); + TX_STATS(bam_stats.bamFifoEmpty); + TX_STATS(bam_stats.bamFifoUsageHigh); + TX_STATS(bam_stats.bamFifoUsageLow); + TX_STATS(bam_stats.bamUtilCount); + TX_STATS(num_db); + TX_STATS(num_unexpected_db); + TX_STATS(num_bam_int_handled); + TX_STATS(num_bam_int_in_non_running_state); + TX_STATS(num_qmb_int_handled); + TX_STATS(num_bam_int_handled_while_wait_for_bam); + + RX_STATS(max_outstanding_pkts); + RX_STATS(num_pkts_processed); + RX_STATS(rx_ring_rp_value); + RX_STATS(rx_ind_ring_stats.ringFull); + RX_STATS(rx_ind_ring_stats.ringEmpty); + RX_STATS(rx_ind_ring_stats.ringUsageHigh); + RX_STATS(rx_ind_ring_stats.ringUsageLow); + RX_STATS(rx_ind_ring_stats.RingUtilCount); + RX_STATS(bam_stats.bamFifoFull); + RX_STATS(bam_stats.bamFifoEmpty); + RX_STATS(bam_stats.bamFifoUsageHigh); + RX_STATS(bam_stats.bamFifoUsageLow); + RX_STATS(bam_stats.bamUtilCount); + RX_STATS(num_bam_int_handled); + RX_STATS(num_db); + RX_STATS(num_unexpected_db); + RX_STATS(num_pkts_in_dis_uninit_state); + RX_STATS(num_ic_inj_vdev_change); + RX_STATS(num_ic_inj_fw_desc_change); + RX_STATS(num_qmb_int_handled); + RX_STATS(reserved1); + RX_STATS(reserved2); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +int ipa3_wdi_init(void) +{ + struct ipa3_uc_hdlrs uc_wdi_cbs = { 0 }; + + uc_wdi_cbs.ipa_uc_event_hdlr = ipa3_uc_wdi_event_handler; + uc_wdi_cbs.ipa_uc_event_log_info_hdlr = + ipa3_uc_wdi_event_log_info_handler; + uc_wdi_cbs.ipa_uc_loaded_hdlr = + ipa3_uc_wdi_loaded_handler; + + ipa3_uc_register_handlers(IPA_HW_FEATURE_WDI, &uc_wdi_cbs); + + return 0; +} + +static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len, + bool device, unsigned long *iova) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC); + unsigned long va = roundup(cb->next_addr, PAGE_SIZE); + int prot = IOMMU_READ | IOMMU_WRITE; + size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE), + PAGE_SIZE); + int ret; + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE), + true_len, + device ? 
(prot | IOMMU_MMIO) : prot); + if (ret) { + IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len); + return -EINVAL; + } + + ipa3_ctx->wdi_map_cnt++; + cb->next_addr = va + true_len; + *iova = va + pa - rounddown(pa, PAGE_SIZE); + return 0; +} + +static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt, + unsigned long *iova) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC); + unsigned long va = roundup(cb->next_addr, PAGE_SIZE); + int prot = IOMMU_READ | IOMMU_WRITE; + int ret; + int i; + struct scatterlist *sg; + unsigned long start_iova = va; + phys_addr_t phys; + size_t len; + int count = 0; + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + if (!sgt) { + IPAERR("Bad parameters, scatter / gather list is NULL\n"); + return -EINVAL; + } + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + /* directly get sg_tbl PA from wlan-driver */ + phys = sg->dma_address; + len = PAGE_ALIGN(sg->offset + sg->length); + + ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot); + if (ret) { + IPAERR("iommu map failed for pa=%pa len=%zu\n", + &phys, len); + goto bad_mapping; + } + va += len; + ipa3_ctx->wdi_map_cnt++; + count++; + } + cb->next_addr = va; + *iova = start_iova; + + return 0; + +bad_mapping: + for_each_sg(sgt->sgl, sg, count, i) + iommu_unmap(cb->mapping->domain, sg_dma_address(sg), + sg_dma_len(sg)); + return -EINVAL; +} + +static void ipa_release_uc_smmu_mappings(enum ipa_client_type client) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC); + int i; + int j; + int start; + int end; + + if (IPA_CLIENT_IS_CONS(client)) { + start = IPA_WDI_TX_RING_RES; + end = IPA_WDI_CE_DB_RES; + } else { + start = IPA_WDI_RX_RING_RES; + if (ipa3_ctx->ipa_wdi2) + end = IPA_WDI_RX_COMP_RING_WP_RES; + else + end = IPA_WDI_RX_RING_RP_RES; + } + + for (i = start; i <= end; i++) { + if (wdi_res[i].valid) { + for (j = 0; j < wdi_res[i].nents; j++) { + iommu_unmap(cb->mapping->domain, + wdi_res[i].res[j].iova, + wdi_res[i].res[j].size); + ipa3_ctx->wdi_map_cnt--; + } + kfree(wdi_res[i].res); + wdi_res[i].valid = false; + } + } + + if (ipa3_ctx->wdi_map_cnt == 0) + cb->next_addr = cb->va_end; + +} + +static void ipa_save_uc_smmu_mapping_pa(int res_idx, phys_addr_t pa, + unsigned long iova, size_t len) +{ + IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &pa, iova, len); + wdi_res[res_idx].res = kzalloc(sizeof(*wdi_res[res_idx].res), + GFP_KERNEL); + if (!wdi_res[res_idx].res) { + WARN_ON(1); + return; + } + wdi_res[res_idx].nents = 1; + wdi_res[res_idx].valid = true; + wdi_res[res_idx].res->pa = rounddown(pa, PAGE_SIZE); + wdi_res[res_idx].res->iova = rounddown(iova, PAGE_SIZE); + wdi_res[res_idx].res->size = roundup(len + pa - rounddown(pa, + PAGE_SIZE), PAGE_SIZE); + IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &wdi_res[res_idx].res->pa, wdi_res[res_idx].res->iova, + wdi_res[res_idx].res->size); +} + +static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt, + unsigned long iova) +{ + int i; + struct scatterlist *sg; + unsigned long curr_iova = iova; + + if (!sgt) { + IPAERR("Bad parameters, scatter / gather list is NULL\n"); + return; + } + + wdi_res[res_idx].res = kcalloc(sgt->nents, + sizeof(*wdi_res[res_idx].res), + GFP_KERNEL); + if (!wdi_res[res_idx].res) { + WARN_ON(1); + return; + } + wdi_res[res_idx].nents = sgt->nents; + wdi_res[res_idx].valid = true; + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + /* directly get sg_tbl PA from wlan */ + wdi_res[res_idx].res[i].pa = 
sg->dma_address; + wdi_res[res_idx].res[i].iova = curr_iova; + wdi_res[res_idx].res[i].size = PAGE_ALIGN(sg->offset + + sg->length); + IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &wdi_res[res_idx].res[i].pa, + wdi_res[res_idx].res[i].iova, + wdi_res[res_idx].res[i].size); + curr_iova += wdi_res[res_idx].res[i].size; + } +} + +int ipa_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en, + phys_addr_t pa, struct sg_table *sgt, size_t len, bool device, + unsigned long *iova) +{ + /* support for SMMU on WLAN but no SMMU on IPA */ + if (wlan_smmu_en && ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) { + IPAERR("Unsupported SMMU pairing\n"); + return -EINVAL; + } + + /* legacy: no SMMUs on either end */ + if (!wlan_smmu_en && ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) { + *iova = pa; + return 0; + } + + /* no SMMU on WLAN but SMMU on IPA */ + if (!wlan_smmu_en && !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) { + if (ipa_create_uc_smmu_mapping_pa(pa, len, + (res_idx == IPA_WDI_CE_DB_RES) ? true : false, iova)) { + IPAERR("Fail to create mapping res %d\n", res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len); + return 0; + } + + /* SMMU on WLAN and SMMU on IPA */ + if (wlan_smmu_en && !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) { + switch (res_idx) { + case IPA_WDI_RX_RING_RP_RES: + case IPA_WDI_RX_COMP_RING_WP_RES: + case IPA_WDI_CE_DB_RES: + case IPA_WDI_TX_DB_RES: + if (ipa_create_uc_smmu_mapping_pa(pa, len, + (res_idx == IPA_WDI_CE_DB_RES) ? true : false, + iova)) { + IPAERR("Fail to create mapping res %d\n", + res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len); + break; + case IPA_WDI_RX_RING_RES: + case IPA_WDI_RX_COMP_RING_RES: + case IPA_WDI_TX_RING_RES: + case IPA_WDI_CE_RING_RES: + if (ipa_create_uc_smmu_mapping_sgt(sgt, iova)) { + IPAERR("Fail to create mapping res %d\n", + res_idx); + WARN_ON(1); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova); + break; + default: + WARN_ON(1); + } + } + + return 0; +} + +static int ipa_create_gsi_smmu_mapping(int res_idx, bool wlan_smmu_en, + phys_addr_t pa, struct sg_table *sgt, size_t len, bool device, + unsigned long *iova) +{ + /* support for SMMU on WLAN but no SMMU on IPA */ + if (wlan_smmu_en && ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) { + IPAERR("Unsupported SMMU pairing\n"); + return -EINVAL; + } + + /* legacy: no SMMUs on either end */ + if (!wlan_smmu_en && ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) { + *iova = pa; + return 0; + } + + /* no SMMU on WLAN but SMMU on IPA */ + if (!wlan_smmu_en && !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) { + if (ipa3_smmu_map_peer_buff(*iova, pa, len, + sgt, IPA_SMMU_CB_WLAN)) { + IPAERR("Fail to create mapping res %d\n", res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len); + return 0; + } + /* SMMU on WLAN and SMMU on IPA */ + if (wlan_smmu_en && !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) { + switch (res_idx) { + case IPA_WDI_RX_RING_RP_RES: + case IPA_WDI_RX_COMP_RING_WP_RES: + case IPA_WDI_CE_DB_RES: + if (ipa3_smmu_map_peer_buff(*iova, pa, len, sgt, + IPA_SMMU_CB_WLAN)) { + IPAERR("Fail to create mapping res %d\n", + res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len); + break; + case IPA_WDI_RX_RING_RES: + case IPA_WDI_RX_COMP_RING_RES: + case IPA_WDI_TX_RING_RES: + case IPA_WDI_CE_RING_RES: + if (ipa3_smmu_map_peer_reg(pa, true, + IPA_SMMU_CB_WLAN)) { + IPAERR("Fail to create mapping res %d\n", + res_idx); + return -EFAULT; + } + 
ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova); + break; + default: + WARN_ON(1); + } + } + return 0; +} + +static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify) +{ + switch (notify->evt_id) { + case GSI_EVT_OUT_OF_BUFFERS_ERR: + IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_EVT_OUT_OF_RESOURCES_ERR: + IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR: + IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n"); + break; + case GSI_EVT_EVT_RING_EMPTY_ERR: + IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n"); + break; + default: + IPAERR("Unexpected err evt: %d\n", notify->evt_id); + } + ipa_assert(); +} + +static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify) +{ + switch (notify->evt_id) { + case GSI_CHAN_INVALID_TRE_ERR: + IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n"); + break; + case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR: + IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_BUFFERS_ERR: + IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_RESOURCES_ERR: + IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR: + IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n"); + break; + case GSI_CHAN_HWO_1_ERR: + IPAERR("Got GSI_CHAN_HWO_1_ERR\n"); + break; + default: + IPAERR("Unexpected err evt: %d\n", notify->evt_id); + } + ipa_assert(); +} +static int ipa3_wdi2_gsi_alloc_evt_ring( + struct gsi_evt_ring_props *evt_ring_props, + enum ipa_client_type client, + unsigned long *evt_ring_hdl) +{ + union __packed gsi_evt_scratch evt_scratch; + int result = -EFAULT; + + /* GSI EVENT RING allocation */ + evt_ring_props->intf = GSI_EVT_CHTYPE_WDI2_EV; + evt_ring_props->intr = GSI_INTR_IRQ; + + if (IPA_CLIENT_IS_PROD(client)) + evt_ring_props->re_size = GSI_EVT_RING_RE_SIZE_8B; + else + evt_ring_props->re_size = GSI_EVT_RING_RE_SIZE_16B; + + evt_ring_props->exclusive = true; + evt_ring_props->err_cb = ipa_gsi_evt_ring_err_cb; + evt_ring_props->user_data = NULL; + evt_ring_props->int_modt = IPA_GSI_EVT_RING_INT_MODT; + evt_ring_props->int_modc = 1; + IPADBG("GSI evt ring len: %d\n", evt_ring_props->ring_len); + IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n", + client, + evt_ring_props->int_modt, + evt_ring_props->int_modc); + + + result = gsi_alloc_evt_ring(evt_ring_props, + ipa3_ctx->gsi_dev_hdl, evt_ring_hdl); + IPADBG("gsi_alloc_evt_ring result: %d\n", result); + if (result != GSI_STATUS_SUCCESS) + goto fail_alloc_evt_ring; + + evt_scratch.wdi.update_ri_moderation_config = + UPDATE_RI_MODERATION_THRESHOLD; + evt_scratch.wdi.update_ri_mod_timer_running = 0; + evt_scratch.wdi.evt_comp_count = 0; + evt_scratch.wdi.last_update_ri = 0; + evt_scratch.wdi.resvd1 = 0; + evt_scratch.wdi.resvd2 = 0; + result = gsi_write_evt_ring_scratch(*evt_ring_hdl, evt_scratch); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("Error writing WDI event ring scratch: %d\n", result); + gsi_dealloc_evt_ring(*evt_ring_hdl); + return -EFAULT; + } + +fail_alloc_evt_ring: + return result; + +} +static int ipa3_wdi2_gsi_alloc_channel_ring( + struct gsi_chan_props *channel_props, + enum ipa_client_type client, + unsigned long *chan_hdl, + unsigned long evt_ring_hdl) +{ + int result = -EFAULT; + const struct ipa_gsi_ep_config *ep_cfg; + + ep_cfg = ipa3_get_gsi_ep_info(client); + if (!ep_cfg) { + IPAERR("Failed getting GSI EP info for client=%d\n", + client); + return -EPERM; + } + + if (IPA_CLIENT_IS_PROD(client)) { + IPAERR("Client is 
PROD\n"); + channel_props->dir = GSI_CHAN_DIR_TO_GSI; + channel_props->re_size = GSI_CHAN_RE_SIZE_16B; + } else { + IPAERR("Client is CONS"); + channel_props->dir = GSI_CHAN_DIR_FROM_GSI; + channel_props->re_size = GSI_CHAN_RE_SIZE_8B; + } + + channel_props->prot = GSI_CHAN_PROT_WDI2; + channel_props->ch_id = ep_cfg->ipa_gsi_chan_num; + channel_props->evt_ring_hdl = evt_ring_hdl; + + IPADBG("ch_id: %d\n", channel_props->ch_id); + IPADBG("evt_ring_hdl: %ld\n", channel_props->evt_ring_hdl); + IPADBG("re_size: %d\n", channel_props->re_size); + IPADBG("Config GSI xfer cb func"); + IPADBG("GSI channel ring len: %d\n", channel_props->ring_len); + channel_props->xfer_cb = NULL; + + IPADBG("channel ring base vaddr = 0x%pa\n", + channel_props->ring_base_vaddr); + + channel_props->use_db_eng = GSI_CHAN_DB_MODE; + channel_props->max_prefetch = GSI_ONE_PREFETCH_SEG; + channel_props->prefetch_mode = ep_cfg->prefetch_mode; + channel_props->low_weight = 1; + channel_props->err_cb = ipa_gsi_chan_err_cb; + + IPADBG("Allocating GSI channel\n"); + result = gsi_alloc_channel(channel_props, + ipa3_ctx->gsi_dev_hdl, + chan_hdl); + if (result != GSI_STATUS_SUCCESS) + goto fail_alloc_channel; + + IPADBG("gsi_chan_hdl: %ld\n", *chan_hdl); + +fail_alloc_channel: + return result; +} + + +int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out) +{ + u32 len; + int ipa_ep_idx, num_ring_ele; + int result = -EFAULT; + enum gsi_status gsi_res; + struct ipa3_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + struct gsi_chan_props gsi_channel_props; + struct gsi_evt_ring_props gsi_evt_ring_props; + union __packed gsi_channel_scratch gsi_scratch; + phys_addr_t pa; + unsigned long va; + u32 gsi_db_reg_phs_addr_lsb; + u32 gsi_db_reg_phs_addr_msb; + + ipa_ep_idx = ipa3_get_ep_mapping(in->sys.client); + if (ipa_ep_idx == -1) { + IPAERR("fail to alloc EP.\n"); + goto fail; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (ep->valid) { + IPAERR("EP already allocated.\n"); + goto fail; + } + + IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client); + + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); + memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props)); + memset(&gsi_channel_props, 0, sizeof(gsi_channel_props)); + memset(&gsi_scratch, 0, sizeof(gsi_scratch)); + + IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx); + + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (in->smmu_enabled) { + IPADBG("comp_ring_size=%d\n", + in->u.dl_smmu.comp_ring_size); + IPADBG("ce_ring_size=%d\n", in->u.dl_smmu.ce_ring_size); + IPADBG("ce_ring_doorbell_pa=0x%pa\n", + &in->u.dl_smmu.ce_door_bell_pa); + IPADBG("num_tx_buffers=%d\n", + in->u.dl_smmu.num_tx_buffers); + } else { + IPADBG("comp_ring_base_pa=0x%pa\n", + &in->u.dl.comp_ring_base_pa); + IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size); + IPADBG("ce_ring_base_pa=0x%pa\n", + &in->u.dl.ce_ring_base_pa); + IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size); + IPADBG("ce_ring_doorbell_pa=0x%pa\n", + &in->u.dl.ce_door_bell_pa); + IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers); + } + } else { + if (in->smmu_enabled) { + IPADBG("rx_ring_size=%d\n", + in->u.ul_smmu.rdy_ring_size); + IPADBG("rx_ring_rp_pa=0x%pa\n", + &in->u.ul_smmu.rdy_ring_rp_pa); + IPADBG("rx_comp_ring_size=%d\n", + in->u.ul_smmu.rdy_comp_ring_size); + IPADBG("rx_comp_ring_wp_pa=0x%pa\n", + &in->u.ul_smmu.rdy_comp_ring_wp_pa); + ipa3_ctx->wdi2_ctx.rdy_ring_rp_pa = + in->u.ul_smmu.rdy_ring_rp_pa; + ipa3_ctx->wdi2_ctx.rdy_ring_size = + 
in->u.ul_smmu.rdy_ring_size; + ipa3_ctx->wdi2_ctx.rdy_comp_ring_wp_pa = + in->u.ul_smmu.rdy_comp_ring_wp_pa; + ipa3_ctx->wdi2_ctx.rdy_comp_ring_size = + in->u.ul_smmu.rdy_comp_ring_size; + } else { + IPADBG("rx_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_ring_base_pa); + IPADBG("rx_ring_size=%d\n", + in->u.ul.rdy_ring_size); + IPADBG("rx_ring_rp_pa=0x%pa\n", + &in->u.ul.rdy_ring_rp_pa); + IPADBG("rx_comp_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_base_pa); + IPADBG("rx_comp_ring_size=%d\n", + in->u.ul.rdy_comp_ring_size); + IPADBG("rx_comp_ring_wp_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_wp_pa); + ipa3_ctx->wdi2_ctx.rdy_ring_base_pa = + in->u.ul.rdy_ring_base_pa; + ipa3_ctx->wdi2_ctx.rdy_ring_rp_pa = + in->u.ul.rdy_ring_rp_pa; + ipa3_ctx->wdi2_ctx.rdy_ring_size = + in->u.ul.rdy_ring_size; + ipa3_ctx->wdi2_ctx.rdy_comp_ring_base_pa = + in->u.ul.rdy_comp_ring_base_pa; + ipa3_ctx->wdi2_ctx.rdy_comp_ring_wp_pa = + in->u.ul.rdy_comp_ring_wp_pa; + ipa3_ctx->wdi2_ctx.rdy_comp_ring_size = + in->u.ul.rdy_comp_ring_size; + } + } + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size : + in->u.dl.comp_ring_size; + IPADBG("TX ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.comp_ring_size, + in->u.dl.comp_ring_size); + if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_RING_RES, + in->smmu_enabled, + in->u.dl.comp_ring_base_pa, + &in->u.dl_smmu.comp_ring, + len, + false, + &va)) { + IPAERR("fail to create gsi mapping TX ring.\n"); + result = -ENOMEM; + goto gsi_timeout; + } + gsi_channel_props.ring_base_addr = va; + gsi_channel_props.ring_base_vaddr = NULL; + gsi_channel_props.ring_len = len; + + len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size : + in->u.dl.ce_ring_size; + IPADBG("CE ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.ce_ring_size, + in->u.dl.ce_ring_size); + + /* WA: wlan passed ce_ring sg_table PA directly */ + if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_RING_RES, + in->smmu_enabled, + in->u.dl.ce_ring_base_pa, + &in->u.dl_smmu.ce_ring, + len, + false, + &va)) { + IPAERR("fail to create gsi mapping CE ring.\n"); + result = -ENOMEM; + goto gsi_timeout; + } + gsi_evt_ring_props.ring_base_addr = va; + gsi_evt_ring_props.ring_base_vaddr = NULL; + gsi_evt_ring_props.ring_len = len; + pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa : + in->u.dl.ce_door_bell_pa; + if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_DB_RES, + in->smmu_enabled, + pa, + NULL, + 4, + true, + &va)) { + IPAERR("fail to create gsi mapping CE DB.\n"); + result = -ENOMEM; + goto gsi_timeout; + } + gsi_evt_ring_props.rp_update_addr = va; + } else { + len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size : + in->u.ul.rdy_ring_size; + IPADBG("RX ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_ring_size, + in->u.ul.rdy_ring_size); + if (ipa_create_gsi_smmu_mapping(IPA_WDI_RX_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_ring_base_pa, + &in->u.ul_smmu.rdy_ring, + len, + false, + &va)) { + IPAERR("fail to create gsi RX ring.\n"); + result = -ENOMEM; + goto gsi_timeout; + } + gsi_channel_props.ring_base_addr = va; + gsi_channel_props.ring_base_vaddr = NULL; + gsi_channel_props.ring_len = len; + pa = in->smmu_enabled ? 
in->u.ul_smmu.rdy_ring_rp_pa : + in->u.ul.rdy_ring_rp_pa; + if (ipa_create_gsi_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create gsi RX rng RP\n"); + result = -ENOMEM; + goto gsi_timeout; + } + gsi_scratch.wdi.wifi_rx_ri_addr_low = + va & 0xFFFFFFFF; + gsi_scratch.wdi.wifi_rx_ri_addr_high = + (va & 0xFFFFF00000000) >> 32; + + len = in->smmu_enabled ? + in->u.ul_smmu.rdy_comp_ring_size : + in->u.ul.rdy_comp_ring_size; + IPADBG("RX ring smmu_en=%d comp_ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_comp_ring_size, + in->u.ul.rdy_comp_ring_size); + if (ipa_create_gsi_smmu_mapping( + IPA_WDI_RX_COMP_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_comp_ring_base_pa, + &in->u.ul_smmu.rdy_comp_ring, + len, + false, + &va)) { + IPAERR("fail to create gsi RX comp_ring.\n"); + result = -ENOMEM; + goto gsi_timeout; + } + gsi_evt_ring_props.ring_base_addr = va; + gsi_evt_ring_props.ring_base_vaddr = NULL; + gsi_evt_ring_props.ring_len = len; + pa = in->smmu_enabled ? + in->u.ul_smmu.rdy_comp_ring_wp_pa : + in->u.ul.rdy_comp_ring_wp_pa; + if (ipa_create_gsi_smmu_mapping( + IPA_WDI_RX_COMP_RING_WP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create gsi RX comp_rng WP\n"); + result = -ENOMEM; + goto gsi_timeout; + } + gsi_evt_ring_props.rp_update_addr = va; + gsi_scratch.wdi.wdi_rx_vdev_id = 0xff; + gsi_scratch.wdi.wdi_rx_fw_desc = 0xff; + gsi_scratch.wdi.endp_metadatareg_offset = + ipahal_get_reg_mn_ofst( + IPA_ENDP_INIT_HDR_METADATA_n, 0, + ipa_ep_idx)/4; + gsi_scratch.wdi.qmap_id = 0; + } + + ep->valid = 1; + ep->client = in->sys.client; + ep->keep_ipa_awake = in->sys.keep_ipa_awake; + ep->skip_ep_cfg = in->sys.skip_ep_cfg; + ep->client_notify = in->sys.notify; + ep->priv = in->sys.priv; + if (IPA_CLIENT_IS_PROD(in->sys.client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl); + } + + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + in->sys.ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR; + in->sys.ipa_ep_cfg.aggr.aggr = IPA_GENERIC; + in->sys.ipa_ep_cfg.aggr.aggr_pkt_limit = IPA_AGGR_PKT_LIMIT; + in->sys.ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_AGGR_HARD_BYTE_LIMIT; + in->sys.ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = + IPA_ENABLE_AGGR; + } + if (!ep->skip_ep_cfg) { + if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto ipa_cfg_ep_fail; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("Skipping endpoint configuration.\n"); + } + result = ipa3_wdi2_gsi_alloc_evt_ring(&gsi_evt_ring_props, + in->sys.client, + &ep->gsi_evt_ring_hdl); + if (result) + goto fail_alloc_evt_ring; + /*copy mem info */ + ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len; + ep->gsi_mem_info.evt_ring_base_addr = gsi_evt_ring_props.ring_base_addr; + ep->gsi_mem_info.evt_ring_base_vaddr = + gsi_evt_ring_props.ring_base_vaddr; + IPAERR("evt ring len: %d\n", ep->gsi_mem_info.evt_ring_len); + IPAERR("element size: %d\n", gsi_evt_ring_props.re_size); + + result = ipa3_wdi2_gsi_alloc_channel_ring(&gsi_channel_props, + in->sys.client, + &ep->gsi_chan_hdl, ep->gsi_evt_ring_hdl); + if (result) + goto fail_alloc_channel; + ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len; + ep->gsi_mem_info.chan_ring_base_addr = gsi_channel_props.ring_base_addr; + ep->gsi_mem_info.chan_ring_base_vaddr = + gsi_channel_props.ring_base_vaddr; + + num_ring_ele = 
ep->gsi_mem_info.evt_ring_len/gsi_evt_ring_props.re_size; + IPAERR("UPDATE_RI_MODERATION_THRESHOLD: %d\n", num_ring_ele); + gsi_scratch.wdi.update_ri_moderation_threshold = + min(UPDATE_RI_MODERATION_THRESHOLD, num_ring_ele); + gsi_scratch.wdi.update_ri_moderation_counter = 0; + gsi_scratch.wdi.wdi_rx_tre_proc_in_progress = 0; + gsi_scratch.wdi.resv1 = 0; + result = gsi_write_channel_scratch(ep->gsi_chan_hdl, + gsi_scratch); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("gsi_write_channel_scratch failed %d\n", + result); + goto fail_write_channel_scratch; + } + + /* for AP+STA stats update */ + if (in->wdi_notify) + ipa3_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify; + else + IPADBG("in->wdi_notify is null\n"); + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client)) + ipa3_install_dflt_flt_rules(ipa_ep_idx); + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + + IPADBG("GSI connected.\n"); + gsi_res = gsi_query_channel_db_addr(ep->gsi_chan_hdl, + &gsi_db_reg_phs_addr_lsb, + &gsi_db_reg_phs_addr_msb); + out->uc_door_bell_pa = gsi_db_reg_phs_addr_lsb; + IPADBG("GSI query result: %d\n", gsi_res); + IPADBG("GSI lsb addr: %d\n", gsi_db_reg_phs_addr_lsb); + IPADBG("GSI msb addr: %d\n", gsi_db_reg_phs_addr_msb); + + ep->gsi_offload_state |= IPA_WDI_CONNECTED; + out->clnt_hdl = ipa_ep_idx; + return 0; + +ipa_cfg_ep_fail: + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); +fail_write_channel_scratch: + gsi_dealloc_channel(ep->gsi_chan_hdl); +gsi_timeout: + ipa_release_uc_smmu_mappings(in->sys.client); +fail_alloc_channel: + if (ep->gsi_evt_ring_hdl != ~0) { + gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); + ep->gsi_evt_ring_hdl = ~0; + } +fail_alloc_evt_ring: +fail: + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + return result; +} + +/** + * ipa3_connect_wdi_pipe() - WDI client connect + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out) +{ + int ipa_ep_idx; + int result = -EFAULT; + struct ipa3_ep_context *ep; + struct ipa_mem_buffer cmd; + struct IpaHwWdiTxSetUpCmdData_t *tx; + struct IpaHwWdiRxSetUpCmdData_t *rx; + struct IpaHwWdi2TxSetUpCmdData_t *tx_2; + struct IpaHwWdi2RxSetUpCmdData_t *rx_2; + + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + unsigned long va; + phys_addr_t pa; + u32 len; + + if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) { + IPAERR("bad parm. 
in=%pK out=%pK\n", in, out); + if (in) + IPAERR("client = %d\n", in->sys.client); + return -EINVAL; + } + + if (!in->smmu_enabled) { + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (in->u.dl.comp_ring_base_pa % + IPA_WDI_RING_ALIGNMENT || + in->u.dl.ce_ring_base_pa % + IPA_WDI_RING_ALIGNMENT) { + IPAERR("alignment failure on TX\n"); + return -EINVAL; + } + } else { + if (in->u.ul.rdy_ring_base_pa % + IPA_WDI_RING_ALIGNMENT) { + IPAERR("alignment failure on RX\n"); + return -EINVAL; + } + } + } + + if (ipa3_ctx->ipa_wdi2_over_gsi) + return ipa3_connect_gsi_wdi_pipe(in, out); + + result = ipa3_uc_state_check(); + if (result) + return result; + + ipa_ep_idx = ipa3_get_ep_mapping(in->sys.client); + if (ipa_ep_idx == -1) { + IPAERR("fail to alloc EP.\n"); + goto fail; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (ep->valid) { + IPAERR("EP already allocated.\n"); + goto fail; + } + + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); + IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client); + + IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx); + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (ipa3_ctx->ipa_wdi2) + cmd.size = sizeof(*tx_2); + else + cmd.size = sizeof(*tx); + if (in->smmu_enabled) { + IPADBG("comp_ring_size=%d\n", + in->u.dl_smmu.comp_ring_size); + IPADBG("ce_ring_size=%d\n", in->u.dl_smmu.ce_ring_size); + IPADBG("ce_ring_doorbell_pa=0x%pa\n", + &in->u.dl_smmu.ce_door_bell_pa); + IPADBG("num_tx_buffers=%d\n", + in->u.dl_smmu.num_tx_buffers); + } else { + IPADBG("comp_ring_base_pa=0x%pa\n", + &in->u.dl.comp_ring_base_pa); + IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size); + IPADBG("ce_ring_base_pa=0x%pa\n", + &in->u.dl.ce_ring_base_pa); + IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size); + IPADBG("ce_ring_doorbell_pa=0x%pa\n", + &in->u.dl.ce_door_bell_pa); + IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers); + } + } else { + if (ipa3_ctx->ipa_wdi2) + cmd.size = sizeof(*rx_2); + else + cmd.size = sizeof(*rx); + if (in->smmu_enabled) { + IPADBG("rx_ring_size=%d\n", + in->u.ul_smmu.rdy_ring_size); + IPADBG("rx_ring_rp_pa=0x%pa\n", + &in->u.ul_smmu.rdy_ring_rp_pa); + IPADBG("rx_comp_ring_size=%d\n", + in->u.ul_smmu.rdy_comp_ring_size); + IPADBG("rx_comp_ring_wp_pa=0x%pa\n", + &in->u.ul_smmu.rdy_comp_ring_wp_pa); + ipa3_ctx->uc_ctx.rdy_ring_rp_pa = + in->u.ul_smmu.rdy_ring_rp_pa; + ipa3_ctx->uc_ctx.rdy_ring_size = + in->u.ul_smmu.rdy_ring_size; + ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa = + in->u.ul_smmu.rdy_comp_ring_wp_pa; + ipa3_ctx->uc_ctx.rdy_comp_ring_size = + in->u.ul_smmu.rdy_comp_ring_size; + } else { + IPADBG("rx_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_ring_base_pa); + IPADBG("rx_ring_size=%d\n", + in->u.ul.rdy_ring_size); + IPADBG("rx_ring_rp_pa=0x%pa\n", + &in->u.ul.rdy_ring_rp_pa); + IPADBG("rx_comp_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_base_pa); + IPADBG("rx_comp_ring_size=%d\n", + in->u.ul.rdy_comp_ring_size); + IPADBG("rx_comp_ring_wp_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_wp_pa); + ipa3_ctx->uc_ctx.rdy_ring_base_pa = + in->u.ul.rdy_ring_base_pa; + ipa3_ctx->uc_ctx.rdy_ring_rp_pa = + in->u.ul.rdy_ring_rp_pa; + ipa3_ctx->uc_ctx.rdy_ring_size = + in->u.ul.rdy_ring_size; + ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa = + in->u.ul.rdy_comp_ring_base_pa; + ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa = + in->u.ul.rdy_comp_ring_wp_pa; + ipa3_ctx->uc_ctx.rdy_comp_ring_size = + in->u.ul.rdy_comp_ring_size; + } + } + + cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA 
memory.\n"); + result = -ENOMEM; + goto dma_alloc_fail; + } + + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (ipa3_ctx->ipa_wdi2) { + tx_2 = (struct IpaHwWdi2TxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size : + in->u.dl.comp_ring_size; + IPADBG("TX_2 ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.comp_ring_size, + in->u.dl.comp_ring_size); + if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES, + in->smmu_enabled, + in->u.dl.comp_ring_base_pa, + &in->u.dl_smmu.comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping TX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->comp_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->comp_ring_base_pa = (u32) (va & 0xFFFFFFFF); + tx_2->comp_ring_size = len; + IPADBG("TX_2 comp_ring_base_pa_hi=0x%08x :0x%08x\n", + tx_2->comp_ring_base_pa_hi, + tx_2->comp_ring_base_pa); + + len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size : + in->u.dl.ce_ring_size; + IPADBG("TX_2 CE ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.ce_ring_size, + in->u.dl.ce_ring_size); + /* WA: wlan passed ce_ring sg_table PA directly */ + if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, + in->smmu_enabled, + in->u.dl.ce_ring_base_pa, + &in->u.dl_smmu.ce_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping CE ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->ce_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->ce_ring_base_pa = (u32) (va & 0xFFFFFFFF); + tx_2->ce_ring_size = len; + IPADBG("TX_2 ce_ring_base_pa_hi=0x%08x :0x%08x\n", + tx_2->ce_ring_base_pa_hi, + tx_2->ce_ring_base_pa); + + pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa : + in->u.dl.ce_door_bell_pa; + if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, + in->smmu_enabled, + pa, + NULL, + 4, + true, + &va)) { + IPAERR("fail to create uc mapping CE DB.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->ce_ring_doorbell_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->ce_ring_doorbell_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("TX_2 ce_ring_doorbell_pa_hi=0x%08x :0x%08x\n", + tx_2->ce_ring_doorbell_pa_hi, + tx_2->ce_ring_doorbell_pa); + + tx_2->num_tx_buffers = in->smmu_enabled ? + in->u.dl_smmu.num_tx_buffers : + in->u.dl.num_tx_buffers; + tx_2->ipa_pipe_number = ipa_ep_idx; + } else { + tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size : + in->u.dl.comp_ring_size; + IPADBG("TX ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.comp_ring_size, + in->u.dl.comp_ring_size); + if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES, + in->smmu_enabled, + in->u.dl.comp_ring_base_pa, + &in->u.dl_smmu.comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping TX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx->comp_ring_base_pa = va; + tx->comp_ring_size = len; + len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size : + in->u.dl.ce_ring_size; + IPADBG("TX CE ring smmu_en=%d ring_size=%d %d 0x%lx\n", + in->smmu_enabled, + in->u.dl_smmu.ce_ring_size, + in->u.dl.ce_ring_size, + va); + if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, + in->smmu_enabled, + in->u.dl.ce_ring_base_pa, + &in->u.dl_smmu.ce_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping CE ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx->ce_ring_base_pa = va; + tx->ce_ring_size = len; + pa = in->smmu_enabled ? 
in->u.dl_smmu.ce_door_bell_pa : + in->u.dl.ce_door_bell_pa; + if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, + in->smmu_enabled, + pa, + NULL, + 4, + true, + &va)) { + IPAERR("fail to create uc mapping CE DB.\n"); + result = -ENOMEM; + goto uc_timeout; + } + + IPADBG("CE doorbell pa: 0x%pa va:0x%lx\n", &pa, va); + IPADBG("Is wdi_over_pcie ? (%s)\n", + ipa3_ctx->wdi_over_pcie ? "Yes":"No"); + + if (ipa3_ctx->wdi_over_pcie) + tx->ce_ring_doorbell_pa = pa; + else + tx->ce_ring_doorbell_pa = va; + + tx->num_tx_buffers = in->smmu_enabled ? + in->u.dl_smmu.num_tx_buffers : + in->u.dl.num_tx_buffers; + tx->ipa_pipe_number = ipa_ep_idx; + } + out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } else { + if (ipa3_ctx->ipa_wdi2) { + rx_2 = (struct IpaHwWdi2RxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size : + in->u.ul.rdy_ring_size; + IPADBG("RX_2 ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_ring_size, + in->u.ul.rdy_ring_size); + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_ring_base_pa, + &in->u.ul_smmu.rdy_ring, + len, + false, + &va)) { + IPAERR("fail to create uc RX_2 ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_ring_base_pa = (u32) (va & 0xFFFFFFFF); + rx_2->rx_ring_size = len; + IPADBG("RX_2 rx_ring_base_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_ring_base_pa_hi, + rx_2->rx_ring_base_pa); + + pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa : + in->u.ul.rdy_ring_rp_pa; + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc RX_2 rng RP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_ring_rp_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_ring_rp_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("RX_2 rx_ring_rp_pa_hi=0x%08x :0x%08x\n", + rx_2->rx_ring_rp_pa_hi, + rx_2->rx_ring_rp_pa); + len = in->smmu_enabled ? + in->u.ul_smmu.rdy_comp_ring_size : + in->u.ul.rdy_comp_ring_size; + IPADBG("RX_2 ring smmu_en=%d comp_ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_comp_ring_size, + in->u.ul.rdy_comp_ring_size); + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_COMP_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_comp_ring_base_pa, + &in->u.ul_smmu.rdy_comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc RX_2 comp_ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_comp_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_comp_ring_base_pa = (u32) (va & 0xFFFFFFFF); + rx_2->rx_comp_ring_size = len; + IPADBG("RX_2 rx_comp_ring_base_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_comp_ring_base_pa_hi, + rx_2->rx_comp_ring_base_pa); + + pa = in->smmu_enabled ? 
+ in->u.ul_smmu.rdy_comp_ring_wp_pa : + in->u.ul.rdy_comp_ring_wp_pa; + if (ipa_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_WP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc RX_2 comp_rng WP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_comp_ring_wp_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_comp_ring_wp_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("RX_2 rx_comp_ring_wp_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_comp_ring_wp_pa_hi, + rx_2->rx_comp_ring_wp_pa); + rx_2->ipa_pipe_number = ipa_ep_idx; + } else { + rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size : + in->u.ul.rdy_ring_size; + IPADBG("RX ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_ring_size, + in->u.ul.rdy_ring_size); + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_ring_base_pa, + &in->u.ul_smmu.rdy_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping RX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx->rx_ring_base_pa = va; + rx->rx_ring_size = len; + + pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa : + in->u.ul.rdy_ring_rp_pa; + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc mapping RX rng RP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx->rx_ring_rp_pa = va; + rx->ipa_pipe_number = ipa_ep_idx; + } + out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } + + ep->valid = 1; + ep->client = in->sys.client; + ep->keep_ipa_awake = in->sys.keep_ipa_awake; + result = ipa3_disable_data_path(ipa_ep_idx); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx); + goto uc_timeout; + } + if (IPA_CLIENT_IS_PROD(in->sys.client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl); + } + + result = ipa3_uc_send_cmd((u32)(cmd.phys_base), + IPA_CLIENT_IS_CONS(in->sys.client) ? 
+ IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : + IPA_CPU_2_HW_CMD_WDI_RX_SET_UP, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + ep->skip_ep_cfg = in->sys.skip_ep_cfg; + ep->client_notify = in->sys.notify; + ep->priv = in->sys.priv; + + /* for AP+STA stats update */ + if (in->wdi_notify) + ipa3_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify; + else + IPADBG("in->wdi_notify is null\n"); + + if (!ep->skip_ep_cfg) { + if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto ipa_cfg_ep_fail; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("Skipping endpoint configuration.\n"); + } + + ipa3_enable_data_path(ipa_ep_idx); + + out->clnt_hdl = ipa_ep_idx; + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client)) + ipa3_install_dflt_flt_rules(ipa_ep_idx); + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + ep->uc_offload_state |= IPA_WDI_CONNECTED; + IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx); + + return 0; + +ipa_cfg_ep_fail: + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); +uc_timeout: + ipa_release_uc_smmu_mappings(in->sys.client); + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); +dma_alloc_fail: + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); +fail: + return result; +} + +int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->gsi_offload_state != IPA_WDI_CONNECTED) { + IPAERR("WDI channel bad state %d\n", ep->gsi_offload_state); + return -EFAULT; + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipa3_reset_gsi_channel(clnt_hdl); + ipa3_reset_gsi_event_ring(clnt_hdl); + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + result = ipa3_release_gsi_channel(clnt_hdl); + if (result) { + IPAERR("GSI dealloc channel failed %d\n", + result); + goto fail_dealloc_channel; + } + ipa_release_uc_smmu_mappings(clnt_hdl); + + /* for AP+STA stats update */ + if (ipa3_ctx->uc_wdi_ctx.stats_notify) + ipa3_ctx->uc_wdi_ctx.stats_notify = NULL; + else + IPADBG("uc_wdi_ctx.stats_notify already null\n"); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + +fail_dealloc_channel: + return result; +} + +/** + * ipa3_disconnect_wdi_pipe() - WDI client disconnect + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_disconnect_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + union IpaHwWdiCommonChCmdData_t tear; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + if (ipa3_ctx->ipa_wdi2_over_gsi) + return ipa3_disconnect_gsi_wdi_pipe(clnt_hdl); + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != IPA_WDI_CONNECTED) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + tear.params.ipa_pipe_number = clnt_hdl; + + result = ipa3_uc_send_cmd(tear.raw32b, + 
IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + ipa3_delete_dflt_flt_rules(clnt_hdl); + ipa_release_uc_smmu_mappings(ep->client); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context)); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + /* for AP+STA stats update */ + if (ipa3_ctx->uc_wdi_ctx.stats_notify) + ipa3_ctx->uc_wdi_ctx.stats_notify = NULL; + else + IPADBG("uc_wdi_ctx.stats_notify already null\n"); + +uc_timeout: + return result; +} + +int ipa3_enable_gsi_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + int ipa_ep_idx; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + if (ep->gsi_offload_state != IPA_WDI_CONNECTED) { + IPAERR("WDI channel bad state %d\n", ep->gsi_offload_state); + return -EFAULT; + } + + ipa_ep_idx = ipa3_get_ep_mapping(ipa3_get_client_mapping(clnt_hdl)); + if (ipa_ep_idx == -1) { + IPAERR("fail to alloc EP.\n"); + result = -EPERM; + goto fail; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + ipa3_enable_data_path(clnt_hdl); + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + ep->gsi_offload_state |= IPA_WDI_ENABLED; + IPADBG("client (ep: %d) enabled\n", clnt_hdl); +fail: + return result; +} +int ipa3_disable_gsi_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + u32 prod_hdl; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->gsi_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) { + IPAERR("WDI channel bad state %d\n", ep->gsi_offload_state); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + result = ipa3_disable_data_path(clnt_hdl); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + clnt_hdl); + goto gsi_timeout; + } + + /** + * To avoid data stall during continuous SAP on/off before + * setting delay to IPA Consumer pipe, remove delay and enable + * holb on IPA Producer pipe + */ + if (IPA_CLIENT_IS_PROD(ep->client)) { + IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + /* remove delay on wlan-prod pipe*/ + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + + prod_hdl = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_CONS); + if (ipa3_ctx->ep[prod_hdl].valid == 1) { + result = ipa3_disable_data_path(prod_hdl); + if (result) { + IPAERR("disable data path failed\n"); + IPAERR("res=%d clnt=%d\n", + result, prod_hdl); + goto gsi_timeout; + } + } + usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC, + IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC); + + } + + /* Set the delay after disabling IPA Producer pipe */ + if (IPA_CLIENT_IS_PROD(ep->client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + ep->gsi_offload_state &= ~IPA_WDI_ENABLED; + IPADBG("client (ep: %d) disabled\n", clnt_hdl); + +gsi_timeout: + return result; +} +/** + * ipa3_enable_wdi_pipe() - WDI client enable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * 
Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_enable_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + union IpaHwWdiCommonChCmdData_t enable; + struct ipa_ep_cfg_holb holb_cfg; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + if (ipa3_ctx->ipa_wdi2_over_gsi) + return ipa3_enable_gsi_wdi_pipe(clnt_hdl); + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != IPA_WDI_CONNECTED) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + enable.params.ipa_pipe_number = clnt_hdl; + + result = ipa3_uc_send_cmd(enable.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_ENABLE, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + if (IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_DIS; + holb_cfg.tmr_val = 0; + result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + ep->uc_offload_state |= IPA_WDI_ENABLED; + IPADBG("client (ep: %d) enabled\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa3_disable_wdi_pipe() - WDI client disable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_disable_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + union IpaHwWdiCommonChCmdData_t disable; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + u32 prod_hdl; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + if (ipa3_ctx->ipa_wdi2_over_gsi) + return ipa3_disable_gsi_wdi_pipe(clnt_hdl); + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + result = ipa3_disable_data_path(clnt_hdl); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + clnt_hdl); + result = -EPERM; + goto uc_timeout; + } + + /** + * To avoid data stall during continuous SAP on/off before + * setting delay to IPA Consumer pipe, remove delay and enable + * holb on IPA Producer pipe + */ + if (IPA_CLIENT_IS_PROD(ep->client)) { + IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + /* remove delay on wlan-prod pipe*/ + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + + prod_hdl = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_CONS); + if (ipa3_ctx->ep[prod_hdl].valid == 1) { + result = ipa3_disable_data_path(prod_hdl); + if (result) { + IPAERR("disable data path failed\n"); + IPAERR("res=%d clnt=%d\n", + result, prod_hdl); + result = -EPERM; + goto uc_timeout; + } + } + usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC, + IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC); + + } + + disable.params.ipa_pipe_number = clnt_hdl; + result = 
ipa3_uc_send_cmd(disable.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_DISABLE, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + /* Set the delay after disabling IPA Producer pipe */ + if (IPA_CLIENT_IS_PROD(ep->client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + ep->uc_offload_state &= ~IPA_WDI_ENABLED; + IPADBG("client (ep: %d) disabled\n", clnt_hdl); + + +uc_timeout: + return result; +} + +int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + struct gsi_chan_info chan_info; + union __packed gsi_channel_scratch gsi_scratch; + + IPADBG("ep=%d\n", clnt_hdl); + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->gsi_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) { + IPAERR("WDI channel bad state %d\n", ep->gsi_offload_state); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) fail un-susp/delay result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl); + + result = gsi_start_channel(ep->gsi_chan_hdl); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("gsi_start_channel failed %d\n", result); + ipa_assert(); + } + gsi_query_channel_info(ep->gsi_chan_hdl, &chan_info); + gsi_read_channel_scratch(ep->gsi_chan_hdl, &gsi_scratch); + IPADBG("ch=%lu channel base = 0x%llx , event base 0x%llx\n", + ep->gsi_chan_hdl, + ep->gsi_mem_info.chan_ring_base_addr, + ep->gsi_mem_info.evt_ring_base_addr); + IPADBG("RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n", + chan_info.rp, chan_info.wp, chan_info.evt_valid, + chan_info.evt_rp, chan_info.evt_wp); + IPADBG("Scratch 0 = %x Scratch 1 = %x Scratch 2 = %x Scratch 3 = %x\n", + gsi_scratch.data.word1, gsi_scratch.data.word2, + gsi_scratch.data.word3, gsi_scratch.data.word4); + + ep->gsi_offload_state |= IPA_WDI_RESUMED; + IPADBG("exit\n"); + return result; +} + +/** + * ipa3_resume_wdi_pipe() - WDI client resume + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_resume_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + union IpaHwWdiCommonChCmdData_t resume; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + if (ipa3_ctx->ipa_wdi2_over_gsi) + return ipa3_resume_gsi_wdi_pipe(clnt_hdl); + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + resume.params.ipa_pipe_number = clnt_hdl; + + result = ipa3_uc_send_cmd(resume.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_RESUME, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + result = 
ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) fail un-susp/delay result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl); + + ep->uc_offload_state |= IPA_WDI_RESUMED; + IPADBG("client (ep: %d) resumed\n", clnt_hdl); + +uc_timeout: + return result; +} + +int ipa3_suspend_gsi_wdi_pipe(u32 clnt_hdl) +{ + int ipa_ep_idx; + struct ipa3_ep_context *ep; + int res = 0; + u32 source_pipe_bitmask = 0; + bool disable_force_clear = false; + struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 }; + int retry_cnt = 0; + struct gsi_chan_info chan_info; + union __packed gsi_channel_scratch gsi_scratch; + + ipa_ep_idx = ipa3_get_ep_mapping(ipa3_get_client_mapping(clnt_hdl)); + if (ipa_ep_idx < 0) { + IPAERR("IPA client mapping failed\n"); + return -EPERM; + } + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (ep->gsi_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED | + IPA_WDI_RESUMED)) { + IPAERR("WDI channel bad state %d\n", ep->gsi_offload_state); + return -EFAULT; + } + if (ep->valid) { + IPADBG("suspended pipe %d\n", ipa_ep_idx); + source_pipe_bitmask = 1 << + ipa3_get_ep_mapping(ep->client); + res = ipa3_enable_force_clear(clnt_hdl, + false, source_pipe_bitmask); + if (res) { + /* + * assuming here modem SSR, AP can remove + * the delay in this case + */ + IPAERR("failed to force clear %d\n", res); + IPAERR("remove delay from SCND reg\n"); + ep_ctrl_scnd.endp_delay = false; + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl, + &ep_ctrl_scnd); + } else { + disable_force_clear = true; + } +retry_gsi_stop: + res = ipa3_stop_gsi_channel(ipa_ep_idx); + if (res != 0 && res != -GSI_STATUS_AGAIN && + res != -GSI_STATUS_TIMED_OUT) { + IPAERR("failed to stop channel res = %d\n", res); + goto fail_stop_channel; + } else if (res == -GSI_STATUS_AGAIN) { + IPADBG("GSI stop channel failed retry cnt = %d\n", + retry_cnt); + retry_cnt++; + if (retry_cnt >= GSI_STOP_MAX_RETRY_CNT) + goto fail_stop_channel; + goto retry_gsi_stop; + } else { + IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl); + } + gsi_query_channel_info(ep->gsi_chan_hdl, &chan_info); + gsi_read_channel_scratch(ep->gsi_chan_hdl, &gsi_scratch); + IPADBG("ch=%lu channel base = 0x%llx , event base 0x%llx\n", + ep->gsi_chan_hdl, + ep->gsi_mem_info.chan_ring_base_addr, + ep->gsi_mem_info.evt_ring_base_addr); + IPADBG("RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx", + chan_info.rp, chan_info.wp, + chan_info.evt_valid, chan_info.evt_rp); + IPADBG("EWP=0x%llx\n", chan_info.evt_wp); + IPADBG("Scratch 0 = %x Scratch 1 = %x Scratch 2 = %x", + gsi_scratch.data.word1, gsi_scratch.data.word2, + gsi_scratch.data.word3); + IPADBG("Scratch 3 = %x\n", gsi_scratch.data.word4); + } + + if (disable_force_clear) + ipa3_disable_force_clear(clnt_hdl); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + ep->gsi_offload_state &= ~IPA_WDI_RESUMED; + return res; +fail_stop_channel: + ipa_assert(); + return res; +} + +/** + * ipa3_suspend_wdi_pipe() - WDI client suspend + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_suspend_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + union IpaHwWdiCommonChCmdData_t suspend; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + u32 source_pipe_bitmask = 0; + bool disable_force_clear = false; + struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 }; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + 
ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + if (ipa3_ctx->ipa_wdi2_over_gsi) + return ipa3_suspend_gsi_wdi_pipe(clnt_hdl); + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED | + IPA_WDI_RESUMED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + suspend.params.ipa_pipe_number = clnt_hdl; + + if (IPA_CLIENT_IS_PROD(ep->client)) { + /* + * For WDI 2.0 need to ensure pipe will be empty before suspend + * as IPA uC will fail to suspend the pipe otherwise. + */ + if (ipa3_ctx->ipa_wdi2) { + source_pipe_bitmask = 1 << + ipa3_get_ep_mapping(ep->client); + result = ipa3_enable_force_clear(clnt_hdl, + false, source_pipe_bitmask); + if (result) { + /* + * assuming here modem SSR, AP can remove + * the delay in this case + */ + IPAERR("failed to force clear %d\n", result); + IPAERR("remove delay from SCND reg\n"); + ep_ctrl_scnd.endp_delay = false; + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl, + &ep_ctrl_scnd); + } else { + disable_force_clear = true; + } + } + + IPADBG("Post suspend event first for IPA Producer\n"); + IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl); + result = ipa3_uc_send_cmd(suspend.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + } + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + if (IPA_CLIENT_IS_CONS(ep->client)) { + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + ep_cfg_ctrl.ipa_ep_suspend = true; + result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("(ep: %d) failed to suspend result=%d\n", + clnt_hdl, result); + else + IPADBG("(ep: %d) suspended\n", clnt_hdl); + } + } else { + ep_cfg_ctrl.ipa_ep_delay = true; + result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) failed to delay result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) delayed\n", clnt_hdl); + } + + if (IPA_CLIENT_IS_CONS(ep->client)) { + result = ipa3_uc_send_cmd(suspend.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + } + + if (disable_force_clear) + ipa3_disable_force_clear(clnt_hdl); + + ipa3_ctx->tag_process_before_gating = true; + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + ep->uc_offload_state &= ~IPA_WDI_RESUMED; + IPADBG("client (ep: %d) suspended\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa_broadcast_wdi_quota_reach_ind() - quota reach + * @uint32_t fid: [in] input netdev ID + * @uint64_t num_bytes: [in] used bytes + * + * Returns: 0 on success, negative on failure + */ +int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, + uint64_t num_bytes) +{ + IPAERR("Quota reached indication on fid(%d) Mbytes(%lu)\n", + fid, (unsigned long)num_bytes); + ipa3_broadcast_quota_reach_ind(0, IPA_UPSTEAM_WLAN); + return 0; +} + +int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id) +{ + int result = 0; + struct ipa3_ep_context *ep; + union __packed gsi_wdi_channel_scratch3_reg gsi_scratch; + + memset(&gsi_scratch, 0, sizeof(gsi_scratch)); + ep = &ipa3_ctx->ep[clnt_hdl]; + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + gsi_scratch.wdi.qmap_id = qmap_id; + 
gsi_scratch.wdi.endp_metadatareg_offset = ipahal_get_reg_mn_ofst( + IPA_ENDP_INIT_HDR_METADATA_n, 0, clnt_hdl)/4; + + result = gsi_write_channel_scratch3_reg(ep->gsi_chan_hdl, gsi_scratch); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("gsi_write_channel_scratch failed %d\n", + result); + goto fail_write_channel_scratch; + } + + IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return 0; +fail_write_channel_scratch: + ipa_assert(); + return result; +} +int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id) +{ + int result = 0; + struct ipa3_ep_context *ep; + union IpaHwWdiRxExtCfgCmdData_t qmap; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR_RL("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + if (ipa3_ctx->ipa_wdi2_over_gsi) + return ipa3_write_qmapid_gsi_wdi_pipe(clnt_hdl, qmap_id); + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) { + IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + qmap.params.ipa_pipe_number = clnt_hdl; + qmap.params.qmap_id = qmap_id; + + result = ipa3_uc_send_cmd(qmap.raw32b, + IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id); + +uc_timeout: + return result; +} + +/** + * ipa3_uc_reg_rdyCB() - To register uC + * ready CB if uC not ready + * @inout: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa3_uc_reg_rdyCB( + struct ipa_wdi_uc_ready_params *inout) +{ + int result = 0; + + if (inout == NULL) { + IPAERR("bad parm. inout=%pK ", inout); + return -EINVAL; + } + + result = ipa3_uc_state_check(); + if (result) { + inout->is_uC_ready = false; + ipa3_ctx->uc_wdi_ctx.uc_ready_cb = inout->notify; + ipa3_ctx->uc_wdi_ctx.priv = inout->priv; + } else { + inout->is_uC_ready = true; + } + + return 0; +} + +/** + * ipa3_uc_dereg_rdyCB() - To de-register uC ready CB + * + * Returns: 0 on success, negative on failure + * + */ +int ipa3_uc_dereg_rdyCB(void) +{ + ipa3_ctx->uc_wdi_ctx.uc_ready_cb = NULL; + ipa3_ctx->uc_wdi_ctx.priv = NULL; + + return 0; +} + + +/** + * ipa3_uc_wdi_get_dbpa() - To retrieve + * doorbell physical address of wlan pipes + * @param: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa3_uc_wdi_get_dbpa( + struct ipa_wdi_db_params *param) +{ + if (param == NULL || param->client >= IPA_CLIENT_MAX) { + IPAERR("bad parm. 
param=%pK ", param); + if (param) + IPAERR("client = %d\n", param->client); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(param->client)) { + param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } else { + param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } + + return 0; +} + +static void ipa3_uc_wdi_loaded_handler(void) +{ + if (!ipa3_ctx) { + IPAERR("IPA ctx is null\n"); + return; + } + + if (ipa3_ctx->uc_wdi_ctx.uc_ready_cb) { + ipa3_ctx->uc_wdi_ctx.uc_ready_cb( + ipa3_ctx->uc_wdi_ctx.priv); + + ipa3_ctx->uc_wdi_ctx.uc_ready_cb = + NULL; + ipa3_ctx->uc_wdi_ctx.priv = NULL; + } +} + +int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN); + int i; + int ret = 0; + int prot = IOMMU_READ | IOMMU_WRITE; + + if (!info) { + IPAERR("info = %pK\n", info); + return -EINVAL; + } + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]) { + IPAERR("IPA SMMU not enabled\n"); + return -EINVAL; + } + + for (i = 0; i < num_buffers; i++) { + IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i, + &info[i].pa, info[i].iova, info[i].size); + info[i].result = ipa3_iommu_map(cb->iommu, + rounddown(info[i].iova, PAGE_SIZE), + rounddown(info[i].pa, PAGE_SIZE), + roundup(info[i].size + info[i].pa - + rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE), + prot); + } + + return ret; +} + +int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN); + int i; + int ret = 0; + + if (!info) { + IPAERR("info = %pK\n", info); + return -EINVAL; + } + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + for (i = 0; i < num_buffers; i++) { + IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i, + &info[i].pa, info[i].iova, info[i].size); + info[i].result = iommu_unmap(cb->iommu, + rounddown(info[i].iova, PAGE_SIZE), + roundup(info[i].size + info[i].pa - + rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE)); + } + + return ret; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c new file mode 100644 index 000000000000..2a4baf8f0a44 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -0,0 +1,7478 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include /* gen_pool_alloc() */ +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" +#include "ipahal/ipahal_hw_stats.h" +#include "../ipa_rm_i.h" + +/* + * The following for adding code (ie. for EMULATION) not found on x86. 
+ */ +#if defined(CONFIG_IPA_EMULATION) +# include "ipa_emulation_stubs.h" +#endif + +#define IPA_V3_0_CLK_RATE_SVS2 (37.5 * 1000 * 1000UL) +#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL) +#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL) +#define IPA_V3_0_CLK_RATE_TURBO (200 * 1000 * 1000UL) + +#define IPA_V3_5_CLK_RATE_SVS2 (100 * 1000 * 1000UL) +#define IPA_V3_5_CLK_RATE_SVS (200 * 1000 * 1000UL) +#define IPA_V3_5_CLK_RATE_NOMINAL (400 * 1000 * 1000UL) +#define IPA_V3_5_CLK_RATE_TURBO (42640 * 10 * 1000UL) + +#define IPA_V4_0_CLK_RATE_SVS2 (60 * 1000 * 1000UL) +#define IPA_V4_0_CLK_RATE_SVS (125 * 1000 * 1000UL) +#define IPA_V4_0_CLK_RATE_NOMINAL (220 * 1000 * 1000UL) +#define IPA_V4_0_CLK_RATE_TURBO (250 * 1000 * 1000UL) + +#define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967296 - 1) + +#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000) +#define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600) +#define IPA_V3_0_BW_THRESHOLD_SVS_MBPS (310) + +#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000 +#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10 + +/* Max pipes + ICs for TAG process */ +#define IPA_TAG_MAX_DESC (IPA3_MAX_NUM_PIPES + 6) + +#define IPA_TAG_SLEEP_MIN_USEC (1000) +#define IPA_TAG_SLEEP_MAX_USEC (2000) +#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ) +#define IPA_BCR_REG_VAL_v3_0 (0x00000001) +#define IPA_BCR_REG_VAL_v3_5 (0x0000003B) +#define IPA_BCR_REG_VAL_v4_0 (0x00000039) +#define IPA_BCR_REG_VAL_v4_2 (0x00000000) +#define IPA_AGGR_GRAN_MIN (1) +#define IPA_AGGR_GRAN_MAX (32) +#define IPA_EOT_COAL_GRAN_MIN (1) +#define IPA_EOT_COAL_GRAN_MAX (16) + +#define IPA_FILT_ROUT_HASH_REG_VAL_v4_2 (0x00000000) +#define IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC (15) + +#define IPA_AGGR_BYTE_LIMIT (\ + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \ + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT) +#define IPA_AGGR_PKT_LIMIT (\ + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \ + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT) + +/* In IPAv3 only endpoints 0-3 can be configured to deaggregation */ +#define IPA_EP_SUPPORTS_DEAGGR(idx) ((idx) >= 0 && (idx) <= 3) + +#define IPA_TAG_TIMER_TIMESTAMP_SHFT (14) /* ~0.8msec */ +#define IPA_NAT_TIMER_TIMESTAMP_SHFT (24) /* ~0.8sec */ + +/* + * Units of time per a specific granularity + * The limitation based on H/W HOLB/AGGR time limit field width + */ +#define IPA_TIMER_SCALED_TIME_LIMIT 31 + +/* HPS, DPS sequencers Types*/ + +/* DMA Only */ +#define IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY 0x00000000 +/* DMA + decipher */ +#define IPA_DPS_HPS_SEQ_TYPE_DMA_DEC 0x00000011 +/* Packet Processing + no decipher + uCP (for Ethernet Bridging) */ +#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP 0x00000002 +/* Packet Processing + decipher + uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_UCP 0x00000013 +/* Packet Processing + no decipher + no uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP 0x00000006 +/* Packet Processing + decipher + no uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_NO_UCP 0x00000017 +/* 2 Packet Processing pass + no decipher + uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP 0x00000004 +/* 2 Packet Processing pass + decipher + uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP 0x00000015 +/* 2 Packet Processing pass + no decipher + uCP + HPS REP DMA Parser. 
*/ +#define IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP 0x00000804 +/* Packet Processing + no decipher + no uCP + HPS REP DMA Parser.*/ +#define IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP 0x00000806 +/* COMP/DECOMP */ +#define IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP 0x00000020 +/* 2 Packet Processing + no decipher + 2 uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_2ND_UCP 0x0000000a +/* 2 Packet Processing + decipher + 2 uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_2ND_UCP 0x0000001b +/* 3 Packet Processing + no decipher + 2 uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_3RD_PKT_PROCESS_PASS_NO_DEC_2ND_UCP 0x0000000c +/* 3 Packet Processing + decipher + 2 uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_3RD_PKT_PROCESS_PASS_DEC_2ND_UCP 0x0000001d +/* 2 Packet Processing + no decipher + 2 uCP + HPS REP DMA Parser */ +#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_2ND_UCP_DMAP 0x0000080a +/* 3 Packet Processing + no decipher + 2 uCP + HPS REP DMA Parser */ +#define IPA_DPS_HPS_SEQ_TYPE_3RD_PKT_PROCESS_PASS_NO_DEC_2ND_UCP_DMAP 0x0000080c +/* Invalid sequencer type */ +#define IPA_DPS_HPS_SEQ_TYPE_INVALID 0xFFFFFFFF + +#define IPA_DPS_HPS_SEQ_TYPE_IS_DMA(seq_type) \ + (seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY || \ + seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_DEC || \ + seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP) + + +/* Resource Group index*/ +#define IPA_v3_0_GROUP_UL (0) +#define IPA_v3_0_GROUP_DL (1) +#define IPA_v3_0_GROUP_DPL IPA_v3_0_GROUP_DL +#define IPA_v3_0_GROUP_DIAG (2) +#define IPA_v3_0_GROUP_DMA (3) +#define IPA_v3_0_GROUP_IMM_CMD IPA_v3_0_GROUP_UL +#define IPA_v3_0_GROUP_Q6ZIP (4) +#define IPA_v3_0_GROUP_Q6ZIP_GENERAL IPA_v3_0_GROUP_Q6ZIP +#define IPA_v3_0_GROUP_UC_RX_Q (5) +#define IPA_v3_0_GROUP_Q6ZIP_ENGINE IPA_v3_0_GROUP_UC_RX_Q +#define IPA_v3_0_GROUP_MAX (6) + +#define IPA_v3_5_GROUP_LWA_DL (0) /* currently not used */ +#define IPA_v3_5_MHI_GROUP_PCIE IPA_v3_5_GROUP_LWA_DL +#define IPA_v3_5_GROUP_UL_DL (1) +#define IPA_v3_5_MHI_GROUP_DDR IPA_v3_5_GROUP_UL_DL +#define IPA_v3_5_MHI_GROUP_DMA (2) +#define IPA_v3_5_GROUP_UC_RX_Q (3) /* currently not used */ +#define IPA_v3_5_SRC_GROUP_MAX (4) +#define IPA_v3_5_DST_GROUP_MAX (3) + +#define IPA_v4_0_GROUP_LWA_DL (0) +#define IPA_v4_0_MHI_GROUP_PCIE (0) +#define IPA_v4_0_ETHERNET (0) +#define IPA_v4_0_GROUP_UL_DL (1) +#define IPA_v4_0_MHI_GROUP_DDR (1) +#define IPA_v4_0_MHI_GROUP_DMA (2) +#define IPA_v4_0_GROUP_UC_RX_Q (3) +#define IPA_v4_0_SRC_GROUP_MAX (4) +#define IPA_v4_0_DST_GROUP_MAX (4) + +#define IPA_v4_2_GROUP_UL_DL (0) +#define IPA_v4_2_SRC_GROUP_MAX (1) +#define IPA_v4_2_DST_GROUP_MAX (1) + +#define IPA_v4_5_MHI_GROUP_PCIE (0) +#define IPA_v4_5_GROUP_UL_DL_DST (0) +#define IPA_v4_5_GROUP_UL_DL_SRC (1) +#define IPA_v4_5_MHI_GROUP_DDR (1) +#define IPA_v4_5_MHI_GROUP_DMA (2) +#define IPA_v4_5_MHI_GROUP_QDSS (3) +#define IPA_v4_5_GROUP_UC_RX_Q (4) +#define IPA_v4_5_SRC_GROUP_MAX (5) +#define IPA_v4_5_DST_GROUP_MAX (5) + +#define IPA_GROUP_MAX IPA_v3_0_GROUP_MAX + +enum ipa_rsrc_grp_type_src { + IPA_v3_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS, + IPA_v3_0_RSRC_GRP_TYPE_SRC_HDR_SECTORS, + IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER, + IPA_v3_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS, + IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS, + IPA_v3_0_RSRC_GRP_TYPE_SRC_HPS_DMARS, + IPA_v3_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES, + IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX, + + IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS, + 
IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS, + IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES, + IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX, + + IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS, + IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS, + IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES, + IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX +}; + +#define IPA_RSRC_GRP_TYPE_SRC_MAX IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX + +enum ipa_rsrc_grp_type_dst { + IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS, + IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS, + IPA_v3_0_RSRC_GRP_TYPE_DST_DPS_DMARS, + IPA_v3_0_RSRC_GRP_TYPE_DST_MAX, + + IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0, + IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS, + IPA_v3_5_RSRC_GRP_TYPE_DST_MAX, + + IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0, + IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS, + IPA_v4_0_RSRC_GRP_TYPE_DST_MAX, +}; +#define IPA_RSRC_GRP_TYPE_DST_MAX IPA_v3_0_RSRC_GRP_TYPE_DST_MAX + +enum ipa_rsrc_grp_type_rx { + IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ, + IPA_RSRC_GRP_TYPE_RX_MAX +}; + +enum ipa_rsrc_grp_rx_hps_weight_config { + IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG, + IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_MAX +}; + +struct rsrc_min_max { + u32 min; + u32 max; +}; + +enum ipa_ver { + IPA_3_0, + IPA_3_5, + IPA_3_5_MHI, + IPA_3_5_1, + IPA_4_0, + IPA_4_0_MHI, + IPA_4_1, + IPA_4_2, + IPA_4_5, + IPA_4_5_MHI, + IPA_VER_MAX, +}; + + +static const struct rsrc_min_max ipa3_rsrc_src_grp_config + [IPA_VER_MAX][IPA_RSRC_GRP_TYPE_SRC_MAX][IPA_GROUP_MAX] = { + [IPA_3_0] = { + /* UL DL DIAG DMA Not Used uC Rx */ + [IPA_v3_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {3, 255}, {3, 255}, {1, 255}, {1, 255}, {1, 255}, {2, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_HDR_SECTORS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {19, 19}, {26, 26}, {3, 3}, {7, 7}, {0, 0}, {8, 8} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} }, + }, + [IPA_3_5] = { + /* LWA_DL UL_DL unused UC_RX_Q, other are invalid */ + [IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {0, 0}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {0, 0}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {0, 0}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {0, 0}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} }, + }, + [IPA_3_5_MHI] = { + /* PCIE DDR DMA unused, other are invalid */ + [IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {12, 12}, {12, 12}, {8, 8}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, 
{0, 255}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {14, 14}, {14, 14}, {14, 14}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_3_5_1] = { + /* LWA_DL UL_DL unused UC_RX_Q, other are invalid */ + [IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {1, 255}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} }, + }, + [IPA_4_0] = { + /* LWA_DL UL_DL unused UC_RX_Q, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {1, 255}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} }, + }, + [IPA_4_0_MHI] = { + /* PCIE DDR DMA unused, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {12, 12}, {12, 12}, {8, 8}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {14, 14}, {14, 14}, {14, 14}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_4_1] = { + /* LWA_DL UL_DL unused UC_RX_Q, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {1, 63}, {1, 63}, {0, 0}, {1, 63}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 63}, {0, 63}, {0, 63}, {0, 63}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} }, + }, + [IPA_4_2] = { + /* UL_DL other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {3, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {3, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {10, 10}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {1, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {5, 5}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_4_5] = { + /* unused UL_DL_SRC unused unused UC_RX_Q N/A */ + [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {0, 0}, {1, 63}, {0, 0}, {0, 0}, {1, 63}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {0, 0}, {14, 14}, {0, 0}, {0, 0}, {3, 3}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {0, 0}, {18, 18}, {0, 0}, {0, 0}, {8, 8}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 0}, {0, 63}, 
{0, 0}, {0, 0}, {0, 63}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {0, 0}, {24, 24}, {0, 0}, {0, 0}, {8, 8}, {0, 0} }, + }, + [IPA_4_5_MHI] = { + /* PCIE DDR DMA QDSS unused N/A N/A */ + [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {3, 8}, {4, 11}, {1, 1}, {1, 1}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {9, 9}, {12, 12}, {2, 2}, {2, 2}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {9, 9}, {14, 14}, {4, 4}, {4, 4}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 63}, {0, 63}, {0, 63}, {0, 63}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {22, 22}, {16, 16}, {6, 6}, {2, 2}, {0, 0}, {0, 0} }, + }, +}; + +static const struct rsrc_min_max ipa3_rsrc_dst_grp_config + [IPA_VER_MAX][IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = { + [IPA_3_0] = { + /* UL DL/DPL DIAG DMA Q6zip_gen Q6zip_eng */ + [IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} }, + [IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} }, + }, + [IPA_3_5] = { + /* unused UL/DL/DPL unused N/A N/A N/A */ + [IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_3_5_MHI] = { + /* PCIE DDR DMA N/A N/A N/A */ + [IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_3_5_1] = { + /* LWA_DL UL/DL/DPL unused N/A N/A N/A */ + [IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_4_0] = { + /* LWA_DL UL/DL/DPL uC, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 255}, {1, 255}, {1, 2}, {0, 2}, {0, 0}, {0, 0} }, + }, + [IPA_4_0_MHI] = { + /* LWA_DL UL/DL/DPL uC, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 255}, {1, 255}, {1, 2}, {0, 2}, {0, 0}, {0, 0} }, + }, + [IPA_4_1] = { + /* LWA_DL UL/DL/DPL uC, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 63}, {1, 63}, {1, 2}, {0, 2}, {0, 0}, {0, 0} }, + }, + [IPA_4_2] = { + /* UL/DL/DPL, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {3, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {1, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_4_5] = { + /* UL/DL/DPL_DST unused unused unused uC N/A */ + [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {16, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 2}, {0, 0} }, + }, + [IPA_4_5_MHI] = { + /* PCIE/DPL DDR DMA/CV2X QDSS uC N/A */ + [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {16, 16}, {5, 5}, {2, 2}, {2, 2}, {0, 0}, {0, 0} }, + 
[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} }, + }, +}; + +static const struct rsrc_min_max ipa3_rsrc_rx_grp_config + [IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = { + [IPA_3_0] = { + /* UL DL DIAG DMA unused uC Rx */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} }, + }, + [IPA_3_5] = { + /* unused UL_DL unused UC_RX_Q N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {0, 0}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} }, + }, + [IPA_3_5_MHI] = { + /* PCIE DDR DMA unused N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + { 3, 3 }, { 7, 7 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, + }, + [IPA_3_5_1] = { + /* LWA_DL UL_DL unused UC_RX_Q N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} }, + }, + [IPA_4_0] = { + /* LWA_DL UL_DL unused UC_RX_Q, other are invalid */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} }, + }, + [IPA_4_0_MHI] = { + /* PCIE DDR DMA unused N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + { 3, 3 }, { 7, 7 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, + }, + [IPA_4_1] = { + /* LWA_DL UL_DL unused UC_RX_Q, other are invalid */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} }, + }, + [IPA_4_2] = { + /* UL_DL, other are invalid */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {4, 4}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_4_5] = { + /* unused UL_DL unused unused UC_RX_Q N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {0, 0}, {3, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_4_5_MHI] = { + /* PCIE DDR DMA QDSS unused N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + { 3, 3 }, {3, 3}, {3, 3}, {3, 3}, {0, 0}, { 0, 0 } }, + }, + +}; + +static const u32 ipa3_rsrc_rx_grp_hps_weight_config + [IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_MAX][IPA_GROUP_MAX] = { + [IPA_3_0] = { + /* UL DL DIAG DMA unused uC Rx */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 0, 0, 0, 0, 0, 0 }, + }, + [IPA_3_5] = { + /* unused UL_DL unused UC_RX_Q N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 }, + }, + [IPA_3_5_MHI] = { + /* PCIE DDR DMA unused N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 3, 5, 1, 1, 0, 0 }, + }, + [IPA_3_5_1] = { + /* LWA_DL UL_DL unused UC_RX_Q N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 }, + }, + [IPA_4_0] = { + /* LWA_DL UL_DL unused UC_RX_Q N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 }, + }, + [IPA_4_0_MHI] = { + /* PCIE DDR DMA unused N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 3, 5, 1, 1, 0, 0 }, + }, + [IPA_4_1] = { + /* LWA_DL UL_DL unused UC_RX_Q, other are invalid */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 }, + }, +}; + +enum ipa_ees { + IPA_EE_AP = 0, + IPA_EE_Q6 = 1, + IPA_EE_UC = 2, +}; + +enum ipa_qmb_instance_type { + IPA_QMB_INSTANCE_DDR = 0, + IPA_QMB_INSTANCE_PCIE = 1, + IPA_QMB_INSTANCE_MAX +}; + +#define QMB_MASTER_SELECT_DDR IPA_QMB_INSTANCE_DDR +#define QMB_MASTER_SELECT_PCIE IPA_QMB_INSTANCE_PCIE + +struct ipa_qmb_outstanding { + u16 ot_reads; + u16 ot_writes; +}; + +static const struct ipa_qmb_outstanding ipa3_qmb_outstanding + [IPA_VER_MAX][IPA_QMB_INSTANCE_MAX] = { + [IPA_3_0][IPA_QMB_INSTANCE_DDR] = {8, 8}, + [IPA_3_0][IPA_QMB_INSTANCE_PCIE] = {8, 2}, + [IPA_3_5][IPA_QMB_INSTANCE_DDR] = {8, 8}, + [IPA_3_5][IPA_QMB_INSTANCE_PCIE] = {12, 4}, + [IPA_3_5_MHI][IPA_QMB_INSTANCE_DDR] = {8, 
8}, + [IPA_3_5_MHI][IPA_QMB_INSTANCE_PCIE] = {12, 4}, + [IPA_3_5_1][IPA_QMB_INSTANCE_DDR] = {8, 8}, + [IPA_3_5_1][IPA_QMB_INSTANCE_PCIE] = {12, 4}, + [IPA_4_0][IPA_QMB_INSTANCE_DDR] = {12, 8}, + [IPA_4_0][IPA_QMB_INSTANCE_PCIE] = {12, 4}, + [IPA_4_0_MHI][IPA_QMB_INSTANCE_DDR] = {12, 8}, + [IPA_4_0_MHI][IPA_QMB_INSTANCE_PCIE] = {12, 4}, + [IPA_4_1][IPA_QMB_INSTANCE_DDR] = {12, 8}, + [IPA_4_1][IPA_QMB_INSTANCE_PCIE] = {12, 4}, + [IPA_4_2][IPA_QMB_INSTANCE_DDR] = {12, 8}, + [IPA_4_5][IPA_QMB_INSTANCE_DDR] = {16, 8}, + [IPA_4_5][IPA_QMB_INSTANCE_PCIE] = {12, 8}, + [IPA_4_5_MHI][IPA_QMB_INSTANCE_DDR] = {16, 8}, + [IPA_4_5_MHI][IPA_QMB_INSTANCE_PCIE] = {12, 8}, +}; + +struct ipa_ep_configuration { + bool valid; + int group_num; + bool support_flt; + int sequencer_type; + u8 qmb_master_sel; + struct ipa_gsi_ep_config ipa_gsi_ep_info; +}; + +/* clients not included in the list below are considered as invalid */ +static const struct ipa_ep_configuration ipa3_ep_mapping + [IPA_VER_MAX][IPA_CLIENT_MAX] = { + [IPA_3_0][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 10, 1, 8, 16, IPA_EE_UC } }, + [IPA_3_0][IPA_CLIENT_USB_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 3, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 14, 11, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 5, 16, 32, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v3_0_GROUP_IMM_CMD, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 22, 6, 18, 28, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_ODU_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 12, 9, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_MHI_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_PCIE, + { 0, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_Q6_LAN_PROD] = { + true, IPA_v3_0_GROUP_UL, false, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 9, 4, 8, 12, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v3_0_GROUP_DL, true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 5, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v3_0_GROUP_IMM_CMD, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 1, 18, 28, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD] = { + true, IPA_v3_0_GROUP_Q6ZIP, + false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 7, 2, 0, 0, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD] = { + true, IPA_v3_0_GROUP_Q6ZIP, + false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 3, 0, 0, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = { + true, IPA_v3_0_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_PCIE, + { 12, 9, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = { + true, IPA_v3_0_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_PCIE, + { 13, 10, 8, 16, 
IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_ETHERNET_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {2, 0, 8, 16, IPA_EE_UC} }, + /* Only for test purpose */ + [IPA_3_0][IPA_CLIENT_TEST_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 3, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 3, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 5, 16, 32, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 12, 9, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 13, 10, 8, 16, IPA_EE_AP } }, + + [IPA_3_0][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 25, 4, 8, 8, IPA_EE_UC } }, + [IPA_3_0][IPA_CLIENT_WLAN2_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 27, 4, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_WLAN3_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 28, 13, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_WLAN4_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 29, 14, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_USB_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 26, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v3_0_GROUP_DPL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 2, 8, 12, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v3_0_GROUP_UL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 7, 8, 12, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 8, 8, 12, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_ODU_EMB_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 23, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_MHI_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 23, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 6, 8, 12, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v3_0_GROUP_UL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 5, 8, 12, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_DUN_CONS] = { + true, IPA_v3_0_GROUP_DIAG, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 30, 7, 4, 4, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS] = { + true, IPA_v3_0_GROUP_Q6ZIP, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 21, 8, 4, 4, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS] = { + true, IPA_v3_0_GROUP_Q6ZIP, false, + 
IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 4, 9, 4, 4, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = { + true, IPA_v3_0_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 28, 13, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = { + true, IPA_v3_0_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 29, 14, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_ETHERNET_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + {24, 3, 8, 8, IPA_EE_UC} }, + /* Only for test purpose */ + [IPA_3_0][IPA_CLIENT_TEST_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 26, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 26, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 27, 4, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 28, 13, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 29, 14, 8, 8, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_0][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_3_5 */ + [IPA_3_5][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 1, 8, 16, IPA_EE_UC } }, + [IPA_3_5][IPA_CLIENT_USB_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 7, 8, 16, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 9, 8, 16, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 5, 4, 20, 23, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_ODU_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_UC } }, + [IPA_3_5][IPA_CLIENT_Q6_LAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_3_5][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 23, IPA_EE_Q6 } }, + /* Only for test purpose */ + [IPA_3_5][IPA_CLIENT_TEST_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 7, 8, 16, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 7, 8, 16, 
IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {7, 8, 8, 16, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 9, 8, 16, IPA_EE_AP } }, + + [IPA_3_5][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 3, 8, 8, IPA_EE_UC } }, + [IPA_3_5][IPA_CLIENT_WLAN2_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_WLAN3_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 13, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_USB_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 17, 11, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 10, 4, 6, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 9, 5, 8, 12, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 6, 8, 12, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_ODU_EMB_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 3, 8, 12, IPA_EE_Q6 } }, + [IPA_3_5][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 8, 12, IPA_EE_Q6 } }, + /* Only for test purpose */ + /* MBIM aggregation test pipes should have the same QMB as USB_CONS */ + [IPA_3_5][IPA_CLIENT_TEST_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 15, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 17, 11, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 19, 13, 8, 8, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_5][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_3_5_MHI */ + [IPA_3_5_MHI][IPA_CLIENT_USB_PROD] = { + false, IPA_EP_NOT_ALLOCATED, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + 
QMB_MASTER_SELECT_DDR, + { -1, -1, -1, -1, -1 } }, + [IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v3_5_MHI_GROUP_DDR, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 5, 4, 20, 23, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_MHI_PROD] = { + true, IPA_v3_5_MHI_GROUP_PCIE, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_PCIE, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_PROD] = { + true, IPA_v3_5_MHI_GROUP_DDR, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v3_5_MHI_GROUP_DDR, true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 4, 10, 30, IPA_EE_Q6 } }, + [IPA_3_5_MHI][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v3_5_MHI_GROUP_PCIE, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 23, IPA_EE_Q6 } }, + [IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 7, 8, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 8, 9, 8, 16, IPA_EE_AP } }, + /* Only for test purpose */ + [IPA_3_5_MHI][IPA_CLIENT_TEST_PROD] = { + true, IPA_v3_5_MHI_GROUP_DDR, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 7, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST1_PROD] = { + 0, IPA_v3_5_MHI_GROUP_DDR, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 7, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v3_5_MHI_GROUP_PCIE, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_PCIE, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v3_5_MHI_GROUP_DMA, true, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 7, 8, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v3_5_MHI_GROUP_DMA, true, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 8, 9, 8, 16, IPA_EE_AP } }, + + [IPA_3_5_MHI][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 3, 8, 8, IPA_EE_UC } }, + [IPA_3_5_MHI][IPA_CLIENT_USB_CONS] = { + false, IPA_EP_NOT_ALLOCATED, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { -1, -1, -1, -1, -1 } }, + [IPA_3_5_MHI][IPA_CLIENT_USB_DPL_CONS] = { + false, IPA_EP_NOT_ALLOCATED, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { -1, -1, -1, -1, -1 } }, + [IPA_3_5_MHI][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 9, 5, 8, 12, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 6, 8, 12, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_MHI_CONS] = { + true, IPA_v3_5_MHI_GROUP_PCIE, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 15, 1, 8, 8, IPA_EE_AP } }, + 
[IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 3, 8, 12, IPA_EE_Q6 } }, + [IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 8, 12, IPA_EE_Q6 } }, + [IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 18, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 19, 13, 8, 8, IPA_EE_AP } }, + /* Only for test purpose */ + [IPA_3_5_MHI][IPA_CLIENT_TEST_CONS] = { + true, IPA_v3_5_MHI_GROUP_PCIE, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 15, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v3_5_MHI_GROUP_PCIE, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 15, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 11, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 18, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 19, 13, 8, 8, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_5_MHI][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_3_5_1 */ + [IPA_3_5_1][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 7, 1, 8, 16, IPA_EE_UC } }, + [IPA_3_5_1][IPA_CLIENT_USB_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 0, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 7, 8, 16, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 5, 4, 20, 23, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_Q6_LAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_3_5_1][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 4, 12, 30, IPA_EE_Q6 } }, + [IPA_3_5_1][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 23, IPA_EE_Q6 } }, + /* Only for test purpose */ + [IPA_3_5_1][IPA_CLIENT_TEST_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 0, 8, 16, 
IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 23, IPA_EE_Q6 } }, + [IPA_3_5_1][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_UC } }, + + [IPA_3_5_1][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 3, 8, 8, IPA_EE_UC } }, + [IPA_3_5_1][IPA_CLIENT_WLAN2_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 9, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_WLAN3_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 10, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_USB_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 8, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 2, 4, 6, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 9, 5, 8, 12, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 6, 8, 12, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 3, 8, 12, IPA_EE_Q6 } }, + [IPA_3_5_1][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 8, 12, IPA_EE_Q6 } }, + /* Only for test purpose */ + [IPA_3_5_1][IPA_CLIENT_TEST_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 8, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 8, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 9, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 10, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 2, 4, 6, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_5_1][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_4_0 */ + [IPA_4_0][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + 
QMB_MASTER_SELECT_DDR, + { 6, 2, 8, 16, IPA_EE_UC } }, + [IPA_4_0][IPA_CLIENT_USB_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 10, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 5, 4, 20, 24, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_ODU_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_ETHERNET_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 9, 0, 8, 16, IPA_EE_UC } }, + [IPA_4_0][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_4_0][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 24, IPA_EE_Q6 } }, + /* Only for test purpose */ + [IPA_4_0][IPA_CLIENT_TEST_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 7, 9, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {8, 10, 8, 16, IPA_EE_AP } }, + + + [IPA_4_0][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 3, 6, 9, IPA_EE_UC } }, + [IPA_4_0][IPA_CLIENT_WLAN2_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 20, 13, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_WLAN3_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 21, 14, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_USB_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 12, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 7, 5, 5, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 5, 9, 9, 
IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 6, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_ODU_EMB_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 1, 17, 17, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_ETHERNET_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 22, 1, 17, 17, IPA_EE_UC } }, + [IPA_4_0][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 4, 9, 9, IPA_EE_Q6 } }, + [IPA_4_0][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 3, 9, 9, IPA_EE_Q6 } }, + [IPA_4_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 5, 9, 9, IPA_EE_Q6 } }, + /* Only for test purpose */ + /* MBIM aggregation test pipes should have the same QMB as USB_CONS */ + [IPA_4_0][IPA_CLIENT_TEST_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 6, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 6, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 5, 5, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 12, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 21, 14, 9, 9, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_4_0][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_4_0_MHI */ + [IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v4_0_MHI_GROUP_DDR, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 5, 4, 20, 24, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_MHI_PROD] = { + true, IPA_v4_0_MHI_GROUP_PCIE, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_PCIE, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_4_0_MHI][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v4_0_MHI_GROUP_PCIE, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 24, IPA_EE_Q6 } }, + [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = { + true, IPA_v4_0_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 7, 9, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = { + true, IPA_v4_0_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 8, 10, 8, 16, IPA_EE_AP } }, + /* 
Only for test purpose */ + [IPA_4_0_MHI][IPA_CLIENT_TEST_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 7, 9, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 10, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 5, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 6, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_MHI_CONS] = { + true, IPA_v4_0_MHI_GROUP_PCIE, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 17, 1, 17, 17, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 4, 9, 9, IPA_EE_Q6 } }, + [IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 3, 9, 9, IPA_EE_Q6 } }, + [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = { + true, IPA_v4_0_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 20, 13, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = { + true, IPA_v4_0_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 21, 14, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 5, 9, 9, IPA_EE_Q6 } }, + /* Only for test purpose */ + [IPA_4_0_MHI][IPA_CLIENT_TEST_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 11, 6, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 11, 6, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 5, 5, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 19, 12, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 21, 14, 9, 9, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_4_0_MHI][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, + /* IPA_4_1 */ + 
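+	/*
+	 * Entry layout, assumed from the struct ipa_ep_configuration /
+	 * struct ipa_gsi_ep_config declarations used by this table: each row
+	 * is { valid, resource group, flt support, DPS/HPS sequencer type,
+	 * QMB master select, GSI endpoint info }, and the inner braces are
+	 * roughly { ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos,
+	 * ee }, with newer HW rows (4.2/4.5 below) appending a GSI prefetch
+	 * mode and, on 4.5, a prefetch threshold.
+	 */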
[IPA_4_1][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 6, 2, 8, 16, IPA_EE_UC } }, + [IPA_4_1][IPA_CLIENT_USB_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 10, 8, 16, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 5, 4, 20, 24, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_ODU_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_ETHERNET_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 9, 0, 8, 16, IPA_EE_UC } }, + [IPA_4_1][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_4_1][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 24, IPA_EE_Q6 } }, + /* Only for test purpose */ + [IPA_4_1][IPA_CLIENT_TEST_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {7, 9, 8, 16, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 10, 8, 16, IPA_EE_AP } }, + + + [IPA_4_1][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 3, 9, 9, IPA_EE_UC } }, + [IPA_4_1][IPA_CLIENT_WLAN2_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 20, 13, 9, 9, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_WLAN3_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 21, 14, 9, 9, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_USB_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 12, 9, 9, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 7, 5, 5, IPA_EE_AP } }, + 
[IPA_4_1][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 5, 9, 9, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 6, 9, 9, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_ODL_DPL_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 9, 9, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_ETHERNET_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 22, 1, 9, 9, IPA_EE_UC } }, + [IPA_4_1][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 4, 9, 9, IPA_EE_Q6 } }, + [IPA_4_1][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 3, 9, 9, IPA_EE_Q6 } }, + [IPA_4_1][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 5, 9, 9, IPA_EE_Q6 } }, + /* Only for test purpose */ + /* MBIM aggregation test pipes should have the same QMB as USB_CONS */ + [IPA_4_1][IPA_CLIENT_TEST_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 6, 9, 9, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 6, 9, 9, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 9, 9, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 12, 9, 9, IPA_EE_AP } }, + [IPA_4_1][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 21, 14, 9, 9, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_4_1][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_4_2 */ + [IPA_4_2][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 3, 7, 6, 7, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} }, + [IPA_4_2][IPA_CLIENT_USB_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 0, 5, 8, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 6, 8, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 12, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 6, 1, 20, 20, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} }, + [IPA_4_2][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + 
QMB_MASTER_SELECT_DDR, + { 4, 0, 8, 12, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS} }, + [IPA_4_2][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 5, 1, 20, 20, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS} }, + [IPA_4_2][IPA_CLIENT_ETHERNET_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 7, 0, 8, 10, IPA_EE_UC, GSI_USE_PREFETCH_BUFS} }, + /* Only for test purpose */ + [IPA_4_2][IPA_CLIENT_TEST_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + {0, 5, 8, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 5, 8, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 7, 6, 7, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} }, + [IPA_4_2][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + {1, 0, 8, 12, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 7, 0, 8, 10, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} }, + + + [IPA_4_2][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 8, 6, 9, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} }, + [IPA_4_2][IPA_CLIENT_USB_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 9, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 4, 4, 4, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 8, 2, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 9, 3, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 3, 6, 6, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 2, 6, 6, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 4, 6, 6, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_ETHERNET_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 1, 6, 6, IPA_EE_UC, GSI_USE_PREFETCH_BUFS} }, + /* Only for test purpose */ + /* MBIM aggregation test pipes should have the same QMB as USB_CONS */ + [IPA_4_2][IPA_CLIENT_TEST_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 9, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + 
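+	/*
+	 * Illustrative lookup only (a sketch; ipa3_get_gsi_ep_info() is
+	 * assumed from its use elsewhere in this driver):
+	 *
+	 *   int ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+	 *   const struct ipa_gsi_ep_config *gsi_ep =
+	 *		ipa3_get_gsi_ep_info(IPA_CLIENT_USB_PROD);
+	 *
+	 * ep_idx should resolve to the first element of the row's inner
+	 * tuple for the active HW index (see ipa3_get_ep_mapping() below);
+	 * gsi_ep exposes the GSI channel/TLV/AOS fields.
+	 */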
[IPA_4_2][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 9, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 4, 4, 4, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + [IPA_4_2][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 8, 6, 9, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} }, + [IPA_4_2][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 9, 3, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_4_2][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} }, + + /* IPA_4_5 */ + [IPA_4_5][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 9, 12, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, + [IPA_4_5][IPA_CLIENT_USB_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + false, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 11, 14, 10, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } }, + [IPA_4_5][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 2, 7, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } }, + [IPA_4_5][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 7, 9, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_ODU_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 10, 13, 8, 19, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_ETHERNET_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 12, 0, 8, 16, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } }, + [IPA_4_5][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 8 } }, + [IPA_4_5][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 1, 20, 24, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_Q6_DL_NLO_DATA_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } }, + /* Only for test purpose */ + [IPA_4_5][IPA_CLIENT_TEST_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_5][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + 
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_5][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 5, 8, 16, IPA_EE_AP } }, + [IPA_4_5][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 9, 12, 8, 16, IPA_EE_AP } }, + [IPA_4_5][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 11, 14, 8, 16, IPA_EE_AP } }, + + [IPA_4_5][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 24, 3, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, + [IPA_4_5][IPA_CLIENT_USB_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 26, 17, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 15, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_ODL_DPL_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 22, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 1, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_ODU_EMB_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 23, 8, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, + [IPA_4_5][IPA_CLIENT_ETHERNET_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 28, 1, 9, 9, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } }, + [IPA_4_5][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 3, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 21, 7, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_Q6_UL_NLO_DATA_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 5, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } }, + [IPA_4_5][IPA_CLIENT_Q6_UL_NLO_ACK_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 20, 6, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } }, + [IPA_4_5][IPA_CLIENT_Q6_QBAP_STATUS_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 4, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } }, + /* Only for test purpose */ + /* MBIM aggregation test pipes should have the same QMB as USB_CONS */ + [IPA_4_5][IPA_CLIENT_TEST_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 1, 9, 9, IPA_EE_AP } }, + 
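+	/*
+	 * Starting with IPA 4.5 the rows use split source/destination
+	 * resource groups (IPA_v4_5_GROUP_UL_DL_SRC vs _DST) and most
+	 * non-test rows append two GSI elements, apparently the prefetch
+	 * mode (GSI_SMART_PRE_FETCH / GSI_FREE_PRE_FETCH /
+	 * GSI_ESCAPE_BUF_ONLY) and its threshold; the test pipes here leave
+	 * them at their zero-initialized defaults.
+	 */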
[IPA_4_5][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 1, 9, 9, IPA_EE_AP } }, + [IPA_4_5][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 24, 3, 8, 14, IPA_EE_AP } }, + [IPA_4_5][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 26, 17, 9, 9, IPA_EE_AP } }, + [IPA_4_5][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 27, 18, 9, 9, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_4_5][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_4_5_MHI */ + [IPA_4_5_MHI][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v4_5_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 7, 9, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v4_5_MHI_GROUP_DDR, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 8 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v4_5_MHI_GROUP_PCIE, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 1, 20, 24, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_DL_NLO_DATA_PROD] = { + true, IPA_v4_5_MHI_GROUP_DDR, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD] = { + true, IPA_v4_5_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 4, 8, 8, 16, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 4 } }, + [IPA_4_5_MHI][IPA_CLIENT_MHI_PROD] = { + true, IPA_v4_5_MHI_GROUP_PCIE, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_PCIE, + { 1, 0, 16, 20, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } }, + [IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = { + true, IPA_v4_5_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 9, 12, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = { + true, IPA_v4_5_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 10, 13, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + /* Only for test purpose */ + [IPA_4_5_MHI][IPA_CLIENT_TEST_PROD] = { + true, IPA_v4_5_GROUP_UL_DL_SRC, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + + [IPA_4_5_MHI][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v4_5_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v4_5_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 15, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v4_5_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 3, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v4_5_MHI_GROUP_DDR, + 
false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 21, 7, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_UL_NLO_DATA_CONS] = { + true, IPA_v4_5_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 5, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_UL_NLO_ACK_CONS] = { + true, IPA_v4_5_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 20, 6, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_QBAP_STATUS_CONS] = { + true, IPA_v4_5_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 4, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5_MHI][IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS] = { + true, IPA_v4_5_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 29, 9, 9, 9, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 4 } }, + [IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = { + true, IPA_v4_5_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 26, 17, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = { + true, IPA_v4_5_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 27, 18, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5_MHI][IPA_CLIENT_MHI_CONS] = { + true, IPA_v4_5_MHI_GROUP_PCIE, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 14, 1, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, + [IPA_4_5_MHI][IPA_CLIENT_MHI_DPL_CONS] = { + true, IPA_v4_5_MHI_GROUP_PCIE, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 22, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_4_5_MHI][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v4_5_GROUP_UL_DL_DST, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, +}; + +static struct ipa3_mem_partition ipa_4_1_mem_part = { + .ofst_start = 0x280, + .v4_flt_hash_ofst = 0x288, + .v4_flt_hash_size = 0x78, + .v4_flt_hash_size_ddr = 0x4000, + .v4_flt_nhash_ofst = 0x308, + .v4_flt_nhash_size = 0x78, + .v4_flt_nhash_size_ddr = 0x4000, + .v6_flt_hash_ofst = 0x388, + .v6_flt_hash_size = 0x78, + .v6_flt_hash_size_ddr = 0x4000, + .v6_flt_nhash_ofst = 0x408, + .v6_flt_nhash_size = 0x78, + .v6_flt_nhash_size_ddr = 0x4000, + .v4_rt_num_index = 0xf, + .v4_modem_rt_index_lo = 0x0, + .v4_modem_rt_index_hi = 0x7, + .v4_apps_rt_index_lo = 0x8, + .v4_apps_rt_index_hi = 0xe, + .v4_rt_hash_ofst = 0x488, + .v4_rt_hash_size = 0x78, + .v4_rt_hash_size_ddr = 0x4000, + .v4_rt_nhash_ofst = 0x508, + .v4_rt_nhash_size = 0x78, + .v4_rt_nhash_size_ddr = 0x4000, + .v6_rt_num_index = 0xf, + .v6_modem_rt_index_lo = 0x0, + .v6_modem_rt_index_hi = 0x7, + .v6_apps_rt_index_lo = 0x8, + .v6_apps_rt_index_hi = 0xe, + .v6_rt_hash_ofst = 0x588, + .v6_rt_hash_size = 0x78, + .v6_rt_hash_size_ddr = 0x4000, + .v6_rt_nhash_ofst = 0x608, + .v6_rt_nhash_size = 0x78, + .v6_rt_nhash_size_ddr = 0x4000, + .modem_hdr_ofst = 0x688, + .modem_hdr_size = 0x140, + .apps_hdr_ofst = 0x7c8, + .apps_hdr_size = 0x0, + .apps_hdr_size_ddr = 0x800, + .modem_hdr_proc_ctx_ofst = 0x7d0, + .modem_hdr_proc_ctx_size = 0x200, + .apps_hdr_proc_ctx_ofst = 0x9d0, + .apps_hdr_proc_ctx_size = 0x200, + .apps_hdr_proc_ctx_size_ddr = 0x0, + .modem_comp_decomp_ofst = 0x0, + .modem_comp_decomp_size = 0x0, + .modem_ofst = 0x13f0, + .modem_size = 0x100c, + 
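+	/*
+	 * The *_ofst/*_size fields in these ipa3_mem_partition tables are
+	 * byte offsets and sizes within IPA shared SRAM; the *_size_ddr
+	 * variants appear to size the DDR-resident copies of the tables.
+	 * They are consumed through the IPA_MEM_PART() accessor, e.g. in
+	 * _ipa_sram_settings_read_v3_0() later in this file.
+	 */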
.apps_v4_flt_hash_ofst = 0x23fc, + .apps_v4_flt_hash_size = 0x0, + .apps_v4_flt_nhash_ofst = 0x23fc, + .apps_v4_flt_nhash_size = 0x0, + .apps_v6_flt_hash_ofst = 0x23fc, + .apps_v6_flt_hash_size = 0x0, + .apps_v6_flt_nhash_ofst = 0x23fc, + .apps_v6_flt_nhash_size = 0x0, + .uc_info_ofst = 0x80, + .uc_info_size = 0x200, + .end_ofst = 0x2800, + .apps_v4_rt_hash_ofst = 0x23fc, + .apps_v4_rt_hash_size = 0x0, + .apps_v4_rt_nhash_ofst = 0x23fc, + .apps_v4_rt_nhash_size = 0x0, + .apps_v6_rt_hash_ofst = 0x23fc, + .apps_v6_rt_hash_size = 0x0, + .apps_v6_rt_nhash_ofst = 0x23fc, + .apps_v6_rt_nhash_size = 0x0, + .uc_descriptor_ram_ofst = 0x2400, + .uc_descriptor_ram_size = 0x400, + .pdn_config_ofst = 0xbd8, + .pdn_config_size = 0x50, + .stats_quota_ofst = 0xc30, + .stats_quota_size = 0x60, + .stats_tethering_ofst = 0xc90, + .stats_tethering_size = 0x140, + .stats_flt_v4_ofst = 0xdd0, + .stats_flt_v4_size = 0x180, + .stats_flt_v6_ofst = 0xf50, + .stats_flt_v6_size = 0x180, + .stats_rt_v4_ofst = 0x10d0, + .stats_rt_v4_size = 0x180, + .stats_rt_v6_ofst = 0x1250, + .stats_rt_v6_size = 0x180, + .stats_drop_ofst = 0x13d0, + .stats_drop_size = 0x20, +}; + +static struct ipa3_mem_partition ipa_4_2_mem_part = { + .ofst_start = 0x280, + .v4_flt_hash_ofst = 0x288, + .v4_flt_hash_size = 0x0, + .v4_flt_hash_size_ddr = 0x0, + .v4_flt_nhash_ofst = 0x290, + .v4_flt_nhash_size = 0x78, + .v4_flt_nhash_size_ddr = 0x4000, + .v6_flt_hash_ofst = 0x310, + .v6_flt_hash_size = 0x0, + .v6_flt_hash_size_ddr = 0x0, + .v6_flt_nhash_ofst = 0x318, + .v6_flt_nhash_size = 0x78, + .v6_flt_nhash_size_ddr = 0x4000, + .v4_rt_num_index = 0xf, + .v4_modem_rt_index_lo = 0x0, + .v4_modem_rt_index_hi = 0x7, + .v4_apps_rt_index_lo = 0x8, + .v4_apps_rt_index_hi = 0xe, + .v4_rt_hash_ofst = 0x398, + .v4_rt_hash_size = 0x0, + .v4_rt_hash_size_ddr = 0x0, + .v4_rt_nhash_ofst = 0x3A0, + .v4_rt_nhash_size = 0x78, + .v4_rt_nhash_size_ddr = 0x4000, + .v6_rt_num_index = 0xf, + .v6_modem_rt_index_lo = 0x0, + .v6_modem_rt_index_hi = 0x7, + .v6_apps_rt_index_lo = 0x8, + .v6_apps_rt_index_hi = 0xe, + .v6_rt_hash_ofst = 0x420, + .v6_rt_hash_size = 0x0, + .v6_rt_hash_size_ddr = 0x0, + .v6_rt_nhash_ofst = 0x428, + .v6_rt_nhash_size = 0x78, + .v6_rt_nhash_size_ddr = 0x4000, + .modem_hdr_ofst = 0x4A8, + .modem_hdr_size = 0x140, + .apps_hdr_ofst = 0x5E8, + .apps_hdr_size = 0x0, + .apps_hdr_size_ddr = 0x800, + .modem_hdr_proc_ctx_ofst = 0x5F0, + .modem_hdr_proc_ctx_size = 0x200, + .apps_hdr_proc_ctx_ofst = 0x7F0, + .apps_hdr_proc_ctx_size = 0x200, + .apps_hdr_proc_ctx_size_ddr = 0x0, + .modem_comp_decomp_ofst = 0x0, + .modem_comp_decomp_size = 0x0, + .modem_ofst = 0xbf0, + .modem_size = 0x140c, + .apps_v4_flt_hash_ofst = 0x1bfc, + .apps_v4_flt_hash_size = 0x0, + .apps_v4_flt_nhash_ofst = 0x1bfc, + .apps_v4_flt_nhash_size = 0x0, + .apps_v6_flt_hash_ofst = 0x1bfc, + .apps_v6_flt_hash_size = 0x0, + .apps_v6_flt_nhash_ofst = 0x1bfc, + .apps_v6_flt_nhash_size = 0x0, + .uc_info_ofst = 0x80, + .uc_info_size = 0x200, + .end_ofst = 0x2000, + .apps_v4_rt_hash_ofst = 0x1bfc, + .apps_v4_rt_hash_size = 0x0, + .apps_v4_rt_nhash_ofst = 0x1bfc, + .apps_v4_rt_nhash_size = 0x0, + .apps_v6_rt_hash_ofst = 0x1bfc, + .apps_v6_rt_hash_size = 0x0, + .apps_v6_rt_nhash_ofst = 0x1bfc, + .apps_v6_rt_nhash_size = 0x0, + .uc_descriptor_ram_ofst = 0x2000, + .uc_descriptor_ram_size = 0x0, + .pdn_config_ofst = 0x9F8, + .pdn_config_size = 0x50, + .stats_quota_ofst = 0xa50, + .stats_quota_size = 0x60, + .stats_tethering_ofst = 0xab0, + .stats_tethering_size = 0x140, + .stats_flt_v4_ofst = 0xbf0, + 
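+	/*
+	 * The zero-sized *_hash regions in this 4.2 layout are consistent
+	 * with IPA 4.2 not supporting hashable filter/route tables; see
+	 * ipa_disable_hashing_rt_flt_v4_2() later in this file.
+	 */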
.stats_flt_v4_size = 0x0, + .stats_flt_v6_ofst = 0xbf0, + .stats_flt_v6_size = 0x0, + .stats_rt_v4_ofst = 0xbf0, + .stats_rt_v4_size = 0x0, + .stats_rt_v6_ofst = 0xbf0, + .stats_rt_v6_size = 0x0, + .stats_drop_ofst = 0xbf0, + .stats_drop_size = 0x0, +}; + +static struct ipa3_mem_partition ipa_4_5_mem_part = { + .uc_info_ofst = 0x80, + .uc_info_size = 0x200, + .ofst_start = 0x280, + .v4_flt_hash_ofst = 0x288, + .v4_flt_hash_size = 0x78, + .v4_flt_hash_size_ddr = 0x4000, + .v4_flt_nhash_ofst = 0x308, + .v4_flt_nhash_size = 0x78, + .v4_flt_nhash_size_ddr = 0x4000, + .v6_flt_hash_ofst = 0x388, + .v6_flt_hash_size = 0x78, + .v6_flt_hash_size_ddr = 0x4000, + .v6_flt_nhash_ofst = 0x408, + .v6_flt_nhash_size = 0x78, + .v6_flt_nhash_size_ddr = 0x4000, + .v4_rt_num_index = 0xf, + .v4_modem_rt_index_lo = 0x0, + .v4_modem_rt_index_hi = 0x7, + .v4_apps_rt_index_lo = 0x8, + .v4_apps_rt_index_hi = 0xe, + .v4_rt_hash_ofst = 0x488, + .v4_rt_hash_size = 0x78, + .v4_rt_hash_size_ddr = 0x4000, + .v4_rt_nhash_ofst = 0x508, + .v4_rt_nhash_size = 0x78, + .v4_rt_nhash_size_ddr = 0x4000, + .v6_rt_num_index = 0xf, + .v6_modem_rt_index_lo = 0x0, + .v6_modem_rt_index_hi = 0x7, + .v6_apps_rt_index_lo = 0x8, + .v6_apps_rt_index_hi = 0xe, + .v6_rt_hash_ofst = 0x588, + .v6_rt_hash_size = 0x78, + .v6_rt_hash_size_ddr = 0x4000, + .v6_rt_nhash_ofst = 0x608, + .v6_rt_nhash_size = 0x78, + .v6_rt_nhash_size_ddr = 0x4000, + .modem_hdr_ofst = 0x688, + .modem_hdr_size = 0x240, + .apps_hdr_ofst = 0x8c8, + .apps_hdr_size = 0x200, + .apps_hdr_size_ddr = 0x800, + .modem_hdr_proc_ctx_ofst = 0xad0, + .modem_hdr_proc_ctx_size = 0x200, + .apps_hdr_proc_ctx_ofst = 0xcd0, + .apps_hdr_proc_ctx_size = 0x200, + .apps_hdr_proc_ctx_size_ddr = 0x0, + .nat_tbl_ofst = 0xee0, + .nat_tbl_size = 0x800, + .nat_index_tbl_ofst = 0x16e0, + .nat_index_tbl_size = 0x100, + .nat_exp_tbl_ofst = 0x17e0, + .nat_exp_tbl_size = 0x400, + .pdn_config_ofst = 0x1be8, + .pdn_config_size = 0x50, + .stats_quota_ofst = 0x1c40, + .stats_quota_size = 0x78, + .stats_tethering_ofst = 0x1cb8, + .stats_tethering_size = 0x238, + .stats_flt_v4_ofst = 0, + .stats_flt_v4_size = 0, + .stats_flt_v6_ofst = 0, + .stats_flt_v6_size = 0, + .stats_rt_v4_ofst = 0, + .stats_rt_v4_size = 0, + .stats_rt_v6_ofst = 0, + .stats_rt_v6_size = 0, + .stats_fnr_ofst = 0x1ef0, + .stats_fnr_size = 0x800, + .stats_drop_ofst = 0x26f0, + .stats_drop_size = 0x20, + .modem_comp_decomp_ofst = 0x0, + .modem_comp_decomp_size = 0x0, + .modem_ofst = 0x2718, + .modem_size = 0x100c, + .apps_v4_flt_hash_ofst = 0x2718, + .apps_v4_flt_hash_size = 0x0, + .apps_v4_flt_nhash_ofst = 0x2718, + .apps_v4_flt_nhash_size = 0x0, + .apps_v6_flt_hash_ofst = 0x2718, + .apps_v6_flt_hash_size = 0x0, + .apps_v6_flt_nhash_ofst = 0x2718, + .apps_v6_flt_nhash_size = 0x0, + .apps_v4_rt_hash_ofst = 0x2718, + .apps_v4_rt_hash_size = 0x0, + .apps_v4_rt_nhash_ofst = 0x2718, + .apps_v4_rt_nhash_size = 0x0, + .apps_v6_rt_hash_ofst = 0x2718, + .apps_v6_rt_hash_size = 0x0, + .apps_v6_rt_nhash_ofst = 0x2718, + .apps_v6_rt_nhash_size = 0x0, + .uc_descriptor_ram_ofst = 0x3800, + .uc_descriptor_ram_size = 0x1000, + .end_ofst = 0x4800, +}; + + +/** + * ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an + * IPA_RM resource + * + * @resource: [IN] IPA Resource Manager resource + * @clients: [OUT] Empty array which will contain the list of clients. The + * caller must initialize this array. + * + * Return codes: 0 on success, negative on failure. 
+ */ +int ipa3_get_clients_from_rm_resource( + enum ipa_rm_resource_name resource, + struct ipa3_client_names *clients) +{ + int i = 0; + + if (resource < 0 || + resource >= IPA_RM_RESOURCE_MAX || + !clients) { + IPAERR("Bad parameters\n"); + return -EINVAL; + } + + switch (resource) { + case IPA_RM_RESOURCE_USB_CONS: + if (ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS) != -1) + clients->names[i++] = IPA_CLIENT_USB_CONS; + break; + case IPA_RM_RESOURCE_USB_DPL_CONS: + if (ipa3_get_ep_mapping(IPA_CLIENT_USB_DPL_CONS) != -1) + clients->names[i++] = IPA_CLIENT_USB_DPL_CONS; + break; + case IPA_RM_RESOURCE_HSIC_CONS: + clients->names[i++] = IPA_CLIENT_HSIC1_CONS; + break; + case IPA_RM_RESOURCE_WLAN_CONS: + clients->names[i++] = IPA_CLIENT_WLAN1_CONS; + clients->names[i++] = IPA_CLIENT_WLAN2_CONS; + clients->names[i++] = IPA_CLIENT_WLAN3_CONS; + break; + case IPA_RM_RESOURCE_MHI_CONS: + clients->names[i++] = IPA_CLIENT_MHI_CONS; + break; + case IPA_RM_RESOURCE_ODU_ADAPT_CONS: + clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS; + clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS; + break; + case IPA_RM_RESOURCE_ETHERNET_CONS: + clients->names[i++] = IPA_CLIENT_ETHERNET_CONS; + break; + case IPA_RM_RESOURCE_USB_PROD: + if (ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD) != -1) + clients->names[i++] = IPA_CLIENT_USB_PROD; + break; + case IPA_RM_RESOURCE_HSIC_PROD: + clients->names[i++] = IPA_CLIENT_HSIC1_PROD; + break; + case IPA_RM_RESOURCE_MHI_PROD: + clients->names[i++] = IPA_CLIENT_MHI_PROD; + break; + case IPA_RM_RESOURCE_ODU_ADAPT_PROD: + clients->names[i++] = IPA_CLIENT_ODU_PROD; + break; + case IPA_RM_RESOURCE_ETHERNET_PROD: + clients->names[i++] = IPA_CLIENT_ETHERNET_PROD; + break; + default: + break; + } + clients->length = i; + + return 0; +} + +/** + * ipa3_should_pipe_be_suspended() - returns true when the client's pipe should + * be suspended during a power save scenario. False otherwise. + * + * @client: [IN] IPA client + */ +bool ipa3_should_pipe_be_suspended(enum ipa_client_type client) +{ + struct ipa3_ep_context *ep; + int ipa_ep_idx; + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + WARN_ON(1); + return false; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + /* + * starting IPA 4.0 pipe no longer can be suspended. Instead, + * the corresponding GSI channel should be stopped. Usually client + * driver will take care of stopping the channel. For client drivers + * that are not stopping the channel, IPA RM will do that based on + * ipa3_should_pipe_channel_be_stopped(). + */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + return false; + + if (ep->keep_ipa_awake) + return false; + + if (client == IPA_CLIENT_USB_CONS || + client == IPA_CLIENT_USB_DPL_CONS || + client == IPA_CLIENT_MHI_CONS || + client == IPA_CLIENT_MHI_DPL_CONS || + client == IPA_CLIENT_HSIC1_CONS || + client == IPA_CLIENT_WLAN1_CONS || + client == IPA_CLIENT_WLAN2_CONS || + client == IPA_CLIENT_WLAN3_CONS || + client == IPA_CLIENT_WLAN4_CONS || + client == IPA_CLIENT_ODU_EMB_CONS || + client == IPA_CLIENT_ODU_TETH_CONS || + client == IPA_CLIENT_ETHERNET_CONS) + return true; + + return false; +} + +/** + * ipa3_should_pipe_channel_be_stopped() - returns true when the client's + * channel should be stopped during a power save scenario. False otherwise. + * Most client already stops the GSI channel on suspend, and are not included + * in the list below. 
+ * + * @client: [IN] IPA client + */ +static bool ipa3_should_pipe_channel_be_stopped(enum ipa_client_type client) +{ + struct ipa3_ep_context *ep; + int ipa_ep_idx; + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) + return false; + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + WARN_ON(1); + return false; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (ep->keep_ipa_awake) + return false; + + if (client == IPA_CLIENT_ODU_EMB_CONS || + client == IPA_CLIENT_ODU_TETH_CONS) + return true; + + return false; +} + +/** + * ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM + * resource and decrement active clients counter, which may result in clock + * gating of IPA clocks. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. + */ +int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource) +{ + struct ipa3_client_names clients; + int res; + int index; + struct ipa_ep_cfg_ctrl suspend; + enum ipa_client_type client; + int ipa_ep_idx; + bool pipe_suspended = false; + + memset(&clients, 0, sizeof(clients)); + res = ipa3_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR("Bad params.\n"); + return res; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + ipa3_ctx->resume_on_connect[client] = false; + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_be_suspended(client)) { + if (ipa3_ctx->ep[ipa_ep_idx].valid) { + /* suspend endpoint */ + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = true; + ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend); + pipe_suspended = true; + } + } + + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_channel_be_stopped(client)) { + if (ipa3_ctx->ep[ipa_ep_idx].valid) { + /* Stop GSI channel */ + res = ipa3_stop_gsi_channel(ipa_ep_idx); + if (res) { + IPAERR("failed stop gsi ch %lu\n", + ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl); + return res; + } + } + } + } + /* Sleep ~1 msec */ + if (pipe_suspended) + usleep_range(1000, 2000); + + /* before gating IPA clocks do TAG process */ + ipa3_ctx->tag_process_before_gating = true; + IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource)); + + return 0; +} + +/** + * ipa3_suspend_resource_no_block() - suspend client endpoints related to the + * IPA_RM resource and decrement active clients counter. This function is + * guaranteed to avoid sleeping. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. 
+ */ +int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource) +{ + int res; + struct ipa3_client_names clients; + int index; + enum ipa_client_type client; + struct ipa_ep_cfg_ctrl suspend; + int ipa_ep_idx; + struct ipa_active_client_logging_info log_info; + + memset(&clients, 0, sizeof(clients)); + res = ipa3_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR( + "ipa3_get_clients_from_rm_resource() failed, name = %d.\n", + resource); + goto bail; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + ipa3_ctx->resume_on_connect[client] = false; + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_be_suspended(client)) { + if (ipa3_ctx->ep[ipa_ep_idx].valid) { + /* suspend endpoint */ + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = true; + ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend); + } + } + + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_channel_be_stopped(client)) { + res = -EPERM; + goto bail; + } + } + + if (res == 0) { + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, + ipa_rm_resource_str(resource)); + /* before gating IPA clocks do TAG process */ + ipa3_ctx->tag_process_before_gating = true; + ipa3_dec_client_disable_clks_no_block(&log_info); + } +bail: + return res; +} + +/** + * ipa3_resume_resource() - resume client endpoints related to the IPA_RM + * resource. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. + */ +int ipa3_resume_resource(enum ipa_rm_resource_name resource) +{ + + struct ipa3_client_names clients; + int res; + int index; + struct ipa_ep_cfg_ctrl suspend; + enum ipa_client_type client; + int ipa_ep_idx; + + memset(&clients, 0, sizeof(clients)); + res = ipa3_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR("ipa3_get_clients_from_rm_resource() failed.\n"); + return res; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + /* + * The related ep, will be resumed on connect + * while its resource is granted + */ + ipa3_ctx->resume_on_connect[client] = true; + IPADBG("%d will be resumed on connect.\n", client); + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_be_suspended(client)) { + if (ipa3_ctx->ep[ipa_ep_idx].valid) { + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = false; + ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend); + } + } + + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_channel_be_stopped(client)) { + if (ipa3_ctx->ep[ipa_ep_idx].valid) { + res = gsi_start_channel( + ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl); + if (res) { + IPAERR("failed to start gsi ch %lu\n", + ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl); + return res; + } + } + } + } + + return res; +} + +/** + * ipa3_get_hw_type_index() - Get HW type index which is used as the entry index + * for ep\resource groups related arrays . 
+ * + * Return value: HW type index + */ +static u8 ipa3_get_hw_type_index(void) +{ + u8 hw_type_index; + + switch (ipa3_ctx->ipa_hw_type) { + case IPA_HW_v3_0: + case IPA_HW_v3_1: + hw_type_index = IPA_3_0; + break; + case IPA_HW_v3_5: + hw_type_index = IPA_3_5; + /* + *this flag is initialized only after fw load trigger from + * user space (ipa3_write) + */ + if (ipa3_ctx->ipa_config_is_mhi) + hw_type_index = IPA_3_5_MHI; + break; + case IPA_HW_v3_5_1: + hw_type_index = IPA_3_5_1; + break; + case IPA_HW_v4_0: + hw_type_index = IPA_4_0; + /* + *this flag is initialized only after fw load trigger from + * user space (ipa3_write) + */ + if (ipa3_ctx->ipa_config_is_mhi) + hw_type_index = IPA_4_0_MHI; + break; + case IPA_HW_v4_1: + hw_type_index = IPA_4_1; + break; + case IPA_HW_v4_2: + hw_type_index = IPA_4_2; + break; + case IPA_HW_v4_5: + hw_type_index = IPA_4_5; + if (ipa3_ctx->ipa_config_is_mhi) + hw_type_index = IPA_4_5_MHI; + break; + default: + IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type); + hw_type_index = IPA_3_0; + break; + } + + return hw_type_index; +} + +/** + * _ipa_sram_settings_read_v3_0() - Read SRAM settings from HW + * + * Returns: None + */ +void _ipa_sram_settings_read_v3_0(void) +{ + struct ipahal_reg_shared_mem_size smem_sz; + + memset(&smem_sz, 0, sizeof(smem_sz)); + + ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz); + + ipa3_ctx->smem_restricted_bytes = smem_sz.shared_mem_baddr; + ipa3_ctx->smem_sz = smem_sz.shared_mem_sz; + + /* reg fields are in 8B units */ + ipa3_ctx->smem_restricted_bytes *= 8; + ipa3_ctx->smem_sz *= 8; + ipa3_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst); + ipa3_ctx->hdr_tbl_lcl = false; + ipa3_ctx->hdr_proc_ctx_tbl_lcl = true; + + /* + * when proc ctx table is located in internal memory, + * modem entries resides first. + */ + if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) { + ipa3_ctx->hdr_proc_ctx_tbl.start_offset = + IPA_MEM_PART(modem_hdr_proc_ctx_size); + } + ipa3_ctx->ip4_rt_tbl_hash_lcl = false; + ipa3_ctx->ip4_rt_tbl_nhash_lcl = false; + ipa3_ctx->ip6_rt_tbl_hash_lcl = false; + ipa3_ctx->ip6_rt_tbl_nhash_lcl = false; + ipa3_ctx->ip4_flt_tbl_hash_lcl = false; + ipa3_ctx->ip4_flt_tbl_nhash_lcl = false; + ipa3_ctx->ip6_flt_tbl_hash_lcl = false; + ipa3_ctx->ip6_flt_tbl_nhash_lcl = false; +} + +/** + * ipa3_cfg_route() - configure IPA route + * @route: IPA route + * + * Return codes: + * 0: success + */ +int ipa3_cfg_route(struct ipahal_reg_route *route) +{ + + IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n", + route->route_dis, + route->route_def_pipe, + route->route_def_hdr_table); + IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n", + route->route_def_hdr_ofst, + route->route_frag_def_pipe); + + IPADBG("default_retain_hdr=%d\n", + route->route_def_retain_hdr); + + if (route->route_dis) { + IPAERR("Route disable is not supported!\n"); + return -EPERM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + ipahal_write_reg_fields(IPA_ROUTE, route); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa3_cfg_filter() - configure filter + * @disable: disable value + * + * Return codes: + * 0: success + */ +int ipa3_cfg_filter(u32 disable) +{ + IPAERR_RL("Filter disable is not supported!\n"); + return -EPERM; +} + +/** + * ipa_disable_hashing_rt_flt_v4_2() - Disable filer and route hashing. 
+ * + * Return codes: 0 for success, negative value for failure + */ +static int ipa_disable_hashing_rt_flt_v4_2(void) +{ + + IPADBG("Disable hashing for filter and route table in IPA 4.2 HW\n"); + ipahal_write_reg(IPA_FILT_ROUT_HASH_EN, + IPA_FILT_ROUT_HASH_REG_VAL_v4_2); + return 0; +} + + +/** + * ipa_comp_cfg() - Configure QMB/Master port selection + * + * Returns: None + */ +static void ipa_comp_cfg(void) +{ + struct ipahal_reg_comp_cfg comp_cfg; + + /* IPAv4 specific, on NON-MHI config*/ + if ((ipa3_ctx->ipa_hw_type == IPA_HW_v4_0) && + (!ipa3_ctx->ipa_config_is_mhi)) { + + ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg); + IPADBG("Before comp config\n"); + IPADBG("ipa_qmb_select_by_address_global_en = %d\n", + comp_cfg.ipa_qmb_select_by_address_global_en); + + IPADBG("ipa_qmb_select_by_address_prod_en = %d\n", + comp_cfg.ipa_qmb_select_by_address_prod_en); + + IPADBG("ipa_qmb_select_by_address_cons_en = %d\n", + comp_cfg.ipa_qmb_select_by_address_cons_en); + + comp_cfg.ipa_qmb_select_by_address_global_en = false; + comp_cfg.ipa_qmb_select_by_address_prod_en = false; + comp_cfg.ipa_qmb_select_by_address_cons_en = false; + + ipahal_write_reg_fields(IPA_COMP_CFG, &comp_cfg); + + ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg); + IPADBG("After comp config\n"); + IPADBG("ipa_qmb_select_by_address_global_en = %d\n", + comp_cfg.ipa_qmb_select_by_address_global_en); + + IPADBG("ipa_qmb_select_by_address_prod_en = %d\n", + comp_cfg.ipa_qmb_select_by_address_prod_en); + + IPADBG("ipa_qmb_select_by_address_cons_en = %d\n", + comp_cfg.ipa_qmb_select_by_address_cons_en); + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg); + IPADBG("Before comp config\n"); + IPADBG("gsi_multi_inorder_rd_dis = %d\n", + comp_cfg.gsi_multi_inorder_rd_dis); + + IPADBG("gsi_multi_inorder_wr_dis = %d\n", + comp_cfg.gsi_multi_inorder_wr_dis); + + comp_cfg.gsi_multi_inorder_rd_dis = true; + comp_cfg.gsi_multi_inorder_wr_dis = true; + + ipahal_write_reg_fields(IPA_COMP_CFG, &comp_cfg); + + ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg); + IPADBG("After comp config\n"); + IPADBG("gsi_multi_inorder_rd_dis = %d\n", + comp_cfg.gsi_multi_inorder_rd_dis); + + IPADBG("gsi_multi_inorder_wr_dis = %d\n", + comp_cfg.gsi_multi_inorder_wr_dis); + } + + /* set GSI_MULTI_AXI_MASTERS_DIS = true after HW.4.1 */ + if ((ipa3_ctx->ipa_hw_type == IPA_HW_v4_1) || + (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2)) { + ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg); + IPADBG("Before comp config\n"); + IPADBG("gsi_multi_axi_masters_dis = %d\n", + comp_cfg.gsi_multi_axi_masters_dis); + + comp_cfg.gsi_multi_axi_masters_dis = true; + + ipahal_write_reg_fields(IPA_COMP_CFG, &comp_cfg); + + ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg); + IPADBG("After comp config\n"); + IPADBG("gsi_multi_axi_masters_dis = %d\n", + comp_cfg.gsi_multi_axi_masters_dis); + } +} + +/** + * ipa3_cfg_qsb() - Configure IPA QSB maximal reads and writes + * + * Returns: None + */ +static void ipa3_cfg_qsb(void) +{ + u8 hw_type_idx; + const struct ipa_qmb_outstanding *qmb_ot; + struct ipahal_reg_qsb_max_reads max_reads = { 0 }; + struct ipahal_reg_qsb_max_writes max_writes = { 0 }; + + hw_type_idx = ipa3_get_hw_type_index(); + + qmb_ot = &(ipa3_qmb_outstanding[hw_type_idx][IPA_QMB_INSTANCE_DDR]); + max_reads.qmb_0_max_reads = qmb_ot->ot_reads; + max_writes.qmb_0_max_writes = qmb_ot->ot_writes; + + qmb_ot = &(ipa3_qmb_outstanding[hw_type_idx][IPA_QMB_INSTANCE_PCIE]); + max_reads.qmb_1_max_reads = qmb_ot->ot_reads; + 
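+	/*
+	 * qmb_0 above is the DDR-facing master and qmb_1 the PCIe-facing
+	 * one; the ot_reads/ot_writes values bound how many transactions
+	 * each QSB master may keep outstanding (taken from
+	 * ipa3_qmb_outstanding[] earlier in this file).
+	 */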
max_writes.qmb_1_max_writes = qmb_ot->ot_writes; + + ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, &max_writes); + ipahal_write_reg_fields(IPA_QSB_MAX_READS, &max_reads); +} + +/* relevant starting IPA4.5 */ +static void ipa_cfg_qtime(void) +{ + struct ipahal_reg_qtime_timestamp_cfg ts_cfg; + struct ipahal_reg_timers_pulse_gran_cfg gran_cfg; + struct ipahal_reg_timers_xo_clk_div_cfg div_cfg; + u32 val; + + /* Configure timestamp resolution */ + memset(&ts_cfg, 0, sizeof(ts_cfg)); + ts_cfg.dpl_timestamp_lsb = 0; + ts_cfg.dpl_timestamp_sel = false; /* DPL: use legacy 1ms resolution */ + ts_cfg.tag_timestamp_lsb = IPA_TAG_TIMER_TIMESTAMP_SHFT; + ts_cfg.nat_timestamp_lsb = IPA_NAT_TIMER_TIMESTAMP_SHFT; + val = ipahal_read_reg(IPA_QTIME_TIMESTAMP_CFG); + IPADBG("qtime timestamp before cfg: 0x%x\n", val); + ipahal_write_reg_fields(IPA_QTIME_TIMESTAMP_CFG, &ts_cfg); + val = ipahal_read_reg(IPA_QTIME_TIMESTAMP_CFG); + IPADBG("qtime timestamp after cfg: 0x%x\n", val); + + /* Configure timers pulse generators granularity */ + memset(&gran_cfg, 0, sizeof(gran_cfg)); + gran_cfg.gran_0 = IPA_TIMERS_TIME_GRAN_100_USEC; + gran_cfg.gran_1 = IPA_TIMERS_TIME_GRAN_1_MSEC; + gran_cfg.gran_2 = IPA_TIMERS_TIME_GRAN_10_USEC; + val = ipahal_read_reg(IPA_TIMERS_PULSE_GRAN_CFG); + IPADBG("timer pulse granularity before cfg: 0x%x\n", val); + ipahal_write_reg_fields(IPA_TIMERS_PULSE_GRAN_CFG, &gran_cfg); + val = ipahal_read_reg(IPA_TIMERS_PULSE_GRAN_CFG); + IPADBG("timer pulse granularity after cfg: 0x%x\n", val); + + /* Configure timers XO Clock divider */ + memset(&div_cfg, 0, sizeof(div_cfg)); + ipahal_read_reg_fields(IPA_TIMERS_XO_CLK_DIV_CFG, &div_cfg); + IPADBG("timer XO clk divider before cfg: enabled=%d divider=%u\n", + div_cfg.enable, div_cfg.value); + + /* Make sure divider is disabled */ + if (div_cfg.enable) { + div_cfg.enable = false; + ipahal_write_reg_fields(IPA_TIMERS_XO_CLK_DIV_CFG, &div_cfg); + } + + /* At emulation systems XO clock is lower than on real target. + * (e.g. 19.2Mhz compared to 96Khz) + * Use lowest possible divider. 
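+ * For example, leaving the divider enabled with a large divide value on
+ * such a platform would slow the Qtime tick further and stretch the
+ * 100 usec / 1 msec pulse granularities programmed above, which in turn
+ * would skew every AGGR/HOLB timer scaled by ipa3_process_timer_cfg()
+ * (an interpretation of the intent here, not a documented HW statement).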
+ */ + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + div_cfg.value = 0; + } + + div_cfg.enable = true; /* Enable the divider */ + ipahal_write_reg_fields(IPA_TIMERS_XO_CLK_DIV_CFG, &div_cfg); + ipahal_read_reg_fields(IPA_TIMERS_XO_CLK_DIV_CFG, &div_cfg); + IPADBG("timer XO clk divider after cfg: enabled=%d divider=%u\n", + div_cfg.enable, div_cfg.value); +} + +/** + * ipa3_init_hw() - initialize HW + * + * Return codes: + * 0: success + */ +int ipa3_init_hw(void) +{ + u32 ipa_version = 0; + struct ipahal_reg_counter_cfg cnt_cfg; + + /* Read IPA version and make sure we have access to the registers */ + ipa_version = ipahal_read_reg(IPA_VERSION); + IPADBG("IPA_VERSION=%u\n", ipa_version); + if (ipa_version == 0) + return -EFAULT; + + switch (ipa3_ctx->ipa_hw_type) { + case IPA_HW_v3_0: + case IPA_HW_v3_1: + ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v3_0); + break; + case IPA_HW_v3_5: + case IPA_HW_v3_5_1: + ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v3_5); + break; + case IPA_HW_v4_0: + case IPA_HW_v4_1: + ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v4_0); + break; + case IPA_HW_v4_2: + ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v4_2); + break; + default: + IPADBG("Do not update BCR - hw_type=%d\n", + ipa3_ctx->ipa_hw_type); + break; + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 && + ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) { + struct ipahal_reg_clkon_cfg clkon_cfg; + struct ipahal_reg_tx_cfg tx_cfg; + + memset(&clkon_cfg, 0, sizeof(clkon_cfg)); + + /*enable open global clocks*/ + clkon_cfg.open_global_2x_clk = true; + clkon_cfg.open_global = true; + ipahal_write_reg_fields(IPA_CLKON_CFG, &clkon_cfg); + + ipahal_read_reg_fields(IPA_TX_CFG, &tx_cfg); + /* disable PA_MASK_EN to allow holb drop */ + tx_cfg.pa_mask_en = 0; + ipahal_write_reg_fields(IPA_TX_CFG, &tx_cfg); + } + + ipa3_cfg_qsb(); + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) { + /* set aggr granularity for 0.5 msec*/ + cnt_cfg.aggr_granularity = GRAN_VALUE_500_USEC; + ipahal_write_reg_fields(IPA_COUNTER_CFG, &cnt_cfg); + } else { + ipa_cfg_qtime(); + } + + ipa_comp_cfg(); + + /* + * In IPA 4.2 filter and routing hashing not supported + * disabling hash enable register. + */ + if (ipa3_ctx->ipa_fltrt_not_hashable) + ipa_disable_hashing_rt_flt_v4_2(); + + return 0; +} + +/** + * ipa3_get_ep_mapping() - provide endpoint mapping + * @client: client type + * + * Return value: endpoint mapping + */ +int ipa3_get_ep_mapping(enum ipa_client_type client) +{ + int ipa_ep_idx; + u8 hw_idx = ipa3_get_hw_type_index(); + + if (client >= IPA_CLIENT_MAX || client < 0) { + IPAERR_RL("Bad client number! 
client =%d\n", client); + return IPA_EP_NOT_ALLOCATED; + } + + if (!ipa3_ep_mapping[hw_idx][client].valid) + return IPA_EP_NOT_ALLOCATED; + + ipa_ep_idx = + ipa3_ep_mapping[hw_idx][client].ipa_gsi_ep_info.ipa_ep_num; + if (ipa_ep_idx < 0 || (ipa_ep_idx >= IPA3_MAX_NUM_PIPES + && client != IPA_CLIENT_DUMMY_CONS)) + return IPA_EP_NOT_ALLOCATED; + + return ipa_ep_idx; +} + +/** + * ipa3_get_gsi_ep_info() - provide gsi ep information + * @client: IPA client value + * + * Return value: pointer to ipa_gsi_ep_info + */ +const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info + (enum ipa_client_type client) +{ + int ep_idx; + + ep_idx = ipa3_get_ep_mapping(client); + if (ep_idx == IPA_EP_NOT_ALLOCATED) + return NULL; + + if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid) + return NULL; + + return &(ipa3_ep_mapping[ipa3_get_hw_type_index()] + [client].ipa_gsi_ep_info); +} + +/** + * ipa_get_ep_group() - provide endpoint group by client + * @client: client type + * + * Return value: endpoint group + */ +int ipa_get_ep_group(enum ipa_client_type client) +{ + if (client >= IPA_CLIENT_MAX || client < 0) { + IPAERR("Bad client number! client =%d\n", client); + return -EINVAL; + } + + if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid) + return -EINVAL; + + return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].group_num; +} + +/** + * ipa3_get_qmb_master_sel() - provide QMB master selection for the client + * @client: client type + * + * Return value: QMB master index + */ +u8 ipa3_get_qmb_master_sel(enum ipa_client_type client) +{ + if (client >= IPA_CLIENT_MAX || client < 0) { + IPAERR("Bad client number! client =%d\n", client); + return -EINVAL; + } + + if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid) + return -EINVAL; + + return ipa3_ep_mapping[ipa3_get_hw_type_index()] + [client].qmb_master_sel; +} + +/* ipa3_set_client() - provide client mapping + * @client: client type + * + * Return value: none + */ + +void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink) +{ + if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) { + IPAERR("Bad client number! client =%d\n", client); + } else if (index >= IPA3_MAX_NUM_PIPES || index < 0) { + IPAERR("Bad pipe index! index =%d\n", index); + } else { + ipa3_ctx->ipacm_client[index].client_enum = client; + ipa3_ctx->ipacm_client[index].uplink = uplink; + } +} + +/* ipa3_get_wlan_stats() - get ipa wifi stats + * + * Return value: success or failure + */ +int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats) +{ + if (ipa3_ctx->uc_wdi_ctx.stats_notify) { + ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS, + wdi_sap_stats); + } else { + IPAERR_RL("uc_wdi_ctx.stats_notify NULL\n"); + return -EFAULT; + } + return 0; +} + +int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota) +{ + if (ipa3_ctx->uc_wdi_ctx.stats_notify) { + ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA, + wdi_quota); + } else { + IPAERR("uc_wdi_ctx.stats_notify NULL\n"); + return -EFAULT; + } + return 0; +} + +/** + * ipa3_get_client() - provide client mapping + * @client: client type + * + * Return value: client mapping enum + */ +enum ipacm_client_enum ipa3_get_client(int pipe_idx) +{ + if (pipe_idx >= IPA3_MAX_NUM_PIPES || pipe_idx < 0) { + IPAERR("Bad pipe index! 
pipe_idx =%d\n", pipe_idx); + return IPACM_CLIENT_MAX; + } else { + return ipa3_ctx->ipacm_client[pipe_idx].client_enum; + } +} + +/** + * ipa2_get_client_uplink() - provide client mapping + * @client: client type + * + * Return value: none + */ +bool ipa3_get_client_uplink(int pipe_idx) +{ + if (pipe_idx < 0 || pipe_idx >= IPA3_MAX_NUM_PIPES) { + IPAERR("invalid pipe idx %d\n", pipe_idx); + return false; + } + + return ipa3_ctx->ipacm_client[pipe_idx].uplink; +} + +/** + * ipa3_get_rm_resource_from_ep() - get the IPA_RM resource which is related to + * the supplied pipe index. + * + * @pipe_idx: + * + * Return value: IPA_RM resource related to the pipe, -1 if a resource was not + * found. + */ +enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx) +{ + int i; + int j; + enum ipa_client_type client; + struct ipa3_client_names clients; + bool found = false; + + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("Bad pipe index!\n"); + return -EINVAL; + } + + client = ipa3_ctx->ep[pipe_idx].client; + + for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { + memset(&clients, 0, sizeof(clients)); + ipa3_get_clients_from_rm_resource(i, &clients); + for (j = 0; j < clients.length; j++) { + if (clients.names[j] == client) { + found = true; + break; + } + } + if (found) + break; + } + + if (!found) + return -EFAULT; + + return i; +} + +/** + * ipa3_get_client_mapping() - provide client mapping + * @pipe_idx: IPA end-point number + * + * Return value: client mapping + */ +enum ipa_client_type ipa3_get_client_mapping(int pipe_idx) +{ + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("Bad pipe index!\n"); + WARN_ON(1); + return -EINVAL; + } + + return ipa3_ctx->ep[pipe_idx].client; +} + +/** + * ipa3_get_client_by_pipe() - return client type relative to pipe + * index + * @pipe_idx: IPA end-point number + * + * Return value: client type + */ +enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx) +{ + int j = 0; + + for (j = 0; j < IPA_CLIENT_MAX; j++) { + const struct ipa_ep_configuration *iec_ptr = + &(ipa3_ep_mapping[ipa3_get_hw_type_index()][j]); + if (iec_ptr->valid && + iec_ptr->ipa_gsi_ep_info.ipa_ep_num == pipe_idx) + break; + } + + if (j == IPA_CLIENT_MAX) + IPADBG("Got to IPA_CLIENT_MAX (%d) while searching for (%d)\n", + j, pipe_idx); + + return j; +} + +/** + * ipa_init_ep_flt_bitmap() - Initialize the bitmap + * that represents the End-points that supports filtering + */ +void ipa_init_ep_flt_bitmap(void) +{ + enum ipa_client_type cl; + u8 hw_idx = ipa3_get_hw_type_index(); + u32 bitmap; + u32 pipe_num; + const struct ipa_gsi_ep_config *gsi_ep_ptr; + + bitmap = 0; + if (ipa3_ctx->ep_flt_bitmap) { + WARN_ON(1); + return; + } + + for (cl = 0; cl < IPA_CLIENT_MAX ; cl++) { + if (ipa3_ep_mapping[hw_idx][cl].support_flt) { + gsi_ep_ptr = + &ipa3_ep_mapping[hw_idx][cl].ipa_gsi_ep_info; + pipe_num = + gsi_ep_ptr->ipa_ep_num; + bitmap |= (1U << pipe_num); + if (bitmap != ipa3_ctx->ep_flt_bitmap) { + ipa3_ctx->ep_flt_bitmap = bitmap; + ipa3_ctx->ep_flt_num++; + } + } + } +} + +/** + * ipa_is_ep_support_flt() - Given an End-point check + * whether it supports filtering or not. 
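+ *
+ * The lookup is a single test against ep_flt_bitmap, which
+ * ipa_init_ep_flt_bitmap() derives from the per-HW endpoint mapping
+ * table. Illustrative use (the client below is only an example):
+ *
+ *    int idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+ *
+ *    if (idx != IPA_EP_NOT_ALLOCATED && ipa_is_ep_support_flt(idx))
+ *        ; /* filter rules may be installed on this pipe */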
+ *
+ * @pipe_idx:
+ *
+ * Return values:
+ * true if supports and false if not
+ */
+bool ipa_is_ep_support_flt(int pipe_idx)
+{
+ if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+ IPAERR("Bad pipe index!\n");
+ return false;
+ }
+
+ return ipa3_ctx->ep_flt_bitmap & (1U << pipe_idx);
+}
+
+/**
+ * ipa3_cfg_ep_seq() - IPA end-point HPS/DPS sequencer type configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @seq_cfg: [in] IPA end-point sequencer configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_seq(u32 clnt_hdl, const struct ipa_ep_cfg_seq *seq_cfg)
+{
+ int type;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes
+ || ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad param, clnt_hdl = %d", clnt_hdl);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+ IPAERR("SEQ does not apply to IPA consumer EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ /*
+ * Skip Configure sequencers type for test clients.
+ * These are configured dynamically in ipa3_cfg_ep_mode
+ */
+ if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
+ IPADBG("Skip sequencers configuration for test clients\n");
+ return 0;
+ }
+
+ if (seq_cfg->set_dynamic)
+ type = seq_cfg->seq_type;
+ else
+ type = ipa3_ep_mapping[ipa3_get_hw_type_index()]
+ [ipa3_ctx->ep[clnt_hdl].client].sequencer_type;
+
+ if (type != IPA_DPS_HPS_SEQ_TYPE_INVALID) {
+ if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA &&
+ !IPA_DPS_HPS_SEQ_TYPE_IS_DMA(type)) {
+ IPAERR("Configuring non-DMA SEQ type to DMA pipe\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+ /* Configure sequencers type*/
+
+ IPADBG("set sequencers to sequence 0x%x, ep = %d\n", type,
+ clnt_hdl);
+ ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ } else {
+ IPADBG("should not set sequencer type of ep = %d\n", clnt_hdl);
+ }
+
+ return 0;
+}
+
+/**
+ * ipa3_cfg_ep - IPA end-point configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg: [in] IPA end-point configuration params
+ *
+ * This includes nat, IPv6CT, header, mode, aggregation and route settings and
+ * is a one shot API to configure the IPA end-point fully
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+ int result = -EINVAL;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ result = ipa3_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
+ if (result)
+ return result;
+
+ if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
+ result = ipa3_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+ if (result)
+ return result;
+
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ result = ipa3_cfg_ep_conn_track(clnt_hdl,
+ &ipa_ep_cfg->conn_track);
+ if (result)
+ return result;
+ }
+
+ result = ipa3_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_seq(clnt_hdl, &ipa_ep_cfg->seq);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+ if (result)
+ return result;
+
+ result = ipa3_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
+ if (result)
+ return result;
+ } else {
+ result = ipa3_cfg_ep_metadata_mask(clnt_hdl,
+ &ipa_ep_cfg->metadata_mask);
+ if (result)
+ return result;
+ }
+
+ return 0;
+}
+
+static const char *ipa3_get_nat_en_str(enum ipa_nat_en_type
nat_en) +{ + switch (nat_en) { + case (IPA_BYPASS_NAT): + return "NAT disabled"; + case (IPA_SRC_NAT): + return "Source NAT"; + case (IPA_DST_NAT): + return "Dst NAT"; + } + + return "undefined"; +} + +static const char *ipa3_get_ipv6ct_en_str(enum ipa_ipv6ct_en_type ipv6ct_en) +{ + switch (ipv6ct_en) { + case (IPA_BYPASS_IPV6CT): + return "ipv6ct disabled"; + case (IPA_ENABLE_IPV6CT): + return "ipv6ct enabled"; + } + + return "undefined"; +} + +/** + * ipa3_cfg_ep_nat() - IPA end-point NAT configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_nat: [in] IPA NAT end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) { + IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl); + return -EINVAL; + } + + IPADBG("pipe=%d, nat_en=%d(%s)\n", + clnt_hdl, + ep_nat->nat_en, + ipa3_get_nat_en_str(ep_nat->nat_en)); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.nat = *ep_nat; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_NAT_n, clnt_hdl, ep_nat); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_conn_track() - IPA end-point IPv6CT configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_conn_track: [in] IPA IPv6CT end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_conn_track(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ep_conn_track) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_conn_track == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) { + IPAERR("IPv6CT does not apply to IPA out EP %d\n", clnt_hdl); + return -EINVAL; + } + + IPADBG("pipe=%d, conn_track_en=%d(%s)\n", + clnt_hdl, + ep_conn_track->conn_track_en, + ipa3_get_ipv6ct_en_str(ep_conn_track->conn_track_en)); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.conn_track = *ep_conn_track; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CONN_TRACK_n, clnt_hdl, + ep_conn_track); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + + +/** + * ipa3_cfg_ep_status() - IPA end-point status configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_status(u32 clnt_hdl, + const struct ipahal_reg_ep_cfg_status *ep_status) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, status_en=%d status_ep=%d status_location=%d\n", + clnt_hdl, + 
ep_status->status_en, + ep_status->status_ep, + ep_status->status_location); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].status = *ep_status; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_STATUS_n, clnt_hdl, ep_status); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_cfg() - IPA end-point cfg configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg) +{ + u8 qmb_master_sel; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.cfg = *cfg; + + /* Override QMB master selection */ + qmb_master_sel = ipa3_get_qmb_master_sel(ipa3_ctx->ep[clnt_hdl].client); + ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel = qmb_master_sel; + IPADBG( + "pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d gen_qmb_master_sel=%d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].cfg.cfg.frag_offload_en, + ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_offload_en, + ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_metadata_hdr_offset, + ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel); + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CFG_n, clnt_hdl, + &ipa3_ctx->ep[clnt_hdl].cfg.cfg); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask + *metadata_mask) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, metadata_mask=0x%x\n", + clnt_hdl, + metadata_mask->metadata_mask); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_MASK_n, + clnt_hdl, metadata_mask); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_hdr() - IPA end-point header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr) +{ + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + IPADBG("pipe=%d metadata_reg_valid=%d\n", + 
clnt_hdl, + ep_hdr->hdr_metadata_reg_valid); + + IPADBG("remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n", + ep_hdr->hdr_remove_additional, + ep_hdr->hdr_a5_mux, + ep_hdr->hdr_ofst_pkt_size); + + IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n", + ep_hdr->hdr_ofst_pkt_size_valid, + ep_hdr->hdr_additional_const_len); + + IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x\n", + ep_hdr->hdr_ofst_metadata, + ep_hdr->hdr_ofst_metadata_valid, + ep_hdr->hdr_len); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + /* copy over EP cfg */ + ep->cfg.hdr = *ep_hdr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, &ep->cfg.hdr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_hdr_ext() - IPA end-point extended header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_hdr_ext: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d hdr_pad_to_alignment=%d\n", + clnt_hdl, + ep_hdr_ext->hdr_pad_to_alignment); + + IPADBG("hdr_total_len_or_pad_offset=%d\n", + ep_hdr_ext->hdr_total_len_or_pad_offset); + + IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n", + ep_hdr_ext->hdr_payload_len_inc_padding, + ep_hdr_ext->hdr_total_len_or_pad); + + IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n", + ep_hdr_ext->hdr_total_len_or_pad_valid, + ep_hdr_ext->hdr_little_endian); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + /* copy over EP cfg */ + ep->cfg.hdr_ext = *ep_hdr_ext; + ep->cfg.hdr_ext.hdr = &ep->cfg.hdr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_EXT_n, clnt_hdl, + &ep->cfg.hdr_ext); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_ctrl() - IPA end-point Control configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ep_ctrl == NULL) { + IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl); + return -EINVAL; + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 && ep_ctrl->ipa_ep_suspend) { + IPAERR("pipe suspend is not supported\n"); + WARN_ON(1); + return -EPERM; + } + + IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n", + clnt_hdl, + ep_ctrl->ipa_ep_suspend, + ep_ctrl->ipa_ep_delay); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, ep_ctrl); + + if (ep_ctrl->ipa_ep_suspend == true && + IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) + ipa3_suspend_active_aggr_wa(clnt_hdl); + + return 0; +} + +const char *ipa3_get_mode_type_str(enum ipa_mode_type mode) +{ + switch (mode) { + case (IPA_BASIC): + return "Basic"; + case (IPA_ENABLE_FRAMING_HDLC): + return "HDLC framing"; + case (IPA_ENABLE_DEFRAMING_HDLC): + return "HDLC de-framing"; + case (IPA_DMA): + return "DMA"; + } 
+ + return "undefined"; +} + +/** + * ipa3_cfg_ep_mode() - IPA end-point mode configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode) +{ + int ep; + int type; + struct ipahal_reg_endp_init_mode init_mode; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) { + IPAERR("bad params clnt_hdl=%d , ep_valid=%d ep_mode=%pK\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid, + ep_mode); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) { + IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl); + return -EINVAL; + } + + ep = ipa3_get_ep_mapping(ep_mode->dst); + if (ep == -1 && ep_mode->mode == IPA_DMA) { + IPAERR("dst %d does not exist in DMA mode\n", ep_mode->dst); + return -EINVAL; + } + + WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst)); + + if (!IPA_CLIENT_IS_CONS(ep_mode->dst)) + ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + + IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d\n", + clnt_hdl, + ep_mode->mode, + ipa3_get_mode_type_str(ep_mode->mode), + ep_mode->dst); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.mode = *ep_mode; + ipa3_ctx->ep[clnt_hdl].dst_pipe_index = ep; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + init_mode.dst_pipe_number = ipa3_ctx->ep[clnt_hdl].dst_pipe_index; + init_mode.ep_mode = *ep_mode; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_MODE_n, clnt_hdl, &init_mode); + + /* Configure sequencers type for test clients*/ + if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) { + if (ep_mode->mode == IPA_DMA) + type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY; + else + /* In IPA4.2 only single pass only supported*/ + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2) + type = + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP; + else + type = + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP; + + IPADBG(" set sequencers to sequance 0x%x, ep = %d\n", type, + clnt_hdl); + ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type); + } + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +const char *ipa3_get_aggr_enable_str(enum ipa_aggr_en_type aggr_en) +{ + switch (aggr_en) { + case (IPA_BYPASS_AGGR): + return "no aggregation"; + case (IPA_ENABLE_AGGR): + return "aggregation enabled"; + case (IPA_ENABLE_DEAGGR): + return "de-aggregation enabled"; + } + + return "undefined"; +} + +const char *ipa3_get_aggr_type_str(enum ipa_aggr_type aggr_type) +{ + switch (aggr_type) { + case (IPA_MBIM_16): + return "MBIM_16"; + case (IPA_HDLC): + return "HDLC"; + case (IPA_TLP): + return "TLP"; + case (IPA_RNDIS): + return "RNDIS"; + case (IPA_GENERIC): + return "GENERIC"; + case (IPA_QCMAP): + return "QCMAP"; + } + return "undefined"; +} + +static u32 ipa3_time_gran_usec_step(enum ipa_timers_time_gran_type gran) +{ + switch (gran) { + case IPA_TIMERS_TIME_GRAN_10_USEC: return 10; + case IPA_TIMERS_TIME_GRAN_20_USEC: return 20; + case IPA_TIMERS_TIME_GRAN_50_USEC: return 50; + case IPA_TIMERS_TIME_GRAN_100_USEC: return 100; + case IPA_TIMERS_TIME_GRAN_1_MSEC: return 1000; + case IPA_TIMERS_TIME_GRAN_10_MSEC: return 10000; + case IPA_TIMERS_TIME_GRAN_100_MSEC: return 100000; + case IPA_TIMERS_TIME_GRAN_NEAR_HALF_SEC: return 655350; + default: + IPAERR("Invalid granularity time 
unit %d\n", gran); + ipa_assert(); + break; + } + + return 100; +} + +/* + * ipa3_process_timer_cfg() - Check and produce timer config + * + * Relevant for IPA 4.5 and above + * + * Assumes clocks are voted + */ +static int ipa3_process_timer_cfg(u32 time_us, + u8 *pulse_gen, u8 *time_units) +{ + struct ipahal_reg_timers_pulse_gran_cfg gran_cfg; + u32 gran0_step, gran1_step; + + IPADBG("time in usec=%u\n", time_us); + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) { + IPAERR("Invalid IPA version %d\n", ipa3_ctx->ipa_hw_type); + return -EPERM; + } + + if (!time_us) { + *pulse_gen = 0; + *time_units = 0; + return 0; + } + + ipahal_read_reg_fields(IPA_TIMERS_PULSE_GRAN_CFG, &gran_cfg); + + gran0_step = ipa3_time_gran_usec_step(gran_cfg.gran_0); + gran1_step = ipa3_time_gran_usec_step(gran_cfg.gran_1); + /* gran_2 is not used by AP */ + + IPADBG("gran0 usec step=%u gran1 usec step=%u\n", + gran0_step, gran1_step); + + /* Lets try pulse generator #0 granularity */ + if (!(time_us % gran0_step)) { + if ((time_us / gran0_step) <= IPA_TIMER_SCALED_TIME_LIMIT) { + *pulse_gen = 0; + *time_units = time_us / gran0_step; + IPADBG("Matched: generator=0, units=%u\n", + *time_units); + return 0; + } + IPADBG("gran0 cannot be used due to range limit\n"); + } + + /* Lets try pulse generator #1 granularity */ + if (!(time_us % gran1_step)) { + if ((time_us / gran1_step) <= IPA_TIMER_SCALED_TIME_LIMIT) { + *pulse_gen = 1; + *time_units = time_us / gran1_step; + IPADBG("Matched: generator=1, units=%u\n", + *time_units); + return 0; + } + IPADBG("gran1 cannot be used due to range limit\n"); + } + + IPAERR("Cannot match requested time to configured granularities\n"); + return -EPERM; +} + +/** + * ipa3_cfg_ep_aggr() - IPA end-point aggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr) +{ + int res = 0; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (ep_aggr->aggr_en == IPA_ENABLE_DEAGGR && + !IPA_EP_SUPPORTS_DEAGGR(clnt_hdl)) { + IPAERR("pipe=%d cannot be configured to DEAGGR\n", clnt_hdl); + WARN_ON(1); + return -EINVAL; + } + + IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n", + clnt_hdl, + ep_aggr->aggr_en, + ipa3_get_aggr_enable_str(ep_aggr->aggr_en), + ep_aggr->aggr, + ipa3_get_aggr_type_str(ep_aggr->aggr), + ep_aggr->aggr_byte_limit, + ep_aggr->aggr_time_limit); + IPADBG("hard_byte_limit_en=%d aggr_sw_eof_active=%d\n", + ep_aggr->aggr_hard_byte_limit_en, + ep_aggr->aggr_sw_eof_active); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) { + res = ipa3_process_timer_cfg(ep_aggr->aggr_time_limit, + &ipa3_ctx->ep[clnt_hdl].cfg.aggr.pulse_generator, + &ipa3_ctx->ep[clnt_hdl].cfg.aggr.scaled_time); + if (res) { + IPAERR("failed to process AGGR timer tmr=%u\n", + ep_aggr->aggr_time_limit); + ipa_assert(); + res = -EINVAL; + goto complete; + } + } else { + /* + * Global aggregation granularity is 0.5msec. + * So if H/W programmed with 1msec, it will be + * 0.5msec defacto. 
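+ * For example, a requested aggr_time_limit of 1000 usec passes the
+ * (time % 500) check below and is stored doubled (2000) before the
+ * register write, compensating for that effective granularity.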
+ * So finest granularity is 0.5msec + */ + if (ep_aggr->aggr_time_limit % 500) { + IPAERR("given time limit %u is not in 0.5msec\n", + ep_aggr->aggr_time_limit); + WARN_ON(1); + res = -EINVAL; + goto complete; + } + + /* Due to described above global granularity */ + ipa3_ctx->ep[clnt_hdl].cfg.aggr.aggr_time_limit *= 2; + } + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, + &ipa3_ctx->ep[clnt_hdl].cfg.aggr); +complete: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return res; +} + +/** + * ipa3_cfg_ep_route() - IPA end-point routing configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route) +{ + struct ipahal_reg_endp_init_route init_rt; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) { + IPAERR("ROUTE does not apply to IPA out EP %d\n", + clnt_hdl); + return -EINVAL; + } + + /* + * if DMA mode was configured previously for this EP, return with + * success + */ + if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) { + IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n", + clnt_hdl); + return 0; + } + + if (ep_route->rt_tbl_hdl) + IPAERR("client specified non-zero RT TBL hdl - ignore it\n"); + + IPADBG("pipe=%d, rt_tbl_hdl=%d\n", + clnt_hdl, + ep_route->rt_tbl_hdl); + + /* always use "default" routing table when programming EP ROUTE reg */ + ipa3_ctx->ep[clnt_hdl].rt_tbl_idx = + IPA_MEM_PART(v4_apps_rt_index_lo); + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n, + clnt_hdl, &init_rt); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + } + + return 0; +} + +#define MAX_ALLOWED_BASE_VAL 0x1f +#define MAX_ALLOWED_SCALE_VAL 0x1f + +/** + * ipa3_cal_ep_holb_scale_base_val - calculate base and scale value from tmr_val + * + * In IPA4.2 HW version need configure base and scale value in HOL timer reg + * @tmr_val: [in] timer value for HOL timer + * @ipa_ep_cfg: [out] Fill IPA end-point configuration base and scale value + * and return + */ +void ipa3_cal_ep_holb_scale_base_val(u32 tmr_val, + struct ipa_ep_cfg_holb *ep_holb) +{ + u32 base_val, scale, scale_val = 1, base = 2; + + for (scale = 0; scale <= MAX_ALLOWED_SCALE_VAL; scale++) { + base_val = tmr_val/scale_val; + if (scale != 0) + scale_val *= base; + if (base_val <= MAX_ALLOWED_BASE_VAL) + break; + } + ep_holb->base_val = base_val; + ep_holb->scale = scale_val; + +} + +/** + * ipa3_cfg_ep_holb() - IPA end-point holb configuration + * + * If an IPA producer pipe is full, IPA HW by default will block + * indefinitely till space opens up. During this time no packets + * including those from unrelated pipes will be processed. Enabling + * HOLB means IPA HW will be allowed to drop packets as/when needed + * and indefinite blocking is avoided. 
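+ *
+ * Illustrative use (values are examples, not recommendations):
+ *
+ *    struct ipa_ep_cfg_holb holb = { .en = 1, .tmr_val = 10 };
+ *
+ *    ipa3_cfg_ep_holb(clnt_hdl, &holb);
+ *
+ * On IPA >= 4.5 the timer is converted to microseconds (tmr_val * 1000)
+ * and mapped by ipa3_process_timer_cfg() onto a pulse generator and a
+ * scaled unit count, e.g. 10 msec may become 10 units of the 1 msec
+ * generator, depending on the granularities set in ipa_cfg_qtime().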
+ * + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL || + ep_holb->tmr_val > ipa3_ctx->ctrl->max_holb_tmr_val || + ep_holb->en > 1) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) { + IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl); + return -EINVAL; + } + + ipa3_ctx->ep[clnt_hdl].holb = *ep_holb; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl, + ep_holb); + + /* Configure timer */ + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2) { + ipa3_cal_ep_holb_scale_base_val(ep_holb->tmr_val, + &ipa3_ctx->ep[clnt_hdl].holb); + goto success; + } + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) { + int res; + + res = ipa3_process_timer_cfg(ep_holb->tmr_val * 1000, + &ipa3_ctx->ep[clnt_hdl].holb.pulse_generator, + &ipa3_ctx->ep[clnt_hdl].holb.scaled_time); + if (res) { + IPAERR("failed to process HOLB timer tmr=%u\n", + ep_holb->tmr_val); + ipa_assert(); + return res; + } + } + +success: + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, + clnt_hdl, &ipa3_ctx->ep[clnt_hdl].holb); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl, + ep_holb->tmr_val); + return 0; +} + +/** + * ipa3_cfg_ep_holb_by_client() - IPA end-point holb configuration + * + * Wrapper function for ipa3_cfg_ep_holb() with client name instead of + * client handle. This function is used for clients that does not have + * client handle. 
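+ *
+ * For example (illustrative client only):
+ *
+ *    ipa3_cfg_ep_holb_by_client(IPA_CLIENT_USB_CONS, &holb);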
+ * + * @client: [in] client name + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ep_holb) +{ + return ipa3_cfg_ep_holb(ipa3_get_ep_mapping(client), ep_holb); +} + +/** + * ipa3_cfg_ep_deaggr() - IPA end-point deaggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_deaggr: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ep_deaggr) +{ + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d deaggr_hdr_len=%d\n", + clnt_hdl, + ep_deaggr->deaggr_hdr_len); + + IPADBG("packet_offset_valid=%d\n", + ep_deaggr->packet_offset_valid); + + IPADBG("packet_offset_location=%d max_packet_len=%d\n", + ep_deaggr->packet_offset_location, + ep_deaggr->max_packet_len); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + /* copy over EP cfg */ + ep->cfg.deaggr = *ep_deaggr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_DEAGGR_n, clnt_hdl, + &ep->cfg.deaggr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_metadata() - IPA end-point metadata configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md) +{ + u32 qmap_id = 0; + struct ipa_ep_cfg_metadata ep_md_reg_wrt; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.meta = *ep_md; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ep_md_reg_wrt = *ep_md; + qmap_id = (ep_md->qmap_id << + IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT) & + IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK; + + /* mark tethering bit for remote modem */ + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_1) + qmap_id |= IPA_QMAP_TETH_BIT; + + ep_md_reg_wrt.qmap_id = qmap_id; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_n, clnt_hdl, + &ep_md_reg_wrt); + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) { + ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, + &ipa3_ctx->ep[clnt_hdl].cfg.hdr); + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in) +{ + struct ipa_ep_cfg_metadata meta; + struct ipa3_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + + if (param_in->client >= IPA_CLIENT_MAX) { + IPAERR_RL("bad parm client:%d\n", param_in->client); + goto fail; + } + + ipa_ep_idx = ipa3_get_ep_mapping(param_in->client); + if (ipa_ep_idx == -1) { + IPAERR_RL("Invalid client.\n"); + 
goto fail; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (!ep->valid) { + IPAERR_RL("EP not allocated.\n"); + goto fail; + } + + meta.qmap_id = param_in->qmap_id; + if (param_in->client == IPA_CLIENT_USB_PROD || + param_in->client == IPA_CLIENT_HSIC1_PROD || + param_in->client == IPA_CLIENT_ODU_PROD || + param_in->client == IPA_CLIENT_ETHERNET_PROD) { + result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta); + } else if (param_in->client == IPA_CLIENT_WLAN1_PROD) { + ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta; + result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id); + if (result) + IPAERR_RL("qmap_id %d write failed on ep=%d\n", + meta.qmap_id, ipa_ep_idx); + result = 0; + } + +fail: + return result; +} + +/** + * ipa3_dump_buff_internal() - dumps buffer for debug purposes + * @base: buffer base address + * @phy_base: buffer physical base address + * @size: size of the buffer + */ +void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size) +{ + int i; + u32 *cur = (u32 *)base; + u8 *byt; + + IPADBG("system phys addr=%pa len=%u\n", &phy_base, size); + for (i = 0; i < size / 4; i++) { + byt = (u8 *)(cur + i); + IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i), + byt[0], byt[1], byt[2], byt[3]); + } + IPADBG("END\n"); +} + +/** + * ipa3_set_aggr_mode() - Set the aggregation mode which is a global setting + * @mode: [in] the desired aggregation mode for e.g. straight MBIM, QCNCM, + * etc + * + * Returns: 0 on success + */ +int ipa3_set_aggr_mode(enum ipa_aggr_mode mode) +{ + struct ipahal_reg_qcncm qcncm; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + if (mode != IPA_MBIM_AGGR) { + IPAERR("Only MBIM mode is supported staring 4.0\n"); + return -EPERM; + } + } else { + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipahal_read_reg_fields(IPA_QCNCM, &qcncm); + qcncm.mode_en = mode; + ipahal_write_reg_fields(IPA_QCNCM, &qcncm); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + } + + return 0; +} + +/** + * ipa3_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation + * mode + * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be + * "QND") + * + * Set the NDP signature used for QCNCM aggregation mode. 
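+ * The three bytes are packed into the QCNCM register as
+ * (sig[0] << 16) | (sig[1] << 8) | sig[2]; for example "QND" is written
+ * as 0x514e44.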
The fourth byte + * (expected to be 'P') needs to be set using the header addition mechanism + * + * Returns: 0 on success, negative on failure + */ +int ipa3_set_qcncm_ndp_sig(char sig[3]) +{ + struct ipahal_reg_qcncm qcncm; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + IPAERR("QCNCM mode is not supported staring 4.0\n"); + return -EPERM; + } + + if (sig == NULL) { + IPAERR("bad argument\n"); + return -EINVAL; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipahal_read_reg_fields(IPA_QCNCM, &qcncm); + qcncm.mode_val = ((sig[0] << 16) | (sig[1] << 8) | sig[2]); + ipahal_write_reg_fields(IPA_QCNCM, &qcncm); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa3_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame + * configuration + * @enable: [in] true for single NDP/MBIM; false otherwise + * + * Returns: 0 on success + */ +int ipa3_set_single_ndp_per_mbim(bool enable) +{ + struct ipahal_reg_single_ndp_mode mode; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + IPAERR("QCNCM mode is not supported staring 4.0\n"); + return -EPERM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipahal_read_reg_fields(IPA_SINGLE_NDP_MODE, &mode); + mode.single_ndp_en = enable; + ipahal_write_reg_fields(IPA_SINGLE_NDP_MODE, &mode); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa3_straddle_boundary() - Checks whether a memory buffer straddles a + * boundary + * @start: start address of the memory buffer + * @end: end address of the memory buffer + * @boundary: boundary + * + * Return value: + * 1: if the interval [start, end] straddles boundary + * 0: otherwise + */ +int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary) +{ + u32 next_start; + u32 prev_end; + + IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary); + + next_start = (start + (boundary - 1)) & ~(boundary - 1); + prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary; + + while (next_start < prev_end) + next_start += boundary; + + if (next_start == prev_end) + return 1; + else + return 0; +} + +/** + * ipa3_init_mem_partition() - Assigns the static memory partition + * based on the IPA version + * + * Returns: 0 on success + */ +int ipa3_init_mem_partition(enum ipa_hw_type type) +{ + switch (type) { + case IPA_HW_v4_1: + ipa3_ctx->ctrl->mem_partition = &ipa_4_1_mem_part; + break; + case IPA_HW_v4_2: + ipa3_ctx->ctrl->mem_partition = &ipa_4_2_mem_part; + break; + case IPA_HW_v4_5: + ipa3_ctx->ctrl->mem_partition = &ipa_4_5_mem_part; + break; + case IPA_HW_None: + case IPA_HW_v1_0: + case IPA_HW_v1_1: + case IPA_HW_v2_0: + case IPA_HW_v2_1: + case IPA_HW_v2_5: + case IPA_HW_v2_6L: + case IPA_HW_v3_0: + case IPA_HW_v3_1: + case IPA_HW_v3_5: + case IPA_HW_v3_5_1: + case IPA_HW_v4_0: + IPAERR("unsupported version %d\n", type); + return -EPERM; + } + + if (IPA_MEM_PART(uc_info_ofst) & 3) { + IPAERR("UC INFO OFST 0x%x is unaligned\n", + IPA_MEM_PART(uc_info_ofst)); + return -ENODEV; + } + + IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(uc_info_ofst), IPA_MEM_PART(uc_info_size)); + + IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start)); + + if (IPA_MEM_PART(v4_flt_hash_ofst) & 7) { + IPAERR("V4 FLT HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v4_flt_hash_ofst)); + return -ENODEV; + } + + IPADBG("V4 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_flt_hash_ofst), + IPA_MEM_PART(v4_flt_hash_size), + IPA_MEM_PART(v4_flt_hash_size_ddr)); + + if (IPA_MEM_PART(v4_flt_nhash_ofst) & 7) { + IPAERR("V4 FLT NON-HASHABLE OFST 0x%x is unaligned\n", + 
IPA_MEM_PART(v4_flt_nhash_ofst)); + return -ENODEV; + } + + IPADBG("V4 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_flt_nhash_ofst), + IPA_MEM_PART(v4_flt_nhash_size), + IPA_MEM_PART(v4_flt_nhash_size_ddr)); + + if (IPA_MEM_PART(v6_flt_hash_ofst) & 7) { + IPAERR("V6 FLT HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v6_flt_hash_ofst)); + return -ENODEV; + } + + IPADBG("V6 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_flt_hash_ofst), IPA_MEM_PART(v6_flt_hash_size), + IPA_MEM_PART(v6_flt_hash_size_ddr)); + + if (IPA_MEM_PART(v6_flt_nhash_ofst) & 7) { + IPAERR("V6 FLT NON-HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v6_flt_nhash_ofst)); + return -ENODEV; + } + + IPADBG("V6 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_flt_nhash_ofst), + IPA_MEM_PART(v6_flt_nhash_size), + IPA_MEM_PART(v6_flt_nhash_size_ddr)); + + IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_rt_num_index)); + + IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_modem_rt_index_lo), + IPA_MEM_PART(v4_modem_rt_index_hi)); + + IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_apps_rt_index_lo), + IPA_MEM_PART(v4_apps_rt_index_hi)); + + if (IPA_MEM_PART(v4_rt_hash_ofst) & 7) { + IPAERR("V4 RT HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v4_rt_hash_ofst)); + return -ENODEV; + } + + IPADBG("V4 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v4_rt_hash_ofst)); + + IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_rt_hash_size), + IPA_MEM_PART(v4_rt_hash_size_ddr)); + + if (IPA_MEM_PART(v4_rt_nhash_ofst) & 7) { + IPAERR("V4 RT NON-HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v4_rt_nhash_ofst)); + return -ENODEV; + } + + IPADBG("V4 RT NON-HASHABLE OFST 0x%x\n", + IPA_MEM_PART(v4_rt_nhash_ofst)); + + IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_rt_nhash_size), + IPA_MEM_PART(v4_rt_nhash_size_ddr)); + + IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_rt_num_index)); + + IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_modem_rt_index_lo), + IPA_MEM_PART(v6_modem_rt_index_hi)); + + IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_apps_rt_index_lo), + IPA_MEM_PART(v6_apps_rt_index_hi)); + + if (IPA_MEM_PART(v6_rt_hash_ofst) & 7) { + IPAERR("V6 RT HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v6_rt_hash_ofst)); + return -ENODEV; + } + + IPADBG("V6 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v6_rt_hash_ofst)); + + IPADBG("V6 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_rt_hash_size), + IPA_MEM_PART(v6_rt_hash_size_ddr)); + + if (IPA_MEM_PART(v6_rt_nhash_ofst) & 7) { + IPAERR("V6 RT NON-HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v6_rt_nhash_ofst)); + return -ENODEV; + } + + IPADBG("V6 RT NON-HASHABLE OFST 0x%x\n", + IPA_MEM_PART(v6_rt_nhash_ofst)); + + IPADBG("V6 RT NON-HASHABLE SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_rt_nhash_size), + IPA_MEM_PART(v6_rt_nhash_size_ddr)); + + if (IPA_MEM_PART(modem_hdr_ofst) & 7) { + IPAERR("MODEM HDR OFST 0x%x is unaligned\n", + IPA_MEM_PART(modem_hdr_ofst)); + return -ENODEV; + } + + IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size)); + + if (IPA_MEM_PART(apps_hdr_ofst) & 7) { + IPAERR("APPS HDR OFST 0x%x is unaligned\n", + IPA_MEM_PART(apps_hdr_ofst)); + return -ENODEV; + } + + IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size), + IPA_MEM_PART(apps_hdr_size_ddr)); + + if 
(IPA_MEM_PART(modem_hdr_proc_ctx_ofst) & 7) { + IPAERR("MODEM HDR PROC CTX OFST 0x%x is unaligned\n", + IPA_MEM_PART(modem_hdr_proc_ctx_ofst)); + return -ENODEV; + } + + IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_proc_ctx_ofst), + IPA_MEM_PART(modem_hdr_proc_ctx_size)); + + if (IPA_MEM_PART(apps_hdr_proc_ctx_ofst) & 7) { + IPAERR("APPS HDR PROC CTX OFST 0x%x is unaligned\n", + IPA_MEM_PART(apps_hdr_proc_ctx_ofst)); + return -ENODEV; + } + + IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_proc_ctx_ofst), + IPA_MEM_PART(apps_hdr_proc_ctx_size), + IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr)); + + if (IPA_MEM_PART(pdn_config_ofst) & 7) { + IPAERR("PDN CONFIG OFST 0x%x is unaligned\n", + IPA_MEM_PART(pdn_config_ofst)); + return -ENODEV; + } + + /* + * Routing rules points to hdr_proc_ctx in 32byte offsets from base. + * Base is modem hdr_proc_ctx first address. + * AP driver install APPS hdr_proc_ctx starting at the beginning of + * apps hdr_proc_ctx part. + * So first apps hdr_proc_ctx offset at some routing + * rule will be modem_hdr_proc_ctx_size >> 5 (32B). + */ + if (IPA_MEM_PART(modem_hdr_proc_ctx_size) & 31) { + IPAERR("MODEM HDR PROC CTX SIZE 0x%x is not 32B aligned\n", + IPA_MEM_PART(modem_hdr_proc_ctx_size)); + return -ENODEV; + } + + /* + * AP driver when installing routing rule, it calcs the hdr_proc_ctx + * offset by local offset (from base of apps part) + + * modem_hdr_proc_ctx_size. This is to get offset from modem part base. + * Thus apps part must be adjacent to modem part + */ + if (IPA_MEM_PART(apps_hdr_proc_ctx_ofst) != + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + + IPA_MEM_PART(modem_hdr_proc_ctx_size)) { + IPAERR("APPS HDR PROC CTX SIZE not adjacent to MODEM one!\n"); + return -ENODEV; + } + + IPADBG("NAT TBL OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(nat_tbl_ofst), + IPA_MEM_PART(nat_tbl_size)); + + if (IPA_MEM_PART(nat_tbl_ofst) & 31) { + IPAERR("NAT TBL OFST 0x%x is unaligned\n", + IPA_MEM_PART(nat_tbl_ofst)); + return -ENODEV; + } + + IPADBG("NAT INDEX TBL OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(nat_index_tbl_ofst), + IPA_MEM_PART(nat_index_tbl_size)); + + if (IPA_MEM_PART(nat_index_tbl_ofst) & 3) { + IPAERR("NAT INDEX TBL OFST 0x%x is unaligned\n", + IPA_MEM_PART(nat_index_tbl_ofst)); + return -ENODEV; + } + + IPADBG("NAT EXP TBL OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(nat_exp_tbl_ofst), + IPA_MEM_PART(nat_exp_tbl_size)); + + if (IPA_MEM_PART(nat_exp_tbl_ofst) & 31) { + IPAERR("NAT EXP TBL OFST 0x%x is unaligned\n", + IPA_MEM_PART(nat_exp_tbl_ofst)); + return -ENODEV; + } + + IPADBG("PDN CONFIG OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(pdn_config_ofst), + IPA_MEM_PART(pdn_config_size)); + + if (IPA_MEM_PART(pdn_config_ofst) & 7) { + IPAERR("PDN CONFIG OFST 0x%x is unaligned\n", + IPA_MEM_PART(pdn_config_ofst)); + return -ENODEV; + } + + IPADBG("QUOTA STATS OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(stats_quota_ofst), + IPA_MEM_PART(stats_quota_size)); + + if (IPA_MEM_PART(stats_quota_ofst) & 7) { + IPAERR("QUOTA STATS OFST 0x%x is unaligned\n", + IPA_MEM_PART(stats_quota_ofst)); + return -ENODEV; + } + + IPADBG("TETHERING STATS OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(stats_tethering_ofst), + IPA_MEM_PART(stats_tethering_size)); + + if (IPA_MEM_PART(stats_tethering_ofst) & 7) { + IPAERR("TETHERING STATS OFST 0x%x is unaligned\n", + IPA_MEM_PART(stats_tethering_ofst)); + return -ENODEV; + } + + IPADBG("FILTER AND ROUTING STATS OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(stats_fnr_ofst), + IPA_MEM_PART(stats_fnr_size)); + + 
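+ /*
+ * A worked example of the alignment checks used throughout this
+ * function: an offset of 0x1a4 fails the "& 7" test (not 8-byte
+ * aligned) and probe aborts with -ENODEV, while 0x1a8 passes.
+ */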
if (IPA_MEM_PART(stats_fnr_ofst) & 7) { + IPAERR("FILTER AND ROUTING STATS OFST 0x%x is unaligned\n", + IPA_MEM_PART(stats_fnr_ofst)); + return -ENODEV; + } + + IPADBG("DROP STATS OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(stats_drop_ofst), + IPA_MEM_PART(stats_drop_size)); + + if (IPA_MEM_PART(stats_drop_ofst) & 7) { + IPAERR("DROP STATS OFST 0x%x is unaligned\n", + IPA_MEM_PART(stats_drop_ofst)); + return -ENODEV; + } + + IPADBG("V4 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_flt_hash_ofst), + IPA_MEM_PART(apps_v4_flt_hash_size)); + + IPADBG("V4 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_flt_nhash_ofst), + IPA_MEM_PART(apps_v4_flt_nhash_size)); + + IPADBG("V6 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_flt_hash_ofst), + IPA_MEM_PART(apps_v6_flt_hash_size)); + + IPADBG("V6 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_flt_nhash_ofst), + IPA_MEM_PART(apps_v6_flt_nhash_size)); + + IPADBG("RAM END OFST 0x%x\n", + IPA_MEM_PART(end_ofst)); + + IPADBG("V4 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_rt_hash_ofst), + IPA_MEM_PART(apps_v4_rt_hash_size)); + + IPADBG("V4 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_rt_nhash_ofst), + IPA_MEM_PART(apps_v4_rt_nhash_size)); + + IPADBG("V6 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_rt_hash_ofst), + IPA_MEM_PART(apps_v6_rt_hash_size)); + + IPADBG("V6 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_rt_nhash_ofst), + IPA_MEM_PART(apps_v6_rt_nhash_size)); + + if (IPA_MEM_PART(modem_ofst) & 7) { + IPAERR("MODEM OFST 0x%x is unaligned\n", + IPA_MEM_PART(modem_ofst)); + return -ENODEV; + } + + IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst), + IPA_MEM_PART(modem_size)); + + if (IPA_MEM_PART(uc_descriptor_ram_ofst) & 1023) { + IPAERR("UC DESCRIPTOR RAM OFST 0x%x is unaligned\n", + IPA_MEM_PART(uc_descriptor_ram_ofst)); + return -ENODEV; + } + + IPADBG("UC DESCRIPTOR RAM OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(uc_descriptor_ram_ofst), + IPA_MEM_PART(uc_descriptor_ram_size)); + + return 0; +} + +/** + * ipa_ctrl_static_bind() - set the appropriate methods for + * IPA Driver based on the HW version + * + * @ctrl: data structure which holds the function pointers + * @hw_type: the HW type in use + * + * This function can avoid the runtime assignment by using C99 special + * struct initialization - hard decision... 
time.vs.mem + */ +int ipa3_controller_static_bind(struct ipa3_controller *ctrl, + enum ipa_hw_type hw_type) +{ + if (hw_type >= IPA_HW_v4_0) { + ctrl->ipa_clk_rate_turbo = IPA_V4_0_CLK_RATE_TURBO; + ctrl->ipa_clk_rate_nominal = IPA_V4_0_CLK_RATE_NOMINAL; + ctrl->ipa_clk_rate_svs = IPA_V4_0_CLK_RATE_SVS; + ctrl->ipa_clk_rate_svs2 = IPA_V4_0_CLK_RATE_SVS2; + } else if (hw_type >= IPA_HW_v3_5) { + ctrl->ipa_clk_rate_turbo = IPA_V3_5_CLK_RATE_TURBO; + ctrl->ipa_clk_rate_nominal = IPA_V3_5_CLK_RATE_NOMINAL; + ctrl->ipa_clk_rate_svs = IPA_V3_5_CLK_RATE_SVS; + ctrl->ipa_clk_rate_svs2 = IPA_V3_5_CLK_RATE_SVS2; + } else { + ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO; + ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL; + ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS; + ctrl->ipa_clk_rate_svs2 = IPA_V3_0_CLK_RATE_SVS2; + } + + ctrl->ipa_init_rt4 = _ipa_init_rt4_v3; + ctrl->ipa_init_rt6 = _ipa_init_rt6_v3; + ctrl->ipa_init_flt4 = _ipa_init_flt4_v3; + ctrl->ipa_init_flt6 = _ipa_init_flt6_v3; + ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v3_0; + ctrl->ipa3_commit_flt = __ipa_commit_flt_v3; + ctrl->ipa3_commit_rt = __ipa_commit_rt_v3; + ctrl->ipa3_commit_hdr = __ipa_commit_hdr_v3_0; + ctrl->ipa3_enable_clks = _ipa_enable_clks_v3_0; + ctrl->ipa3_disable_clks = _ipa_disable_clks_v3_0; + ctrl->clock_scaling_bw_threshold_svs = + IPA_V3_0_BW_THRESHOLD_SVS_MBPS; + ctrl->clock_scaling_bw_threshold_nominal = + IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS; + ctrl->clock_scaling_bw_threshold_turbo = + IPA_V3_0_BW_THRESHOLD_TURBO_MBPS; + ctrl->ipa_reg_base_ofst = ipahal_get_reg_base(); + ctrl->ipa_init_sram = _ipa_init_sram_v3; + ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0; + ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v4_0; + + return 0; +} + +void ipa3_skb_recycle(struct sk_buff *skb) +{ + struct skb_shared_info *shinfo; + + shinfo = skb_shinfo(skb); + memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); + atomic_set(&shinfo->dataref, 1); + + memset(skb, 0, offsetof(struct sk_buff, tail)); + skb->data = skb->head + NET_SKB_PAD; + skb_reset_tail_pointer(skb); +} + +int ipa3_alloc_rule_id(struct idr *rule_ids) +{ + /* There is two groups of rule-Ids, Modem ones and Apps ones. + * Distinction by high bit: Modem Ids are high bit asserted. + */ + return idr_alloc(rule_ids, NULL, + ipahal_get_low_rule_id(), ipahal_get_rule_id_hi_bit(), + GFP_KERNEL); +} + +int ipa3_id_alloc(void *ptr) +{ + int id; + + idr_preload(GFP_KERNEL); + spin_lock(&ipa3_ctx->idr_lock); + id = idr_alloc(&ipa3_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT); + spin_unlock(&ipa3_ctx->idr_lock); + idr_preload_end(); + + return id; +} + +void *ipa3_id_find(u32 id) +{ + void *ptr; + + spin_lock(&ipa3_ctx->idr_lock); + ptr = idr_find(&ipa3_ctx->ipa_idr, id); + spin_unlock(&ipa3_ctx->idr_lock); + + return ptr; +} + +void ipa3_id_remove(u32 id) +{ + spin_lock(&ipa3_ctx->idr_lock); + idr_remove(&ipa3_ctx->ipa_idr, id); + spin_unlock(&ipa3_ctx->idr_lock); +} + +void ipa3_tag_destroy_imm(void *user1, int user2) +{ + ipahal_destroy_imm_cmd(user1); +} + +static void ipa3_tag_free_skb(void *user1, int user2) +{ + dev_kfree_skb_any((struct sk_buff *)user1); +} + +#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4 +#define MAX_RETRY_ALLOC 10 +#define ALLOC_MIN_SLEEP_RX 100000 +#define ALLOC_MAX_SLEEP_RX 200000 + +/* ipa3_tag_process() - Initiates a tag process. 
Incorporates the input + * descriptors + * + * @desc: descriptors with commands for IC + * @desc_size: amount of descriptors in the above variable + * + * Note: The descriptors are copied (if there's room), the client needs to + * free his descriptors afterwards + * + * Return: 0 or negative in case of failure + */ +int ipa3_tag_process(struct ipa3_desc desc[], + int descs_num, + unsigned long timeout) +{ + struct ipa3_sys_context *sys; + struct ipa3_desc *tag_desc; + int desc_idx = 0; + struct ipahal_imm_cmd_ip_packet_init pktinit_cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld = NULL; + struct ipahal_imm_cmd_ip_packet_tag_status status; + int i; + struct sk_buff *dummy_skb; + int res; + struct ipa3_tag_completion *comp; + int ep_idx; + u32 retry_cnt = 0; + + /* Not enough room for the required descriptors for the tag process */ + if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) { + IPAERR("up to %d descriptors are allowed (received %d)\n", + IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS, + descs_num); + return -ENOMEM; + } + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (-1 == ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_CMD_PROD); + return -EFAULT; + } + sys = ipa3_ctx->ep[ep_idx].sys; + + tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL); + if (!tag_desc) { + IPAERR("failed to allocate memory\n"); + return -ENOMEM; + } + + /* Copy the required descriptors from the client now */ + if (desc) { + memcpy(&(tag_desc[0]), desc, descs_num * + sizeof(tag_desc[0])); + desc_idx += descs_num; + } + + /* NO-OP IC for ensuring that IPA pipeline is empty */ + cmd_pyld = ipahal_construct_nop_imm_cmd( + false, IPAHAL_FULL_PIPELINE_CLEAR, false); + if (!cmd_pyld) { + IPAERR("failed to construct NOP imm cmd\n"); + res = -ENOMEM; + goto fail_free_tag_desc; + } + ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld); + tag_desc[desc_idx].callback = ipa3_tag_destroy_imm; + tag_desc[desc_idx].user1 = cmd_pyld; + ++desc_idx; + + /* IP_PACKET_INIT IC for tag status to be sent to apps */ + pktinit_cmd.destination_pipe_index = + ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_PACKET_INIT, &pktinit_cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct ip_packet_init imm cmd\n"); + res = -ENOMEM; + goto fail_free_desc; + } + ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld); + tag_desc[desc_idx].callback = ipa3_tag_destroy_imm; + tag_desc[desc_idx].user1 = cmd_pyld; + ++desc_idx; + + /* status IC */ + status.tag = IPA_COOKIE; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &status, false); + if (!cmd_pyld) { + IPAERR("failed to construct ip_packet_tag_status imm cmd\n"); + res = -ENOMEM; + goto fail_free_desc; + } + ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld); + tag_desc[desc_idx].callback = ipa3_tag_destroy_imm; + tag_desc[desc_idx].user1 = cmd_pyld; + ++desc_idx; + + comp = kzalloc(sizeof(*comp), GFP_KERNEL); + if (!comp) { + IPAERR("no mem\n"); + res = -ENOMEM; + goto fail_free_desc; + } + init_completion(&comp->comp); + + /* completion needs to be released from both here and rx handler */ + atomic_set(&comp->cnt, 2); + + /* dummy packet to send to IPA. 
packet payload is a completion object */ + dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL); + if (!dummy_skb) { + IPAERR("failed to allocate memory\n"); + res = -ENOMEM; + goto fail_free_comp; + } + + memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp)); + + if (desc_idx >= IPA_TAG_MAX_DESC) { + IPAERR("number of commands is out of range\n"); + res = -ENOBUFS; + goto fail_free_skb; + } + + tag_desc[desc_idx].pyld = dummy_skb->data; + tag_desc[desc_idx].len = dummy_skb->len; + tag_desc[desc_idx].type = IPA_DATA_DESC_SKB; + tag_desc[desc_idx].callback = ipa3_tag_free_skb; + tag_desc[desc_idx].user1 = dummy_skb; + desc_idx++; +retry_alloc: + /* send all descriptors to IPA with single EOT */ + res = ipa3_send(sys, desc_idx, tag_desc, true); + if (res) { + if (res == -ENOMEM) { + if (retry_cnt < MAX_RETRY_ALLOC) { + IPADBG( + "failed to alloc memory retry cnt = %d\n", + retry_cnt); + retry_cnt++; + usleep_range(ALLOC_MIN_SLEEP_RX, + ALLOC_MAX_SLEEP_RX); + goto retry_alloc; + } + + } + IPAERR("failed to send TAG packets %d\n", res); + res = -ENOMEM; + goto fail_free_skb; + } + kfree(tag_desc); + tag_desc = NULL; + ipa3_ctx->tag_process_before_gating = false; + + IPADBG("waiting for TAG response\n"); + res = wait_for_completion_timeout(&comp->comp, timeout); + if (res == 0) { + IPAERR("timeout (%lu msec) on waiting for TAG response\n", + timeout); + WARN_ON(1); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + return -ETIME; + } + + IPADBG("TAG response arrived!\n"); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + + /* + * sleep for short period to ensure IPA wrote all packets to + * the transport + */ + usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC); + + return 0; + +fail_free_skb: + kfree_skb(dummy_skb); +fail_free_comp: + kfree(comp); +fail_free_desc: + /* + * Free only the first descriptors allocated here. + * [nop, pkt_init, status, dummy_skb] + * The user is responsible to free his allocations + * in case of failure. 
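+ * The callback attached to each locally built descriptor
+ * (ipa3_tag_destroy_imm for the immediate commands, ipa3_tag_free_skb
+ * for the dummy skb) releases its payload, which is what the loop
+ * below relies on.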
+ * The min is required because we may fail during + * of the initial allocations above + */ + for (i = descs_num; + i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS, desc_idx); i++) + if (tag_desc[i].callback) + tag_desc[i].callback(tag_desc[i].user1, + tag_desc[i].user2); +fail_free_tag_desc: + kfree(tag_desc); + return res; +} + +/** + * ipa3_tag_generate_force_close_desc() - generate descriptors for force close + * immediate command + * + * @desc: descriptors for IC + * @desc_size: desc array size + * @start_pipe: first pipe to close aggregation + * @end_pipe: last (non-inclusive) pipe to close aggregation + * + * Return: number of descriptors written or negative in case of failure + */ +static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[], + int desc_size, int start_pipe, int end_pipe) +{ + int i; + struct ipa_ep_cfg_aggr ep_aggr; + int desc_idx = 0; + int res; + struct ipahal_imm_cmd_register_write reg_write_agg_close; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_reg_valmask valmask; + + for (i = start_pipe; i < end_pipe; i++) { + ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, i, &ep_aggr); + if (!ep_aggr.aggr_en) + continue; + IPADBG("Force close ep: %d\n", i); + if (desc_idx + 1 > desc_size) { + IPAERR("Internal error - no descriptors\n"); + res = -EFAULT; + goto fail_no_desc; + } + + reg_write_agg_close.skip_pipeline_clear = false; + reg_write_agg_close.pipeline_clear_options = + IPAHAL_FULL_PIPELINE_CLEAR; + reg_write_agg_close.offset = + ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE); + ipahal_get_aggr_force_close_valmask(i, &valmask); + reg_write_agg_close.value = valmask.val; + reg_write_agg_close.value_mask = valmask.mask; + cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + ®_write_agg_close, false); + if (!cmd_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + res = -ENOMEM; + goto fail_alloc_reg_write_agg_close; + } + + ipa3_init_imm_cmd_desc(&desc[desc_idx], cmd_pyld); + desc[desc_idx].callback = ipa3_tag_destroy_imm; + desc[desc_idx].user1 = cmd_pyld; + ++desc_idx; + } + + return desc_idx; + +fail_alloc_reg_write_agg_close: + for (i = 0; i < desc_idx; ++i) + if (desc[desc_idx].callback) + desc[desc_idx].callback(desc[desc_idx].user1, + desc[desc_idx].user2); +fail_no_desc: + return res; +} + +/** + * ipa3_tag_aggr_force_close() - Force close aggregation + * + * @pipe_num: pipe number or -1 for all pipes + */ +int ipa3_tag_aggr_force_close(int pipe_num) +{ + struct ipa3_desc *desc; + int res = -1; + int start_pipe; + int end_pipe; + int num_descs; + int num_aggr_descs; + + if (pipe_num < -1 || pipe_num >= (int)ipa3_ctx->ipa_num_pipes) { + IPAERR("Invalid pipe number %d\n", pipe_num); + return -EINVAL; + } + + if (pipe_num == -1) { + start_pipe = 0; + end_pipe = ipa3_ctx->ipa_num_pipes; + } else { + start_pipe = pipe_num; + end_pipe = pipe_num + 1; + } + + num_descs = end_pipe - start_pipe; + + desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL); + if (!desc) { + IPAERR("no mem\n"); + return -ENOMEM; + } + + /* Force close aggregation on all valid pipes with aggregation */ + num_aggr_descs = ipa3_tag_generate_force_close_desc(desc, num_descs, + start_pipe, end_pipe); + if (num_aggr_descs < 0) { + IPAERR("ipa3_tag_generate_force_close_desc failed %d\n", + num_aggr_descs); + goto fail_free_desc; + } + + res = ipa3_tag_process(desc, num_aggr_descs, + IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT); + +fail_free_desc: + kfree(desc); + + return res; +} + +/** + * ipa3_is_ready() - check if IPA module was initialized + * successfully + * + * 
Return value: true for yes; false for no + */ +bool ipa3_is_ready(void) +{ + bool complete; + + if (ipa3_ctx == NULL) + return false; + mutex_lock(&ipa3_ctx->lock); + complete = ipa3_ctx->ipa_initialization_complete; + mutex_unlock(&ipa3_ctx->lock); + return complete; +} + +/** + * ipa3_is_client_handle_valid() - check if IPA client handle is valid handle + * + * Return value: true for yes; false for no + */ +bool ipa3_is_client_handle_valid(u32 clnt_hdl) +{ + if (clnt_hdl >= 0 && clnt_hdl < ipa3_ctx->ipa_num_pipes) + return true; + return false; +} + +/** + * ipa3_proxy_clk_unvote() - called to remove IPA clock proxy vote + * + * Return value: none + */ +void ipa3_proxy_clk_unvote(void) +{ + if (ipa3_ctx == NULL) + return; + mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex); + if (ipa3_ctx->q6_proxy_clk_vote_valid) { + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE"); + ipa3_ctx->q6_proxy_clk_vote_cnt--; + if (ipa3_ctx->q6_proxy_clk_vote_cnt == 0) + ipa3_ctx->q6_proxy_clk_vote_valid = false; + } + mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex); +} + +/** + * ipa3_proxy_clk_vote() - called to add IPA clock proxy vote + * + * Return value: none + */ +void ipa3_proxy_clk_vote(void) +{ + if (ipa3_ctx == NULL) + return; + mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex); + if (!ipa3_ctx->q6_proxy_clk_vote_valid || + (ipa3_ctx->q6_proxy_clk_vote_cnt > 0)) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE"); + ipa3_ctx->q6_proxy_clk_vote_cnt++; + ipa3_ctx->q6_proxy_clk_vote_valid = true; + } + mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex); +} + +/** + * ipa3_get_smem_restr_bytes()- Return IPA smem restricted bytes + * + * Return value: u16 - number of IPA smem restricted bytes + */ +u16 ipa3_get_smem_restr_bytes(void) +{ + if (ipa3_ctx) + return ipa3_ctx->smem_restricted_bytes; + + IPAERR("IPA Driver not initialized\n"); + + return 0; +} + +/** + * ipa3_get_modem_cfg_emb_pipe_flt()- Return ipa3_ctx->modem_cfg_emb_pipe_flt + * + * Return value: true if modem configures embedded pipe flt, false otherwise + */ +bool ipa3_get_modem_cfg_emb_pipe_flt(void) +{ + if (ipa3_ctx) + return ipa3_ctx->modem_cfg_emb_pipe_flt; + + IPAERR("IPA driver has not been initialized\n"); + + return false; +} + +/** + * ipa3_get_transport_type() + * + * Return value: enum ipa_transport_type + */ +enum ipa_transport_type ipa3_get_transport_type(void) +{ + return IPA_TRANSPORT_TYPE_GSI; +} + +u32 ipa3_get_num_pipes(void) +{ + return ipahal_read_reg(IPA_ENABLED_PIPES); +} + +/** + * ipa3_disable_apps_wan_cons_deaggr()- + * set ipa_ctx->ipa_client_apps_wan_cons_agg_gro + * + * Return value: 0 or negative in case of failure + */ +int ipa3_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count) +{ + int res = -1; + + /* ipahal will adjust limits based on HW capabilities */ + + if (ipa3_ctx) { + ipa3_ctx->ipa_client_apps_wan_cons_agg_gro = true; + return 0; + } + return res; +} + +static void *ipa3_get_ipc_logbuf(void) +{ + if (ipa3_ctx) + return ipa3_ctx->logbuf; + + return NULL; +} + +static void *ipa3_get_ipc_logbuf_low(void) +{ + if (ipa3_ctx) + return ipa3_ctx->logbuf_low; + + return NULL; +} + +static void ipa3_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb) +{ + *holb = ipa3_ctx->ep[ep_idx].holb; +} + +static void ipa3_set_tag_process_before_gating(bool val) +{ + ipa3_ctx->tag_process_before_gating = val; +} + +/** + * ipa3_is_vlan_mode - check if a LAN driver should load in VLAN mode + * @iface - type of vlan capable device + * @res - query result: true for vlan mode, false for non vlan mode + * + * API 
must be called after ipa_is_ready() returns true, otherwise it will fail + * + * Returns: 0 on success, negative on failure + */ +int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res) +{ + if (!res) { + IPAERR("NULL out param\n"); + return -EINVAL; + } + + if (iface < 0 || iface > IPA_VLAN_IF_MAX) { + IPAERR("invalid iface %d\n", iface); + return -EINVAL; + } + + if (!ipa3_is_ready()) { + IPAERR("IPA is not ready yet\n"); + return -ENODEV; + } + + *res = ipa3_ctx->vlan_mode_iface[iface]; + + IPADBG("Driver %d vlan mode is %d\n", iface, *res); + return 0; +} + +static bool ipa3_pm_is_used(void) +{ + return (ipa3_ctx) ? ipa3_ctx->use_ipa_pm : false; +} + +int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, + struct ipa_api_controller *api_ctrl) +{ + if (ipa_hw_type < IPA_HW_v3_0) { + IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type); + WARN_ON(1); + return -EPERM; + } + + api_ctrl->ipa_reset_endpoint = NULL; + api_ctrl->ipa_clear_endpoint_delay = ipa3_clear_endpoint_delay; + api_ctrl->ipa_disable_endpoint = NULL; + api_ctrl->ipa_cfg_ep = ipa3_cfg_ep; + api_ctrl->ipa_cfg_ep_nat = ipa3_cfg_ep_nat; + api_ctrl->ipa_cfg_ep_conn_track = ipa3_cfg_ep_conn_track; + api_ctrl->ipa_cfg_ep_hdr = ipa3_cfg_ep_hdr; + api_ctrl->ipa_cfg_ep_hdr_ext = ipa3_cfg_ep_hdr_ext; + api_ctrl->ipa_cfg_ep_mode = ipa3_cfg_ep_mode; + api_ctrl->ipa_cfg_ep_aggr = ipa3_cfg_ep_aggr; + api_ctrl->ipa_cfg_ep_deaggr = ipa3_cfg_ep_deaggr; + api_ctrl->ipa_cfg_ep_route = ipa3_cfg_ep_route; + api_ctrl->ipa_cfg_ep_holb = ipa3_cfg_ep_holb; + api_ctrl->ipa_get_holb = ipa3_get_holb; + api_ctrl->ipa_set_tag_process_before_gating = + ipa3_set_tag_process_before_gating; + api_ctrl->ipa_cfg_ep_cfg = ipa3_cfg_ep_cfg; + api_ctrl->ipa_cfg_ep_metadata_mask = ipa3_cfg_ep_metadata_mask; + api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client; + api_ctrl->ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl; + api_ctrl->ipa_add_hdr = ipa3_add_hdr; + api_ctrl->ipa_add_hdr_usr = ipa3_add_hdr_usr; + api_ctrl->ipa_del_hdr = ipa3_del_hdr; + api_ctrl->ipa_commit_hdr = ipa3_commit_hdr; + api_ctrl->ipa_reset_hdr = ipa3_reset_hdr; + api_ctrl->ipa_get_hdr = ipa3_get_hdr; + api_ctrl->ipa_put_hdr = ipa3_put_hdr; + api_ctrl->ipa_copy_hdr = ipa3_copy_hdr; + api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx; + api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx; + api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule; + api_ctrl->ipa_add_rt_rule_usr = ipa3_add_rt_rule_usr; + api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule; + api_ctrl->ipa_commit_rt = ipa3_commit_rt; + api_ctrl->ipa_reset_rt = ipa3_reset_rt; + api_ctrl->ipa_get_rt_tbl = ipa3_get_rt_tbl; + api_ctrl->ipa_put_rt_tbl = ipa3_put_rt_tbl; + api_ctrl->ipa_query_rt_index = ipa3_query_rt_index; + api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule; + api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule; + api_ctrl->ipa_add_flt_rule_usr = ipa3_add_flt_rule_usr; + api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule; + api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule; + api_ctrl->ipa_commit_flt = ipa3_commit_flt; + api_ctrl->ipa_reset_flt = ipa3_reset_flt; + api_ctrl->ipa_allocate_nat_device = ipa3_allocate_nat_device; + api_ctrl->ipa_allocate_nat_table = ipa3_allocate_nat_table; + api_ctrl->ipa_allocate_ipv6ct_table = ipa3_allocate_ipv6ct_table; + api_ctrl->ipa_nat_init_cmd = ipa3_nat_init_cmd; + api_ctrl->ipa_ipv6ct_init_cmd = ipa3_ipv6ct_init_cmd; + api_ctrl->ipa_nat_dma_cmd = ipa3_nat_dma_cmd; + api_ctrl->ipa_table_dma_cmd = ipa3_table_dma_cmd; + api_ctrl->ipa_nat_del_cmd = ipa3_nat_del_cmd; + 
api_ctrl->ipa_del_nat_table = ipa3_del_nat_table; + api_ctrl->ipa_del_ipv6ct_table = ipa3_del_ipv6ct_table; + api_ctrl->ipa_nat_mdfy_pdn = ipa3_nat_mdfy_pdn; + api_ctrl->ipa_send_msg = ipa3_send_msg; + api_ctrl->ipa_register_pull_msg = ipa3_register_pull_msg; + api_ctrl->ipa_deregister_pull_msg = ipa3_deregister_pull_msg; + api_ctrl->ipa_register_intf = ipa3_register_intf; + api_ctrl->ipa_register_intf_ext = ipa3_register_intf_ext; + api_ctrl->ipa_deregister_intf = ipa3_deregister_intf; + api_ctrl->ipa_set_aggr_mode = ipa3_set_aggr_mode; + api_ctrl->ipa_set_qcncm_ndp_sig = ipa3_set_qcncm_ndp_sig; + api_ctrl->ipa_set_single_ndp_per_mbim = ipa3_set_single_ndp_per_mbim; + api_ctrl->ipa_tx_dp = ipa3_tx_dp; + api_ctrl->ipa_tx_dp_mul = ipa3_tx_dp_mul; + api_ctrl->ipa_free_skb = ipa3_free_skb; + api_ctrl->ipa_setup_sys_pipe = ipa3_setup_sys_pipe; + api_ctrl->ipa_teardown_sys_pipe = ipa3_teardown_sys_pipe; + api_ctrl->ipa_sys_setup = ipa3_sys_setup; + api_ctrl->ipa_sys_teardown = ipa3_sys_teardown; + api_ctrl->ipa_sys_update_gsi_hdls = ipa3_sys_update_gsi_hdls; + api_ctrl->ipa_connect_wdi_pipe = ipa3_connect_wdi_pipe; + api_ctrl->ipa_disconnect_wdi_pipe = ipa3_disconnect_wdi_pipe; + api_ctrl->ipa_enable_wdi_pipe = ipa3_enable_wdi_pipe; + api_ctrl->ipa_disable_wdi_pipe = ipa3_disable_wdi_pipe; + api_ctrl->ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe; + api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe; + api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats; + api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes; + api_ctrl->ipa_broadcast_wdi_quota_reach_ind = + ipa3_broadcast_wdi_quota_reach_ind; + api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa; + api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB; + api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB; + api_ctrl->teth_bridge_init = ipa3_teth_bridge_init; + api_ctrl->teth_bridge_disconnect = ipa3_teth_bridge_disconnect; + api_ctrl->teth_bridge_connect = ipa3_teth_bridge_connect; + api_ctrl->ipa_set_client = ipa3_set_client; + api_ctrl->ipa_get_client = ipa3_get_client; + api_ctrl->ipa_get_client_uplink = ipa3_get_client_uplink; + api_ctrl->ipa_dma_init = ipa3_dma_init; + api_ctrl->ipa_dma_enable = ipa3_dma_enable; + api_ctrl->ipa_dma_disable = ipa3_dma_disable; + api_ctrl->ipa_dma_sync_memcpy = ipa3_dma_sync_memcpy; + api_ctrl->ipa_dma_async_memcpy = ipa3_dma_async_memcpy; + api_ctrl->ipa_dma_uc_memcpy = ipa3_dma_uc_memcpy; + api_ctrl->ipa_dma_destroy = ipa3_dma_destroy; + api_ctrl->ipa_mhi_init_engine = ipa3_mhi_init_engine; + api_ctrl->ipa_connect_mhi_pipe = ipa3_connect_mhi_pipe; + api_ctrl->ipa_disconnect_mhi_pipe = ipa3_disconnect_mhi_pipe; + api_ctrl->ipa_mhi_stop_gsi_channel = ipa3_mhi_stop_gsi_channel; + api_ctrl->ipa_uc_mhi_reset_channel = ipa3_uc_mhi_reset_channel; + api_ctrl->ipa_qmi_enable_force_clear_datapath_send = + ipa3_qmi_enable_force_clear_datapath_send; + api_ctrl->ipa_qmi_disable_force_clear_datapath_send = + ipa3_qmi_disable_force_clear_datapath_send; + api_ctrl->ipa_mhi_reset_channel_internal = + ipa3_mhi_reset_channel_internal; + api_ctrl->ipa_mhi_start_channel_internal = + ipa3_mhi_start_channel_internal; + api_ctrl->ipa_mhi_query_ch_info = ipa3_mhi_query_ch_info; + api_ctrl->ipa_mhi_resume_channels_internal = + ipa3_mhi_resume_channels_internal; + api_ctrl->ipa_has_open_aggr_frame = ipa3_has_open_aggr_frame; + api_ctrl->ipa_mhi_destroy_channel = ipa3_mhi_destroy_channel; + api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info = + ipa3_uc_mhi_send_dl_ul_sync_info; + api_ctrl->ipa_uc_mhi_init = ipa3_uc_mhi_init; + 
api_ctrl->ipa_uc_mhi_suspend_channel = ipa3_uc_mhi_suspend_channel; + api_ctrl->ipa_uc_mhi_stop_event_update_channel = + ipa3_uc_mhi_stop_event_update_channel; + api_ctrl->ipa_uc_mhi_cleanup = ipa3_uc_mhi_cleanup; + api_ctrl->ipa_uc_state_check = ipa3_uc_state_check; + api_ctrl->ipa_write_qmap_id = ipa3_write_qmap_id; + api_ctrl->ipa_add_interrupt_handler = ipa3_add_interrupt_handler; + api_ctrl->ipa_remove_interrupt_handler = ipa3_remove_interrupt_handler; + api_ctrl->ipa_restore_suspend_handler = ipa3_restore_suspend_handler; + api_ctrl->ipa_bam_reg_dump = NULL; + api_ctrl->ipa_get_ep_mapping = ipa3_get_ep_mapping; + api_ctrl->ipa_is_ready = ipa3_is_ready; + api_ctrl->ipa_proxy_clk_vote = ipa3_proxy_clk_vote; + api_ctrl->ipa_proxy_clk_unvote = ipa3_proxy_clk_unvote; + api_ctrl->ipa_is_client_handle_valid = ipa3_is_client_handle_valid; + api_ctrl->ipa_get_client_mapping = ipa3_get_client_mapping; + api_ctrl->ipa_get_rm_resource_from_ep = ipa3_get_rm_resource_from_ep; + api_ctrl->ipa_get_modem_cfg_emb_pipe_flt = + ipa3_get_modem_cfg_emb_pipe_flt; + api_ctrl->ipa_get_transport_type = ipa3_get_transport_type; + api_ctrl->ipa_ap_suspend = ipa3_ap_suspend; + api_ctrl->ipa_ap_resume = ipa3_ap_resume; + api_ctrl->ipa_get_smmu_domain = ipa3_get_smmu_domain; + api_ctrl->ipa_disable_apps_wan_cons_deaggr = + ipa3_disable_apps_wan_cons_deaggr; + api_ctrl->ipa_get_dma_dev = ipa3_get_dma_dev; + api_ctrl->ipa_release_wdi_mapping = ipa3_release_wdi_mapping; + api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping; + api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info; + api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel; + api_ctrl->ipa_start_gsi_channel = ipa3_start_gsi_channel; + api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb; + api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks; + api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks; + api_ctrl->ipa_inc_client_enable_clks_no_block = + ipa3_inc_client_enable_clks_no_block; + api_ctrl->ipa_suspend_resource_no_block = + ipa3_suspend_resource_no_block; + api_ctrl->ipa_resume_resource = ipa3_resume_resource; + api_ctrl->ipa_suspend_resource_sync = ipa3_suspend_resource_sync; + api_ctrl->ipa_set_required_perf_profile = + ipa3_set_required_perf_profile; + api_ctrl->ipa_get_ipc_logbuf = ipa3_get_ipc_logbuf; + api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low; + api_ctrl->ipa_rx_poll = ipa3_rx_poll; + api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb; + api_ctrl->ipa_setup_uc_ntn_pipes = ipa3_setup_uc_ntn_pipes; + api_ctrl->ipa_tear_down_uc_offload_pipes = + ipa3_tear_down_uc_offload_pipes; + api_ctrl->ipa_get_pdev = ipa3_get_pdev; + api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa3_ntn_uc_reg_rdyCB; + api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa3_ntn_uc_dereg_rdyCB; + api_ctrl->ipa_conn_wdi_pipes = ipa3_conn_wdi3_pipes; + api_ctrl->ipa_disconn_wdi_pipes = ipa3_disconn_wdi3_pipes; + api_ctrl->ipa_enable_wdi_pipes = ipa3_enable_wdi3_pipes; + api_ctrl->ipa_disable_wdi_pipes = ipa3_disable_wdi3_pipes; + api_ctrl->ipa_tz_unlock_reg = ipa3_tz_unlock_reg; + api_ctrl->ipa_get_smmu_params = ipa3_get_smmu_params; + api_ctrl->ipa_is_vlan_mode = ipa3_is_vlan_mode; + api_ctrl->ipa_pm_is_used = ipa3_pm_is_used; + + return 0; +} + +/** + * ipa_is_modem_pipe()- Checks if pipe is owned by the modem + * + * @pipe_idx: pipe number + * Return value: true if owned by modem, false otherwize + */ +bool ipa_is_modem_pipe(int pipe_idx) +{ + int client_idx; + + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("Bad 
pipe index!\n"); + return false; + } + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + if (!IPA_CLIENT_IS_Q6_CONS(client_idx) && + !IPA_CLIENT_IS_Q6_PROD(client_idx)) + continue; + if (ipa3_get_ep_mapping(client_idx) == pipe_idx) + return true; + } + + return false; +} + +static void ipa3_write_rsrc_grp_type_reg(int group_index, + enum ipa_rsrc_grp_type_src n, bool src, + struct ipahal_reg_rsrc_grp_cfg *val) +{ + u8 hw_type_idx; + + hw_type_idx = ipa3_get_hw_type_index(); + + switch (hw_type_idx) { + case IPA_3_0: + if (src) { + switch (group_index) { + case IPA_v3_0_GROUP_UL: + case IPA_v3_0_GROUP_DL: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_0_GROUP_DIAG: + case IPA_v3_0_GROUP_DMA: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_0_GROUP_Q6ZIP: + case IPA_v3_0_GROUP_UC_RX_Q: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid source resource group,index #%d\n", + group_index); + break; + } + } else { + switch (group_index) { + case IPA_v3_0_GROUP_UL: + case IPA_v3_0_GROUP_DL: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_0_GROUP_DIAG: + case IPA_v3_0_GROUP_DMA: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_0_GROUP_Q6ZIP_GENERAL: + case IPA_v3_0_GROUP_Q6ZIP_ENGINE: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_45_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid destination resource group,index #%d\n", + group_index); + break; + } + } + break; + case IPA_3_5: + case IPA_3_5_MHI: + case IPA_3_5_1: + if (src) { + switch (group_index) { + case IPA_v3_5_GROUP_LWA_DL: + case IPA_v3_5_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_5_MHI_GROUP_DMA: + case IPA_v3_5_GROUP_UC_RX_Q: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid source resource group,index #%d\n", + group_index); + break; + } + } else { + switch (group_index) { + case IPA_v3_5_GROUP_LWA_DL: + case IPA_v3_5_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_5_MHI_GROUP_DMA: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid destination resource group,index #%d\n", + group_index); + break; + } + } + break; + case IPA_4_0: + case IPA_4_0_MHI: + case IPA_4_1: + if (src) { + switch (group_index) { + case IPA_v4_0_GROUP_LWA_DL: + case IPA_v4_0_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v4_0_MHI_GROUP_DMA: + case IPA_v4_0_GROUP_UC_RX_Q: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid source resource group,index #%d\n", + group_index); + break; + } + } else { + switch (group_index) { + case IPA_v4_0_GROUP_LWA_DL: + case IPA_v4_0_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v4_0_MHI_GROUP_DMA: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid destination resource group,index #%d\n", + group_index); + break; + } + } + break; + case IPA_4_2: + if (src) { + switch (group_index) { + case IPA_v4_2_GROUP_UL_DL: + 
ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid source resource group,index #%d\n", + group_index); + break; + } + } else { + switch (group_index) { + case IPA_v4_2_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid destination resource group,index #%d\n", + group_index); + break; + } + } + break; + case IPA_4_5: + case IPA_4_5_MHI: + if (src) { + switch (group_index) { + case IPA_v4_5_MHI_GROUP_PCIE: + case IPA_v4_5_GROUP_UL_DL_SRC: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v4_5_MHI_GROUP_DMA: + case IPA_v4_5_MHI_GROUP_QDSS: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + case IPA_v4_5_GROUP_UC_RX_Q: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid source resource group,index #%d\n", + group_index); + break; + } + } else { + switch (group_index) { + case IPA_v4_5_GROUP_UL_DL_DST: + case IPA_v4_5_MHI_GROUP_DDR: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v4_5_MHI_GROUP_DMA: + case IPA_v4_5_MHI_GROUP_QDSS: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + case IPA_v4_5_GROUP_UC_RX_Q: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_45_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid destination resource group,index #%d\n", + group_index); + break; + } + } + break; + + default: + IPAERR("invalid hw type\n"); + WARN_ON(1); + return; + } +} + +static void ipa3_configure_rx_hps_clients(int depth, + int max_clnt_in_depth, int base_index, bool min) +{ + int i; + struct ipahal_reg_rx_hps_clients val; + u8 hw_type_idx; + + hw_type_idx = ipa3_get_hw_type_index(); + + for (i = 0 ; i < max_clnt_in_depth ; i++) { + if (min) + val.client_minmax[i] = + ipa3_rsrc_rx_grp_config + [hw_type_idx] + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] + [i + base_index].min; + else + val.client_minmax[i] = + ipa3_rsrc_rx_grp_config + [hw_type_idx] + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] + [i + base_index].max; + } + if (depth) { + ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 : + IPA_RX_HPS_CLIENTS_MAX_DEPTH_1, + &val); + } else { + ipahal_write_reg_fields(min ? 
IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 : + IPA_RX_HPS_CLIENTS_MAX_DEPTH_0, + &val); + } +} + +static void ipa3_configure_rx_hps_weight(void) +{ + struct ipahal_reg_rx_hps_weights val; + u8 hw_type_idx; + + hw_type_idx = ipa3_get_hw_type_index(); + + val.hps_queue_weight_0 = + ipa3_rsrc_rx_grp_hps_weight_config + [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] + [0]; + val.hps_queue_weight_1 = + ipa3_rsrc_rx_grp_hps_weight_config + [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] + [1]; + val.hps_queue_weight_2 = + ipa3_rsrc_rx_grp_hps_weight_config + [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] + [2]; + val.hps_queue_weight_3 = + ipa3_rsrc_rx_grp_hps_weight_config + [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] + [3]; + + ipahal_write_reg_fields(IPA_HPS_FTCH_ARB_QUEUE_WEIGHT, &val); +} + +static void ipa3_configure_rx_hps(void) +{ + int rx_hps_max_clnt_in_depth0; + + IPADBG("Assign RX_HPS CMDQ rsrc groups min-max limits\n"); + + /* Starting IPA4.5 we have 5 RX_HPS_CMDQ */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) + rx_hps_max_clnt_in_depth0 = 4; + else + rx_hps_max_clnt_in_depth0 = 5; + + ipa3_configure_rx_hps_clients(0, rx_hps_max_clnt_in_depth0, 0, true); + ipa3_configure_rx_hps_clients(0, rx_hps_max_clnt_in_depth0, 0, false); + + /* + * IPA 3.0/3.1 uses 6 RX_HPS_CMDQ and needs depths1 for that + * which has two clients + */ + if (ipa3_ctx->ipa_hw_type <= IPA_HW_v3_1) { + ipa3_configure_rx_hps_clients(1, 2, rx_hps_max_clnt_in_depth0, + true); + ipa3_configure_rx_hps_clients(1, 2, rx_hps_max_clnt_in_depth0, + false); + } + + /* Starting IPA4.2 no support to HPS weight config */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5 && + (ipa3_ctx->ipa_hw_type < IPA_HW_v4_2)) + ipa3_configure_rx_hps_weight(); +} + +void ipa3_set_resorce_groups_min_max_limits(void) +{ + int i; + int j; + int src_rsrc_type_max; + int dst_rsrc_type_max; + int src_grp_idx_max; + int dst_grp_idx_max; + struct ipahal_reg_rsrc_grp_cfg val; + u8 hw_type_idx; + + IPADBG("ENTER\n"); + + hw_type_idx = ipa3_get_hw_type_index(); + switch (hw_type_idx) { + case IPA_3_0: + src_rsrc_type_max = IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX; + dst_rsrc_type_max = IPA_v3_0_RSRC_GRP_TYPE_DST_MAX; + src_grp_idx_max = IPA_v3_0_GROUP_MAX; + dst_grp_idx_max = IPA_v3_0_GROUP_MAX; + break; + case IPA_3_5: + case IPA_3_5_MHI: + case IPA_3_5_1: + src_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX; + dst_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_DST_MAX; + src_grp_idx_max = IPA_v3_5_SRC_GROUP_MAX; + dst_grp_idx_max = IPA_v3_5_DST_GROUP_MAX; + break; + case IPA_4_0: + case IPA_4_0_MHI: + case IPA_4_1: + src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX; + dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX; + src_grp_idx_max = IPA_v4_0_SRC_GROUP_MAX; + dst_grp_idx_max = IPA_v4_0_DST_GROUP_MAX; + break; + case IPA_4_2: + src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX; + dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX; + src_grp_idx_max = IPA_v4_2_SRC_GROUP_MAX; + dst_grp_idx_max = IPA_v4_2_DST_GROUP_MAX; + break; + case IPA_4_5: + case IPA_4_5_MHI: + src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX; + dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX; + src_grp_idx_max = IPA_v4_5_SRC_GROUP_MAX; + dst_grp_idx_max = IPA_v4_5_DST_GROUP_MAX; + break; + default: + IPAERR("invalid hw type index\n"); + WARN_ON(1); + return; + } + + IPADBG("Assign source rsrc groups min-max limits\n"); + for (i = 0; i < src_rsrc_type_max; i++) { + for (j = 0; j < src_grp_idx_max; j = j + 2) { + val.x_min = + ipa3_rsrc_src_grp_config[hw_type_idx][i][j].min; + 
val.x_max = + ipa3_rsrc_src_grp_config[hw_type_idx][i][j].max; + val.y_min = + ipa3_rsrc_src_grp_config[hw_type_idx][i][j + 1].min; + val.y_max = + ipa3_rsrc_src_grp_config[hw_type_idx][i][j + 1].max; + ipa3_write_rsrc_grp_type_reg(j, i, true, &val); + } + } + + IPADBG("Assign destination rsrc groups min-max limits\n"); + for (i = 0; i < dst_rsrc_type_max; i++) { + for (j = 0; j < dst_grp_idx_max; j = j + 2) { + val.x_min = + ipa3_rsrc_dst_grp_config[hw_type_idx][i][j].min; + val.x_max = + ipa3_rsrc_dst_grp_config[hw_type_idx][i][j].max; + val.y_min = + ipa3_rsrc_dst_grp_config[hw_type_idx][i][j + 1].min; + val.y_max = + ipa3_rsrc_dst_grp_config[hw_type_idx][i][j + 1].max; + ipa3_write_rsrc_grp_type_reg(j, i, false, &val); + } + } + + /* move rx_hps resource group configuration from HLOS to TZ + * on real platform with IPA 3.1 or later + */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v3_1 || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + ipa3_configure_rx_hps(); + } + + IPADBG("EXIT\n"); +} + +static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep) +{ + bool empty; + + IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl); + gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL); + gsi_is_channel_empty(ep->gsi_chan_hdl, &empty); + if (!empty) { + IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl); + /* queue a work to start polling if don't have one */ + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1); + if (!atomic_read(&ep->sys->curr_polling_state)) + __ipa_gsi_irq_rx_scedule_poll(ep->sys); + } +} + +static int __ipa3_stop_gsi_channel(u32 clnt_hdl) +{ + struct ipa_mem_buffer mem; + int res = 0; + int i; + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + memset(&mem, 0, sizeof(mem)); + + if (IPA_CLIENT_IS_PROD(ep->client)) { + IPADBG("Calling gsi_stop_channel ch:%lu\n", + ep->gsi_chan_hdl); + res = gsi_stop_channel(ep->gsi_chan_hdl); + IPADBG("gsi_stop_channel ch: %lu returned %d\n", + ep->gsi_chan_hdl, res); + return res; + } + + for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) { + IPADBG("Calling gsi_stop_channel ch:%lu\n", + ep->gsi_chan_hdl); + res = gsi_stop_channel(ep->gsi_chan_hdl); + IPADBG("gsi_stop_channel ch: %lu returned %d\n", + ep->gsi_chan_hdl, res); + if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT) + return res; + + IPADBG("Inject a DMA_TASK with 1B packet to IPA\n"); + /* Send a 1B packet DMA_TASK to IPA and try again */ + res = ipa3_inject_dma_task_for_gsi(); + if (res) { + IPAERR("Failed to inject DMA TASk for GSI\n"); + return res; + } + + /* sleep for short period to flush IPA */ + usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC, + IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC); + } + + IPAERR("Failed to stop GSI channel with retries\n"); + return -EFAULT; +} + +/** + * ipa3_stop_gsi_channel()- Stops a GSI channel in IPA + * @chan_hdl: GSI channel handle + * + * This function implements the sequence to stop a GSI channel + * in IPA. This function returns when the channel is in STOP state. 
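+ * Internally, __ipa3_stop_gsi_channel() above retries gsi_stop_channel()
+ * up to IPA_GSI_CHANNEL_STOP_MAX_RETRY times when the pipe is not a
+ * producer, injecting a 1-byte DMA_TASK and sleeping briefly between
+ * attempts to let IPA flush the pipeline.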
+ * + * Return value: 0 on success, negative otherwise + */ +int ipa3_stop_gsi_channel(u32 clnt_hdl) +{ + int res; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + res = __ipa3_stop_gsi_channel(clnt_hdl); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return res; +} + +void ipa3_suspend_apps_pipes(bool suspend) +{ + struct ipa_ep_cfg_ctrl cfg; + int ipa_ep_idx; + struct ipa3_ep_context *ep; + int res; + + memset(&cfg, 0, sizeof(cfg)); + cfg.ipa_ep_suspend = suspend; + + ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + if (ipa_ep_idx < 0) { + IPAERR("IPA client mapping failed\n"); + ipa_assert(); + return; + } + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (ep->valid) { + IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend", + ipa_ep_idx); + /* + * move the channel to callback mode. + * This needs to happen before starting the channel to make + * sure we don't loose any interrupt + */ + if (!suspend && !atomic_read(&ep->sys->curr_polling_state)) + gsi_config_channel_mode(ep->gsi_chan_hdl, + GSI_CHAN_MODE_CALLBACK); + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + if (suspend) { + res = __ipa3_stop_gsi_channel(ipa_ep_idx); + if (res) { + IPAERR("failed to stop LAN channel\n"); + ipa_assert(); + } + } else { + res = gsi_start_channel(ep->gsi_chan_hdl); + if (res) { + IPAERR("failed to start LAN channel\n"); + ipa_assert(); + } + } + } else { + ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg); + } + if (suspend) + ipa3_gsi_poll_after_suspend(ep); + } + + ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + /* Considering the case for SSR. */ + if (ipa_ep_idx == -1) { + IPADBG("Invalid mapping for IPA_CLIENT_APPS_WAN_CONS\n"); + return; + } + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (ep->valid) { + IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend", + ipa_ep_idx); + /* + * move the channel to callback mode. 
+ * This needs to happen before starting the channel to make + * sure we don't loose any interrupt + */ + if (!suspend && !atomic_read(&ep->sys->curr_polling_state)) + gsi_config_channel_mode(ep->gsi_chan_hdl, + GSI_CHAN_MODE_CALLBACK); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + if (suspend) { + res = __ipa3_stop_gsi_channel(ipa_ep_idx); + if (res) { + IPAERR("failed to stop WAN channel\n"); + ipa_assert(); + } + } else { + res = gsi_start_channel(ep->gsi_chan_hdl); + if (res) { + IPAERR("failed to start WAN channel\n"); + ipa_assert(); + } + } + } else { + ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg); + } + if (suspend) + ipa3_gsi_poll_after_suspend(ep); + } +} + +int ipa3_allocate_dma_task_for_gsi(void) +{ + struct ipahal_imm_cmd_dma_task_32b_addr cmd = { 0 }; + + IPADBG("Allocate mem\n"); + ipa3_ctx->dma_task_info.mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE; + ipa3_ctx->dma_task_info.mem.base = dma_alloc_coherent(ipa3_ctx->pdev, + ipa3_ctx->dma_task_info.mem.size, + &ipa3_ctx->dma_task_info.mem.phys_base, + GFP_KERNEL); + if (!ipa3_ctx->dma_task_info.mem.base) { + IPAERR("no mem\n"); + return -EFAULT; + } + + cmd.flsh = true; + cmd.size1 = ipa3_ctx->dma_task_info.mem.size; + cmd.addr1 = ipa3_ctx->dma_task_info.mem.phys_base; + cmd.packet_size = ipa3_ctx->dma_task_info.mem.size; + ipa3_ctx->dma_task_info.cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false); + if (!ipa3_ctx->dma_task_info.cmd_pyld) { + IPAERR("failed to construct dma_task_32b_addr cmd\n"); + dma_free_coherent(ipa3_ctx->pdev, + ipa3_ctx->dma_task_info.mem.size, + ipa3_ctx->dma_task_info.mem.base, + ipa3_ctx->dma_task_info.mem.phys_base); + memset(&ipa3_ctx->dma_task_info, 0, + sizeof(ipa3_ctx->dma_task_info)); + return -EFAULT; + } + + return 0; +} + +void ipa3_free_dma_task_for_gsi(void) +{ + dma_free_coherent(ipa3_ctx->pdev, + ipa3_ctx->dma_task_info.mem.size, + ipa3_ctx->dma_task_info.mem.base, + ipa3_ctx->dma_task_info.mem.phys_base); + ipahal_destroy_imm_cmd(ipa3_ctx->dma_task_info.cmd_pyld); + memset(&ipa3_ctx->dma_task_info, 0, sizeof(ipa3_ctx->dma_task_info)); +} + +/** + * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel + * + * Send a DMA_TASK of 1B to IPA to unblock GSI channel in STOP_IN_PROG. 
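+ * The DMA_TASK immediate command payload is built once by
+ * ipa3_allocate_dma_task_for_gsi() (a 1-byte buffer of size
+ * IPA_GSI_CHANNEL_STOP_PKT_SIZE) and is simply resent here via
+ * ipa3_send_cmd_timeout() with a bounded wait of
+ * IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC.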
+ * Return value: 0 on success, negative otherwise + */ +int ipa3_inject_dma_task_for_gsi(void) +{ + struct ipa3_desc desc; + + ipa3_init_imm_cmd_desc(&desc, ipa3_ctx->dma_task_info.cmd_pyld); + + IPADBG("sending 1B packet to IPA\n"); + if (ipa3_send_cmd_timeout(1, &desc, + IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) { + IPAERR("ipa3_send_cmd failed\n"); + return -EFAULT; + } + + return 0; +} + +static int ipa3_load_single_fw(const struct firmware *firmware, + const struct elf32_phdr *phdr) +{ + uint32_t *fw_mem_base; + int index; + const uint32_t *elf_data_ptr; + + if (phdr->p_offset > firmware->size) { + IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n", + phdr->p_offset, firmware->size); + return -EINVAL; + } + if ((firmware->size - phdr->p_offset) < phdr->p_filesz) { + IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n", + phdr->p_offset, phdr->p_filesz, firmware->size); + return -EINVAL; + } + + if (phdr->p_memsz % sizeof(uint32_t)) { + IPAERR("FW mem size %u doesn't align to 32bit\n", + phdr->p_memsz); + return -EFAULT; + } + + if (phdr->p_filesz > phdr->p_memsz) { + IPAERR("FW image too big src_size=%u dst_size=%u\n", + phdr->p_filesz, phdr->p_memsz); + return -EFAULT; + } + + fw_mem_base = ioremap(phdr->p_vaddr, phdr->p_memsz); + if (!fw_mem_base) { + IPAERR("Failed to map 0x%x for the size of %u\n", + phdr->p_vaddr, phdr->p_memsz); + return -ENOMEM; + } + + /* Set the entire region to 0s */ + memset(fw_mem_base, 0, phdr->p_memsz); + + elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset); + + /* Write the FW */ + for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) { + writel_relaxed(*elf_data_ptr, &fw_mem_base[index]); + elf_data_ptr++; + } + + iounmap(fw_mem_base); + + return 0; +} + +struct ipa3_hps_dps_areas_info { + u32 dps_abs_addr; + u32 dps_sz; + u32 hps_abs_addr; + u32 hps_sz; +}; + +static void ipa3_get_hps_dps_areas_absolute_addr_and_sz( + struct ipa3_hps_dps_areas_info *info) +{ + u32 dps_area_start; + u32 dps_area_end; + u32 hps_area_start; + u32 hps_area_end; + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) { + dps_area_start = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_FIRST); + dps_area_end = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_LAST); + hps_area_start = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_FIRST); + hps_area_end = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_LAST); + + info->dps_abs_addr = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + dps_area_start; + info->hps_abs_addr = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + hps_area_start; + } else { + dps_area_start = ipahal_read_reg(IPA_DPS_SEQUENCER_FIRST); + dps_area_end = ipahal_read_reg(IPA_DPS_SEQUENCER_LAST); + hps_area_start = ipahal_read_reg(IPA_HPS_SEQUENCER_FIRST); + hps_area_end = ipahal_read_reg(IPA_HPS_SEQUENCER_LAST); + + info->dps_abs_addr = ipa3_ctx->ipa_wrapper_base + + dps_area_start; + info->hps_abs_addr = ipa3_ctx->ipa_wrapper_base + + hps_area_start; + } + + info->dps_sz = dps_area_end - dps_area_start + sizeof(u32); + info->hps_sz = hps_area_end - hps_area_start + sizeof(u32); + + IPADBG("dps area: start offset=0x%x end offset=0x%x\n", + dps_area_start, dps_area_end); + IPADBG("hps area: start offset=0x%x end offset=0x%x\n", + hps_area_start, hps_area_end); +} + +/** + * emulator_load_single_fw() - load firmware into emulator's memory + * + * @firmware: Structure which contains the FW data from the user space. 
+ * @phdr: ELF program header + * @loc_to_map: physical location to map into virtual space + * @size_to_map: the size of memory to map into virtual space + * + * Return value: 0 on success, negative otherwise + */ +static int emulator_load_single_fw( + const struct firmware *firmware, + const struct elf32_phdr *phdr, + u32 loc_to_map, + u32 size_to_map) +{ + int index; + uint32_t ofb; + const uint32_t *elf_data_ptr; + void __iomem *fw_base; + + IPADBG("firmware(%pK) phdr(%pK) loc_to_map(0x%X) size_to_map(%u)\n", + firmware, phdr, loc_to_map, size_to_map); + + if (phdr->p_offset > firmware->size) { + IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n", + phdr->p_offset, firmware->size); + return -EINVAL; + } + if ((firmware->size - phdr->p_offset) < phdr->p_filesz) { + IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n", + phdr->p_offset, phdr->p_filesz, firmware->size); + return -EINVAL; + } + + if (phdr->p_memsz % sizeof(uint32_t)) { + IPAERR("FW mem size %u doesn't align to 32bit\n", + phdr->p_memsz); + return -EFAULT; + } + + if (phdr->p_filesz > phdr->p_memsz) { + IPAERR("FW image too big src_size=%u dst_size=%u\n", + phdr->p_filesz, phdr->p_memsz); + return -EFAULT; + } + + IPADBG("ELF: p_memsz(0x%x) p_filesz(0x%x) p_filesz/4(0x%x)\n", + (uint32_t) phdr->p_memsz, + (uint32_t) phdr->p_filesz, + (uint32_t) (phdr->p_filesz/sizeof(uint32_t))); + + fw_base = ioremap(loc_to_map, size_to_map); + if (!fw_base) { + IPAERR("Failed to map 0x%X for the size of %u\n", + loc_to_map, size_to_map); + return -ENOMEM; + } + + IPADBG("Physical base(0x%X) mapped to virtual (%pK) with len (%u)\n", + loc_to_map, + fw_base, + size_to_map); + + /* Set the entire region to 0s */ + ofb = 0; + for (index = 0; index < phdr->p_memsz/sizeof(uint32_t); index++) { + writel_relaxed(0, fw_base + ofb); + ofb += sizeof(uint32_t); + } + + elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset); + + /* Write the FW */ + ofb = 0; + for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) { + writel_relaxed(*elf_data_ptr, fw_base + ofb); + elf_data_ptr++; + ofb += sizeof(uint32_t); + } + + iounmap(fw_base); + + return 0; +} + +/** + * ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM. + * + * @firmware: Structure which contains the FW data from the user space. + * @gsi_mem_base: GSI base address + * @gsi_ver: GSI Version + * + * Return value: 0 on success, negative otherwise + * + */ +int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base, + enum gsi_ver gsi_ver) +{ + const struct elf32_hdr *ehdr; + const struct elf32_phdr *phdr; + unsigned long gsi_iram_ofst; + unsigned long gsi_iram_size; + int rc; + struct ipa3_hps_dps_areas_info dps_hps_info; + + if (gsi_ver == GSI_VER_ERR) { + IPAERR("Invalid GSI Version\n"); + return -EINVAL; + } + + if (!gsi_mem_base) { + IPAERR("Invalid GSI base address\n"); + return -EINVAL; + } + + ipa_assert_on(!firmware); + /* One program header per FW image: GSI, DPS and HPS */ + if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) { + IPAERR("Missing ELF and Program headers firmware size=%zu\n", + firmware->size); + return -EINVAL; + } + + ehdr = (struct elf32_hdr *) firmware->data; + ipa_assert_on(!ehdr); + if (ehdr->e_phnum != 3) { + IPAERR("Unexpected number of ELF program headers\n"); + return -EINVAL; + } + phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr)); + + /* + * Each ELF program header represents a FW image and contains: + * p_vaddr : The starting address to which the FW needs to loaded. 
+ * p_memsz : The size of the IRAM (where the image loaded) + * p_filesz: The size of the FW image embedded inside the ELF + * p_offset: Absolute offset to the image from the head of the ELF + */ + + /* Load GSI FW image */ + gsi_get_inst_ram_offset_and_size(&gsi_iram_ofst, &gsi_iram_size, + gsi_ver); + if (phdr->p_vaddr != (gsi_mem_base + gsi_iram_ofst)) { + IPAERR( + "Invalid GSI FW img load addr vaddr=0x%x gsi_mem_base=%pa gsi_iram_ofst=0x%lx\n" + , phdr->p_vaddr, &gsi_mem_base, gsi_iram_ofst); + return -EINVAL; + } + if (phdr->p_memsz > gsi_iram_size) { + IPAERR("Invalid GSI FW img size memsz=%d gsi_iram_size=%lu\n", + phdr->p_memsz, gsi_iram_size); + return -EINVAL; + } + rc = ipa3_load_single_fw(firmware, phdr); + if (rc) + return rc; + + phdr++; + ipa3_get_hps_dps_areas_absolute_addr_and_sz(&dps_hps_info); + + /* Load IPA DPS FW image */ + if (phdr->p_vaddr != dps_hps_info.dps_abs_addr) { + IPAERR( + "Invalid IPA DPS img load addr vaddr=0x%x dps_abs_addr=0x%x\n" + , phdr->p_vaddr, dps_hps_info.dps_abs_addr); + return -EINVAL; + } + if (phdr->p_memsz > dps_hps_info.dps_sz) { + IPAERR("Invalid IPA DPS img size memsz=%d dps_area_size=%u\n", + phdr->p_memsz, dps_hps_info.dps_sz); + return -EINVAL; + } + rc = ipa3_load_single_fw(firmware, phdr); + if (rc) + return rc; + + phdr++; + + /* Load IPA HPS FW image */ + if (phdr->p_vaddr != dps_hps_info.hps_abs_addr) { + IPAERR( + "Invalid IPA HPS img load addr vaddr=0x%x hps_abs_addr=0x%x\n" + , phdr->p_vaddr, dps_hps_info.hps_abs_addr); + return -EINVAL; + } + if (phdr->p_memsz > dps_hps_info.hps_sz) { + IPAERR("Invalid IPA HPS img size memsz=%d hps_area_size=%u\n", + phdr->p_memsz, dps_hps_info.hps_sz); + return -EINVAL; + } + rc = ipa3_load_single_fw(firmware, phdr); + if (rc) + return rc; + + IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n"); + return 0; +} + +/* + * The following needed for the EMULATION system. On a non-emulation + * system (ie. the real UE), this functionality is done in the + * TZ... 
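+ * On emulation, ipa_gsi_setup_reg() below therefore programs the
+ * per-pipe TLV and AOS FIFO allocations (packed as size << 16 | start
+ * into IPA_ENDP_GSI_CFG_TLV_n / IPA_ENDP_GSI_CFG_AOS_n), maps each
+ * pipe's virtual channel to its GSI EE/channel, and toggles
+ * IPA_ENDP_GSI_CFG1_n before the firmware download.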
+ */ + +static void ipa_gsi_setup_reg(void) +{ + u32 reg_val, start; + int i; + const struct ipa_gsi_ep_config *gsi_ep_info_cfg; + enum ipa_client_type type; + + IPADBG("Setting up registers in preparation for firmware download\n"); + + /* setup IPA_ENDP_GSI_CFG_TLV_n reg */ + start = 0; + ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes(); + IPADBG("ipa_num_pipes=%u\n", ipa3_ctx->ipa_num_pipes); + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + type = ipa3_get_client_by_pipe(i); + gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type); + IPAERR("for ep %d client is %d gsi_ep_info_cfg=%pK\n", + i, type, gsi_ep_info_cfg); + if (!gsi_ep_info_cfg) + continue; + reg_val = ((gsi_ep_info_cfg->ipa_if_tlv << 16) & 0x00FF0000); + reg_val += (start & 0xFFFF); + start += gsi_ep_info_cfg->ipa_if_tlv; + ipahal_write_reg_n(IPA_ENDP_GSI_CFG_TLV_n, i, reg_val); + } + + /* setup IPA_ENDP_GSI_CFG_AOS_n reg */ + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + type = ipa3_get_client_by_pipe(i); + gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type); + if (!gsi_ep_info_cfg) + continue; + reg_val = ((gsi_ep_info_cfg->ipa_if_aos << 16) & 0x00FF0000); + reg_val += (start & 0xFFFF); + start += gsi_ep_info_cfg->ipa_if_aos; + ipahal_write_reg_n(IPA_ENDP_GSI_CFG_AOS_n, i, reg_val); + } + + /* setup GSI_MAP_EE_n_CH_k_VP_TABLE reg */ + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + type = ipa3_get_client_by_pipe(i); + gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type); + if (!gsi_ep_info_cfg) + continue; + reg_val = i & 0x1F; + gsi_map_virtual_ch_to_per_ep( + gsi_ep_info_cfg->ee, + gsi_ep_info_cfg->ipa_gsi_chan_num, + reg_val); + } + + /* setup IPA_ENDP_GSI_CFG1_n reg */ + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + type = ipa3_get_client_by_pipe(i); + gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type); + if (!gsi_ep_info_cfg) + continue; + reg_val = (1 << 31) + (1 << 16); + ipahal_write_reg_n(IPA_ENDP_GSI_CFG1_n, i, 1<<16); + ipahal_write_reg_n(IPA_ENDP_GSI_CFG1_n, i, reg_val); + ipahal_write_reg_n(IPA_ENDP_GSI_CFG1_n, i, 1<<16); + } +} + +/** + * emulator_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM. + * + * @firmware: Structure which contains the FW data from the user space. + * @transport_mem_base: Where to load + * @transport_mem_size: Space available to load into + * @gsi_ver: Version of the gsi + * + * Return value: 0 on success, negative otherwise + */ +int emulator_load_fws( + const struct firmware *firmware, + u32 transport_mem_base, + u32 transport_mem_size, + enum gsi_ver gsi_ver) +{ + const struct elf32_hdr *ehdr; + const struct elf32_phdr *phdr; + unsigned long gsi_offset, gsi_ram_size; + struct ipa3_hps_dps_areas_info dps_hps_info; + int rc; + + IPADBG("Loading firmware(%pK)\n", firmware); + + if (!firmware) { + IPAERR("firmware pointer passed to function is NULL\n"); + return -EINVAL; + } + + /* One program header per FW image: GSI, DPS and HPS */ + if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) { + IPAERR( + "Missing ELF and Program headers firmware size=%zu\n", + firmware->size); + return -EINVAL; + } + + ehdr = (struct elf32_hdr *) firmware->data; + + ipa_assert_on(!ehdr); + + if (ehdr->e_phnum != 3) { + IPAERR("Unexpected number of ELF program headers\n"); + return -EINVAL; + } + + ipa3_get_hps_dps_areas_absolute_addr_and_sz(&dps_hps_info); + + /* + * Each ELF program header represents a FW image and contains: + * p_vaddr : The starting address to which the FW needs to loaded. 
+ * p_memsz : The size of the IRAM (where the image loaded) + * p_filesz: The size of the FW image embedded inside the ELF + * p_offset: Absolute offset to the image from the head of the ELF + * + * NOTE WELL: On the emulation platform, the p_vaddr address + * is not relevant and is unused. This is because + * on the emulation platform, the registers' + * address location is mutable, since it's mapped + * in via a PCIe probe. Given this, it is the + * mapped address info that's used while p_vaddr is + * ignored. + */ + phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr)); + + phdr += 2; + + /* + * Attempt to load IPA HPS FW image + */ + if (phdr->p_memsz > dps_hps_info.hps_sz) { + IPAERR("Invalid IPA HPS img size memsz=%d hps_size=%u\n", + phdr->p_memsz, dps_hps_info.hps_sz); + return -EINVAL; + } + IPADBG("Loading HPS FW\n"); + rc = emulator_load_single_fw( + firmware, phdr, + dps_hps_info.hps_abs_addr, dps_hps_info.hps_sz); + if (rc) + return rc; + IPADBG("Loading HPS FW complete\n"); + + --phdr; + + /* + * Attempt to load IPA DPS FW image + */ + if (phdr->p_memsz > dps_hps_info.dps_sz) { + IPAERR("Invalid IPA DPS img size memsz=%d dps_size=%u\n", + phdr->p_memsz, dps_hps_info.dps_sz); + return -EINVAL; + } + IPADBG("Loading DPS FW\n"); + rc = emulator_load_single_fw( + firmware, phdr, + dps_hps_info.dps_abs_addr, dps_hps_info.dps_sz); + if (rc) + return rc; + IPADBG("Loading DPS FW complete\n"); + + /* + * Run gsi register setup which is normally done in TZ on + * non-EMULATION systems... + */ + ipa_gsi_setup_reg(); + + --phdr; + + gsi_get_inst_ram_offset_and_size(&gsi_offset, &gsi_ram_size, gsi_ver); + + /* + * Attempt to load GSI FW image + */ + if (phdr->p_memsz > gsi_ram_size) { + IPAERR( + "Invalid GSI FW img size memsz=%d gsi_ram_size=%u\n", + phdr->p_memsz, gsi_ram_size); + return -EINVAL; + } + IPADBG("Loading GSI FW\n"); + rc = emulator_load_single_fw( + firmware, phdr, + transport_mem_base + (u32) gsi_offset, gsi_ram_size); + if (rc) + return rc; + IPADBG("Loading GSI FW complete\n"); + + IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n"); + + return 0; +} + +/** + * ipa3_is_msm_device() - Is the running device a MSM or MDM? 
+ * Determine according to IPA version + * + * Return value: true if MSM, false if MDM + * + */ +bool ipa3_is_msm_device(void) +{ + switch (ipa3_ctx->ipa_hw_type) { + case IPA_HW_v3_0: + case IPA_HW_v3_5: + case IPA_HW_v4_0: + case IPA_HW_v4_5: + return false; + case IPA_HW_v3_1: + case IPA_HW_v3_5_1: + case IPA_HW_v4_1: + case IPA_HW_v4_2: + return true; + default: + IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type); + ipa_assert(); + } + + return false; +} + +/** + * ipa3_disable_prefetch() - disable\enable tx prefetch + * + * @client: the client which is related to the TX where prefetch will be + * disabled + * + * Return value: Non applicable + * + */ +void ipa3_disable_prefetch(enum ipa_client_type client) +{ + struct ipahal_reg_tx_cfg cfg; + u8 qmb; + + qmb = ipa3_get_qmb_master_sel(client); + + IPADBG("disabling prefetch for qmb %d\n", (int)qmb); + + ipahal_read_reg_fields(IPA_TX_CFG, &cfg); + /* QMB0 (DDR) correlates with TX0, QMB1(PCIE) correlates with TX1 */ + if (qmb == QMB_MASTER_SELECT_DDR) + cfg.tx0_prefetch_disable = true; + else + cfg.tx1_prefetch_disable = true; + ipahal_write_reg_fields(IPA_TX_CFG, &cfg); +} + +/** + * ipa3_get_pdev() - return a pointer to IPA dev struct + * + * Return value: a pointer to IPA dev struct + * + */ +struct device *ipa3_get_pdev(void) +{ + if (!ipa3_ctx) + return NULL; + + return ipa3_ctx->pdev; +} + +/** + * ipa3_enable_dcd() - enable dynamic clock division on IPA + * + * Return value: Non applicable + * + */ +void ipa3_enable_dcd(void) +{ + struct ipahal_reg_idle_indication_cfg idle_indication_cfg; + + /* recommended values for IPA 3.5 according to IPA HPG */ + idle_indication_cfg.const_non_idle_enable = false; + idle_indication_cfg.enter_idle_debounce_thresh = 256; + + ipahal_write_reg_fields(IPA_IDLE_INDICATION_CFG, + &idle_indication_cfg); +} + +void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc, + struct ipahal_imm_cmd_pyld *cmd_pyld) +{ + memset(desc, 0, sizeof(*desc)); + desc->opcode = cmd_pyld->opcode; + desc->pyld = cmd_pyld->data; + desc->len = cmd_pyld->len; + desc->type = IPA_IMM_CMD_DESC; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c new file mode 100644 index 000000000000..1f639ce6a58d --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c @@ -0,0 +1,560 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
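+ *
+ * ipa_wdi3_i.c - WDI3 offload pipe setup/teardown for IPA v3.x,
+ * implemented by sending channel set-up commands to the IPA uC.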
+ */ + +#include "ipa_i.h" +#include + +#define IPA_HW_WDI3_RX_MBOX_START_INDEX 48 +#define IPA_HW_WDI3_TX_MBOX_START_INDEX 50 + +static int ipa3_send_wdi3_setup_pipe_cmd( + u8 is_smmu_enabled, struct ipa_wdi_pipe_setup_info *info, + struct ipa_wdi_pipe_setup_info_smmu *info_smmu, u8 dir) +{ + int ipa_ep_idx; + int result = 0, len; + unsigned long va; + struct ipa_mem_buffer cmd; + struct IpaHwWdi3SetUpCmdData_t *wdi3_params; + struct IpaHwOffloadSetUpCmdData_t *cmd_data; + + if (info == NULL || info_smmu == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_WDI3; + + if (!is_smmu_enabled) { + ipa_ep_idx = ipa_get_ep_mapping(info->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to get ep idx.\n"); + return -EFAULT; + } + + IPADBG("client=%d ep=%d\n", info->client, ipa_ep_idx); + IPADBG("ring_base_pa = 0x%pad\n", &info->transfer_ring_base_pa); + IPADBG("ring_size = %hu\n", info->transfer_ring_size); + IPADBG("ring_db_pa = 0x%pad\n", + &info->transfer_ring_doorbell_pa); + IPADBG("evt_ring_base_pa = 0x%pad\n", + &info->event_ring_base_pa); + IPADBG("evt_ring_size = %hu\n", info->event_ring_size); + IPADBG("evt_ring_db_pa = 0x%pad\n", + &info->event_ring_doorbell_pa); + IPADBG("num_pkt_buffers = %hu\n", info->num_pkt_buffers); + IPADBG("pkt_offset = %d\n", info->pkt_offset); + + wdi3_params = &cmd_data->SetupCh_params.Wdi3SetupCh_params; + wdi3_params->transfer_ring_base_pa = + (u32)info->transfer_ring_base_pa; + wdi3_params->transfer_ring_base_pa_hi = + (u32)((u64)info->transfer_ring_base_pa >> 32); + wdi3_params->transfer_ring_size = info->transfer_ring_size; + wdi3_params->transfer_ring_doorbell_pa = + (u32)info->transfer_ring_doorbell_pa; + wdi3_params->transfer_ring_doorbell_pa_hi = + (u32)((u64)info->transfer_ring_doorbell_pa >> 32); + wdi3_params->event_ring_base_pa = (u32)info->event_ring_base_pa; + wdi3_params->event_ring_base_pa_hi = + (u32)((u64)info->event_ring_base_pa >> 32); + wdi3_params->event_ring_size = info->event_ring_size; + wdi3_params->event_ring_doorbell_pa = + (u32)info->event_ring_doorbell_pa; + wdi3_params->event_ring_doorbell_pa_hi = + (u32)((u64)info->event_ring_doorbell_pa >> 32); + wdi3_params->num_pkt_buffers = info->num_pkt_buffers; + wdi3_params->ipa_pipe_number = ipa_ep_idx; + wdi3_params->dir = dir; + wdi3_params->pkt_offset = info->pkt_offset; + memcpy(wdi3_params->desc_format_template, + info->desc_format_template, + sizeof(wdi3_params->desc_format_template)); + } else { + ipa_ep_idx = ipa_get_ep_mapping(info_smmu->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to get ep idx.\n"); + return -EFAULT; + } + + IPADBG("client=%d ep=%d\n", info_smmu->client, ipa_ep_idx); + IPADBG("ring_size = %hu\n", info_smmu->transfer_ring_size); + IPADBG("ring_db_pa = 0x%pad\n", + &info_smmu->transfer_ring_doorbell_pa); + IPADBG("evt_ring_size = %hu\n", info_smmu->event_ring_size); + IPADBG("evt_ring_db_pa = 0x%pad\n", + &info_smmu->event_ring_doorbell_pa); + IPADBG("num_pkt_buffers = %hu\n", info_smmu->num_pkt_buffers); + IPADBG("pkt_offset = %d\n", info_smmu->pkt_offset); + + wdi3_params = &cmd_data->SetupCh_params.Wdi3SetupCh_params; + + if (dir == IPA_WDI3_TX_DIR) { + len = info_smmu->transfer_ring_size; + if 
(ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES, + true, info->transfer_ring_base_pa, + &info_smmu->transfer_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_base_pa = (u32)va; + wdi3_params->transfer_ring_base_pa_hi = + (u32)((u64)va >> 32); + wdi3_params->transfer_ring_size = len; + + if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_DB_RES, + true, info_smmu->transfer_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_doorbell_pa = + (u32)va; + wdi3_params->transfer_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + + len = info_smmu->event_ring_size; + if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, + true, info->event_ring_base_pa, + &info_smmu->event_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_base_pa = (u32)va; + wdi3_params->event_ring_base_pa_hi = + (u32)((u64)va >> 32); + wdi3_params->event_ring_size = len; + + if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, + true, info_smmu->event_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_doorbell_pa = + (u32)va; + wdi3_params->event_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + } else { + len = info_smmu->transfer_ring_size; + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES, + true, info->transfer_ring_base_pa, + &info_smmu->transfer_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_base_pa = (u32)va; + wdi3_params->transfer_ring_base_pa_hi = + (u32)((u64)va >> 32); + wdi3_params->transfer_ring_size = len; + + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + true, info_smmu->transfer_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_doorbell_pa = + (u32)va; + wdi3_params->transfer_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + + len = info_smmu->event_ring_size; + if (ipa_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_RES, true, + info->event_ring_base_pa, + &info_smmu->event_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_base_pa = (u32)va; + wdi3_params->event_ring_base_pa_hi = + (u32)((u64)va >> 32); + wdi3_params->event_ring_size = len; + + if (ipa_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_WP_RES, true, + info_smmu->event_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_doorbell_pa = + (u32)va; + wdi3_params->event_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + } + wdi3_params->num_pkt_buffers = info_smmu->num_pkt_buffers; + wdi3_params->ipa_pipe_number = ipa_ep_idx; + wdi3_params->dir = dir; + wdi3_params->pkt_offset = info_smmu->pkt_offset; + memcpy(wdi3_params->desc_format_template, + info_smmu->desc_format_template, + sizeof(wdi3_params->desc_format_template)); + } + + result = ipa3_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + IPAERR("uc setup channel cmd failed: %d\n", result); + result = -EFAULT; + } + + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + return result; +} + +int ipa3_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in, + struct 
ipa_wdi_conn_out_params *out, + ipa_wdi_meter_notifier_cb wdi_notify) +{ + enum ipa_client_type rx_client; + enum ipa_client_type tx_client; + struct ipa3_ep_context *ep_rx; + struct ipa3_ep_context *ep_tx; + int ipa_ep_idx_rx; + int ipa_ep_idx_tx; + int result = 0; + + if (in == NULL || out == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + if (in->is_smmu_enabled == false) { + rx_client = in->u_rx.rx.client; + tx_client = in->u_tx.tx.client; + } else { + rx_client = in->u_rx.rx_smmu.client; + tx_client = in->u_tx.tx_smmu.client; + } + + ipa_ep_idx_rx = ipa_get_ep_mapping(rx_client); + ipa_ep_idx_tx = ipa_get_ep_mapping(tx_client); + + if (ipa_ep_idx_rx == -1 || ipa_ep_idx_tx == -1) { + IPAERR("fail to alloc EP.\n"); + return -EFAULT; + } + if (ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES || + ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES) { + IPAERR("ep out of range.\n"); + return -EFAULT; + } + + ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx]; + ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx]; + + if (ep_rx->valid || ep_tx->valid) { + IPAERR("EP already allocated.\n"); + return -EFAULT; + } + + memset(ep_rx, 0, offsetof(struct ipa3_ep_context, sys)); + memset(ep_tx, 0, offsetof(struct ipa3_ep_context, sys)); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + +#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN + if (wdi_notify) + ipa3_ctx->uc_wdi_ctx.stats_notify = wdi_notify; + else + IPADBG("wdi_notify is null\n"); +#endif + + /* setup rx ep cfg */ + ep_rx->valid = 1; + ep_rx->client = rx_client; + result = ipa3_disable_data_path(ipa_ep_idx_rx); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_rx); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -EFAULT; + } + ep_rx->client_notify = in->notify; + ep_rx->priv = in->priv; + + if (in->is_smmu_enabled == false) + memcpy(&ep_rx->cfg, &in->u_rx.rx.ipa_ep_cfg, + sizeof(ep_rx->cfg)); + else + memcpy(&ep_rx->cfg, &in->u_rx.rx_smmu.ipa_ep_cfg, + sizeof(ep_rx->cfg)); + + if (ipa3_cfg_ep(ipa_ep_idx_rx, &ep_rx->cfg)) { + IPAERR("fail to setup rx pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa3_send_wdi3_setup_pipe_cmd(in->is_smmu_enabled, + &in->u_rx.rx, &in->u_rx.rx_smmu, IPA_WDI3_RX_DIR)) { + IPAERR("fail to send cmd to uc for rx pipe\n"); + result = -EFAULT; + goto fail; + } + ipa3_install_dflt_flt_rules(ipa_ep_idx_rx); + out->rx_uc_db_pa = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IPA_HW_WDI3_RX_MBOX_START_INDEX/32, + IPA_HW_WDI3_RX_MBOX_START_INDEX % 32); + + IPADBG("client %d (ep: %d) connected\n", rx_client, + ipa_ep_idx_rx); + + /* setup tx ep cfg */ + ep_tx->valid = 1; + ep_tx->client = tx_client; + result = ipa3_disable_data_path(ipa_ep_idx_tx); + if (result) { + IPAERR("disable data path failed res=%d ep=%d.\n", result, + ipa_ep_idx_tx); + result = -EFAULT; + goto fail; + } + + if (in->is_smmu_enabled == false) + memcpy(&ep_tx->cfg, &in->u_tx.tx.ipa_ep_cfg, + sizeof(ep_tx->cfg)); + else + memcpy(&ep_tx->cfg, &in->u_tx.tx_smmu.ipa_ep_cfg, + sizeof(ep_tx->cfg)); + + if (ipa3_cfg_ep(ipa_ep_idx_tx, &ep_tx->cfg)) { + IPAERR("fail to setup tx pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa3_send_wdi3_setup_pipe_cmd(in->is_smmu_enabled, + &in->u_tx.tx, &in->u_tx.tx_smmu, IPA_WDI3_TX_DIR)) { + IPAERR("fail to send cmd to uc for tx pipe\n"); + result = -EFAULT; + goto fail; + } + out->tx_uc_db_pa = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IPA_HW_WDI3_TX_MBOX_START_INDEX/32, + IPA_HW_WDI3_TX_MBOX_START_INDEX % 
32); + IPADBG("client %d (ep: %d) connected\n", tx_client, + ipa_ep_idx_tx); + +fail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +static int ipa3_send_wdi3_common_ch_cmd(int ipa_ep_idx, int command) +{ + struct ipa_mem_buffer cmd; + struct IpaHwOffloadCommonChCmdData_t *cmd_data; + union IpaHwWdi3CommonChCmdData_t *wdi3; + int result = 0; + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + /* enable the TX pipe */ + cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_WDI3; + + wdi3 = &cmd_data->CommonCh_params.Wdi3CommonCh_params; + wdi3->params.ipa_pipe_number = ipa_ep_idx; + result = ipa3_uc_send_cmd((u32)(cmd.phys_base), command, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + result = -EFAULT; + goto fail; + } + +fail: + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + struct ipa3_ep_context *ep_tx, *ep_rx; + int result = 0; + + IPADBG("ep_tx = %d\n", ipa_ep_idx_tx); + IPADBG("ep_rx = %d\n", ipa_ep_idx_rx); + + if (ipa_ep_idx_tx < 0 || ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES || + ipa_ep_idx_rx < 0 || ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES) { + IPAERR("invalid ipa ep index\n"); + return -EINVAL; + } + + ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx]; + ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx]; + + /* tear down tx pipe */ + if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN)) { + IPAERR("fail to tear down tx pipe\n"); + result = -EFAULT; + goto fail; + } + ipa3_disable_data_path(ipa_ep_idx_tx); + memset(ep_tx, 0, sizeof(struct ipa3_ep_context)); + IPADBG("tx client (ep: %d) disconnected\n", ipa_ep_idx_tx); + + /* tear down rx pipe */ + if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN)) { + IPAERR("fail to tear down rx pipe\n"); + result = -EFAULT; + goto fail; + } + ipa3_disable_data_path(ipa_ep_idx_rx); + ipa3_delete_dflt_flt_rules(ipa_ep_idx_rx); + memset(ep_rx, 0, sizeof(struct ipa3_ep_context)); + IPADBG("rx client (ep: %d) disconnected\n", ipa_ep_idx_rx); + +fail: + return result; +} + +int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + struct ipa3_ep_context *ep_tx, *ep_rx; + int result = 0; + + IPADBG("ep_tx = %d\n", ipa_ep_idx_tx); + IPADBG("ep_rx = %d\n", ipa_ep_idx_rx); + + ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx]; + ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx]; + + /* enable tx pipe */ + if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE)) { + IPAERR("fail to enable tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* resume tx pipe */ + if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_RESUME)) { + IPAERR("fail to resume tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* enable rx pipe */ + if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE)) { + IPAERR("fail to enable rx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* resume rx pipe */ + if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_RESUME)) { + IPAERR("fail to resume rx pipe\n"); + result = -EFAULT; + goto fail; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + /* enable data path */ + result = 
ipa3_enable_data_path(ipa_ep_idx_rx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_rx); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -EFAULT; + } + + result = ipa3_enable_data_path(ipa_ep_idx_tx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_tx); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + +fail: + return result; +} + +int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + struct ipa3_ep_context *ep_tx, *ep_rx; + int result = 0; + + IPADBG("ep_tx = %d\n", ipa_ep_idx_tx); + IPADBG("ep_rx = %d\n", ipa_ep_idx_rx); + + ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx]; + ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx]; + + /* suspend tx pipe */ + if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND)) { + IPAERR("fail to suspend tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* disable tx pipe */ + if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE)) { + IPAERR("fail to disable tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* suspend rx pipe */ + if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND)) { + IPAERR("fail to suspend rx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* disable rx pipe */ + if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE)) { + IPAERR("fail to disable rx pipe\n"); + result = -EFAULT; + goto fail; + } + +fail: + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile new file mode 100644 index 000000000000..ba8c2a39b9b3 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_IPA3) += ipa_hal.o + +ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o ipahal_hw_stats.o ipahal_nat.o diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c new file mode 100644 index 000000000000..220447477e58 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c @@ -0,0 +1,1583 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include "ipahal.h" +#include "ipahal_i.h" +#include "ipahal_reg_i.h" +#include "ipahal_fltrt_i.h" +#include "ipahal_hw_stats_i.h" +#include "ipahal_nat_i.h" + +struct ipahal_context *ipahal_ctx; + +static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = { + __stringify(IPA_IMM_CMD_IP_V4_FILTER_INIT), + __stringify(IPA_IMM_CMD_IP_V6_FILTER_INIT), + __stringify(IPA_IMM_CMD_IP_V4_NAT_INIT), + __stringify(IPA_IMM_CMD_IP_V4_ROUTING_INIT), + __stringify(IPA_IMM_CMD_IP_V6_ROUTING_INIT), + __stringify(IPA_IMM_CMD_HDR_INIT_LOCAL), + __stringify(IPA_IMM_CMD_HDR_INIT_SYSTEM), + __stringify(IPA_IMM_CMD_REGISTER_WRITE), + __stringify(IPA_IMM_CMD_NAT_DMA), + __stringify(IPA_IMM_CMD_IP_PACKET_INIT), + __stringify(IPA_IMM_CMD_DMA_SHARED_MEM), + __stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS), + __stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR), + __stringify(IPA_IMM_CMD_TABLE_DMA), + __stringify(IPA_IMM_CMD_IP_V6_CT_INIT) +}; + +static const char *ipahal_pkt_status_exception_to_str + [IPAHAL_PKT_STATUS_EXCEPTION_MAX] = { + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_NONE), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_NAT), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT), +}; + +static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd); + + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_dma_task_32b_addr *data; + struct ipahal_imm_cmd_dma_task_32b_addr *dma_params = + (struct ipahal_imm_cmd_dma_task_32b_addr *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) + return pyld; + + /* Currently supports only one packet */ + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd) + (1 << 8); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data; + + if (unlikely(dma_params->size1 & ~0xFFFF)) { + WARN(1, "Size1 is bigger than 16bit width 0x%x\n", + dma_params->size1); + } + if (unlikely(dma_params->packet_size & ~0xFFFF)) { + WARN(1, "Pkt size is bigger than 16bit width 0x%x\n", + dma_params->packet_size); + } + data->cmplt = dma_params->cmplt ? 1 : 0; + data->eof = dma_params->eof ? 1 : 0; + data->flsh = dma_params->flsh ? 1 : 0; + data->lock = dma_params->lock ? 1 : 0; + data->unlock = dma_params->unlock ? 
1 : 0; + data->size1 = dma_params->size1; + data->addr1 = dma_params->addr1; + data->packet_size = dma_params->packet_size; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_tag_status( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_packet_tag_status *data; + struct ipahal_imm_cmd_ip_packet_tag_status *tag_params = + (struct ipahal_imm_cmd_ip_packet_tag_status *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data; + + if (unlikely(tag_params->tag & ~0xFFFFFFFFFFFF)) { + IPAHAL_ERR("tag is bigger than 48bit width 0x%llx\n", + tag_params->tag); + WARN_ON(1); + } + data->tag = tag_params->tag; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_dma_shared_mem *data; + struct ipahal_imm_cmd_dma_shared_mem *mem_params = + (struct ipahal_imm_cmd_dma_shared_mem *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) + return pyld; + + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data; + + if (unlikely(mem_params->size & ~0xFFFF)) { + WARN(1, "Size is bigger than 16bit width 0x%x\n", + mem_params->size); + } + if (unlikely(mem_params->local_addr & ~0xFFFF)) { + WARN(1, "Local addr is bigger than 16bit width 0x%x\n", + mem_params->local_addr); + } + data->direction = mem_params->is_read ? 1 : 0; + data->size = mem_params->size; + data->local_addr = mem_params->local_addr; + data->system_addr = mem_params->system_addr; + data->skip_pipeline_clear = mem_params->skip_pipeline_clear ? 1 : 0; + switch (mem_params->pipeline_clear_options) { + case IPAHAL_HPS_CLEAR: + data->pipeline_clear_options = 0; + break; + case IPAHAL_SRC_GRP_CLEAR: + data->pipeline_clear_options = 1; + break; + case IPAHAL_FULL_PIPELINE_CLEAR: + data->pipeline_clear_options = 2; + break; + default: + IPAHAL_ERR("unsupported pipline clear option %d\n", + mem_params->pipeline_clear_options); + WARN_ON(1); + } + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem_v_4_0( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *data; + struct ipahal_imm_cmd_dma_shared_mem *mem_params = + (struct ipahal_imm_cmd_dma_shared_mem *)params; + + if (unlikely(mem_params->size & ~0xFFFF)) { + IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n", + mem_params->size); + WARN_ON(1); + return NULL; + } + if (unlikely(mem_params->local_addr & ~0xFFFF)) { + IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n", + mem_params->local_addr); + WARN_ON(1); + return NULL; + } + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + WARN_ON(1); + return pyld; + } + + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *)pyld->data; + + data->direction = mem_params->is_read ? 
1 : 0; + data->clear_after_read = mem_params->clear_after_read; + data->size = mem_params->size; + data->local_addr = mem_params->local_addr; + data->system_addr = mem_params->system_addr; + pyld->opcode |= (mem_params->skip_pipeline_clear ? 1 : 0) << 8; + switch (mem_params->pipeline_clear_options) { + case IPAHAL_HPS_CLEAR: + break; + case IPAHAL_SRC_GRP_CLEAR: + pyld->opcode |= (1 << 9); + break; + case IPAHAL_FULL_PIPELINE_CLEAR: + pyld->opcode |= (2 << 9); + break; + default: + IPAHAL_ERR("unsupported pipline clear option %d\n", + mem_params->pipeline_clear_options); + WARN_ON(1); + } + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_register_write *data; + struct ipahal_imm_cmd_register_write *regwrt_params = + (struct ipahal_imm_cmd_register_write *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_register_write *)pyld->data; + + if (unlikely(regwrt_params->offset & ~0xFFFF)) { + IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n", + regwrt_params->offset); + WARN_ON(1); + } + data->offset = regwrt_params->offset; + data->value = regwrt_params->value; + data->value_mask = regwrt_params->value_mask; + + data->skip_pipeline_clear = regwrt_params->skip_pipeline_clear ? 1 : 0; + switch (regwrt_params->pipeline_clear_options) { + case IPAHAL_HPS_CLEAR: + data->pipeline_clear_options = 0; + break; + case IPAHAL_SRC_GRP_CLEAR: + data->pipeline_clear_options = 1; + break; + case IPAHAL_FULL_PIPELINE_CLEAR: + data->pipeline_clear_options = 2; + break; + default: + IPAHAL_ERR("unsupported pipline clear option %d\n", + regwrt_params->pipeline_clear_options); + WARN_ON(1); + } + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write_v_4_0( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_register_write_v_4_0 *data; + struct ipahal_imm_cmd_register_write *regwrt_params = + (struct ipahal_imm_cmd_register_write *)params; + + if (unlikely(regwrt_params->offset & ~0xFFFF)) { + IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n", + regwrt_params->offset); + WARN_ON(1); + return NULL; + } + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + WARN_ON(1); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_register_write_v_4_0 *)pyld->data; + + data->offset = regwrt_params->offset; + data->offset_high = regwrt_params->offset >> 16; + data->value = regwrt_params->value; + data->value_mask = regwrt_params->value_mask; + + pyld->opcode |= (regwrt_params->skip_pipeline_clear ? 
1 : 0) << 8; + switch (regwrt_params->pipeline_clear_options) { + case IPAHAL_HPS_CLEAR: + break; + case IPAHAL_SRC_GRP_CLEAR: + pyld->opcode |= (1 << 9); + break; + case IPAHAL_FULL_PIPELINE_CLEAR: + pyld->opcode |= (2 << 9); + break; + default: + IPAHAL_ERR("unsupported pipline clear option %d\n", + regwrt_params->pipeline_clear_options); + WARN_ON(1); + } + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_packet_init *data; + struct ipahal_imm_cmd_ip_packet_init *pktinit_params = + (struct ipahal_imm_cmd_ip_packet_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data; + + if (unlikely(pktinit_params->destination_pipe_index & ~0x1F)) { + IPAHAL_ERR("Dst pipe idx is bigger than 5bit width 0x%x\n", + pktinit_params->destination_pipe_index); + WARN_ON(1); + } + data->destination_pipe_index = pktinit_params->destination_pipe_index; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_nat_dma *data; + struct ipahal_imm_cmd_table_dma *nat_params = + (struct ipahal_imm_cmd_table_dma *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data; + + data->table_index = nat_params->table_index; + data->base_addr = nat_params->base_addr; + data->offset = nat_params->offset; + data->data = nat_params->data; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_table_dma_ipav4( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_table_dma_ipav4 *data; + struct ipahal_imm_cmd_table_dma *nat_params = + (struct ipahal_imm_cmd_table_dma *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_table_dma_ipav4 *)pyld->data; + + data->table_index = nat_params->table_index; + data->base_addr = nat_params->base_addr; + data->offset = nat_params->offset; + data->data = nat_params->data; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_hdr_init_system *data; + struct ipahal_imm_cmd_hdr_init_system *syshdr_params = + (struct ipahal_imm_cmd_hdr_init_system *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data; + + data->hdr_table_addr = 
syshdr_params->hdr_table_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_local( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_hdr_init_local *data; + struct ipahal_imm_cmd_hdr_init_local *lclhdr_params = + (struct ipahal_imm_cmd_hdr_init_local *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data; + + if (unlikely(lclhdr_params->size_hdr_table & ~0xFFF)) { + IPAHAL_ERR("Hdr tble size is bigger than 12bit width 0x%x\n", + lclhdr_params->size_hdr_table); + WARN_ON(1); + } + data->hdr_table_addr = lclhdr_params->hdr_table_addr; + data->size_hdr_table = lclhdr_params->size_hdr_table; + data->hdr_addr = lclhdr_params->hdr_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_routing_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_v6_routing_init *data; + struct ipahal_imm_cmd_ip_v6_routing_init *rt6_params = + (struct ipahal_imm_cmd_ip_v6_routing_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data; + + data->hash_rules_addr = rt6_params->hash_rules_addr; + data->hash_rules_size = rt6_params->hash_rules_size; + data->hash_local_addr = rt6_params->hash_local_addr; + data->nhash_rules_addr = rt6_params->nhash_rules_addr; + data->nhash_rules_size = rt6_params->nhash_rules_size; + data->nhash_local_addr = rt6_params->nhash_local_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_routing_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_v4_routing_init *data; + struct ipahal_imm_cmd_ip_v4_routing_init *rt4_params = + (struct ipahal_imm_cmd_ip_v4_routing_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data; + + data->hash_rules_addr = rt4_params->hash_rules_addr; + data->hash_rules_size = rt4_params->hash_rules_size; + data->hash_local_addr = rt4_params->hash_local_addr; + data->nhash_rules_addr = rt4_params->nhash_rules_addr; + data->nhash_rules_size = rt4_params->nhash_rules_size; + data->nhash_local_addr = rt4_params->nhash_local_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_v4_nat_init *data; + struct ipahal_imm_cmd_ip_v4_nat_init *nat4_params = + (struct ipahal_imm_cmd_ip_v4_nat_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = 
ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data; + + data->ipv4_rules_addr = nat4_params->table_init.base_table_addr; + data->ipv4_expansion_rules_addr = + nat4_params->table_init.expansion_table_addr; + data->index_table_addr = nat4_params->index_table_addr; + data->index_table_expansion_addr = + nat4_params->index_table_expansion_addr; + data->table_index = nat4_params->table_init.table_index; + data->ipv4_rules_addr_type = + nat4_params->table_init.base_table_addr_shared ? 1 : 0; + data->ipv4_expansion_rules_addr_type = + nat4_params->table_init.expansion_table_addr_shared ? 1 : 0; + data->index_table_addr_type = + nat4_params->index_table_addr_shared ? 1 : 0; + data->index_table_expansion_addr_type = + nat4_params->index_table_expansion_addr_shared ? 1 : 0; + data->size_base_tables = nat4_params->table_init.size_base_table; + data->size_expansion_tables = + nat4_params->table_init.size_expansion_table; + data->public_addr_info = nat4_params->public_addr_info; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_ct_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_v6_ct_init *data; + struct ipahal_imm_cmd_ip_v6_ct_init *ipv6ct_params = + (struct ipahal_imm_cmd_ip_v6_ct_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) + return pyld; + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_v6_ct_init *)pyld->data; + + data->table_addr = ipv6ct_params->table_init.base_table_addr; + data->expansion_table_addr = + ipv6ct_params->table_init.expansion_table_addr; + data->table_index = ipv6ct_params->table_init.table_index; + data->table_addr_type = + ipv6ct_params->table_init.base_table_addr_shared ? 1 : 0; + data->expansion_table_addr_type = + ipv6ct_params->table_init.expansion_table_addr_shared ? 
1 : 0; + data->size_base_table = ipv6ct_params->table_init.size_base_table; + data->size_expansion_table = + ipv6ct_params->table_init.size_expansion_table; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_filter_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_v6_filter_init *data; + struct ipahal_imm_cmd_ip_v6_filter_init *flt6_params = + (struct ipahal_imm_cmd_ip_v6_filter_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data; + + data->hash_rules_addr = flt6_params->hash_rules_addr; + data->hash_rules_size = flt6_params->hash_rules_size; + data->hash_local_addr = flt6_params->hash_local_addr; + data->nhash_rules_addr = flt6_params->nhash_rules_addr; + data->nhash_rules_size = flt6_params->nhash_rules_size; + data->nhash_local_addr = flt6_params->nhash_local_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_filter_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_v4_filter_init *data; + struct ipahal_imm_cmd_ip_v4_filter_init *flt4_params = + (struct ipahal_imm_cmd_ip_v4_filter_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data; + + data->hash_rules_addr = flt4_params->hash_rules_addr; + data->hash_rules_size = flt4_params->hash_rules_size; + data->hash_local_addr = flt4_params->hash_local_addr; + data->nhash_rules_addr = flt4_params->nhash_rules_addr; + data->nhash_rules_size = flt4_params->nhash_rules_size; + data->nhash_local_addr = flt4_params->nhash_local_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dummy( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + IPAHAL_ERR("no construct function for IMM_CMD=%s, IPA ver %d\n", + ipahal_imm_cmd_name_str(cmd), ipahal_ctx->hw_type); + WARN_ON(1); + return NULL; +} + +/* + * struct ipahal_imm_cmd_obj - immediate command H/W information for + * specific IPA version + * @construct - CB to construct imm command payload from abstracted structure + * @opcode - Immediate command OpCode + */ +struct ipahal_imm_cmd_obj { + struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd, + const void *params, bool is_atomic_ctx); + u16 opcode; +}; + +/* + * This table contains the info regard each immediate command for IPAv3 + * and later. + * Information like: opcode and construct functions. + * All the information on the IMM on IPAv3 are statically defined below. + * If information is missing regard some IMM on some IPA version, + * the init function will fill it with the information from the previous + * IPA version. 
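+ * For example, the IPA_HW_v4_0 rows below list only the commands that
+ * changed on that version; at init time ipahal_imm_cmd_init() copies every
+ * other command's entry from the previous version's row.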
+ * Information is considered missing if all of the fields are 0 + * If opcode is -1, this means that the IMM is removed on the + * specific version + */ +static struct ipahal_imm_cmd_obj + ipahal_imm_cmd_objs[IPA_HW_MAX][IPA_IMM_CMD_MAX] = { + /* IPAv3 */ + [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = { + ipa_imm_cmd_construct_ip_v4_filter_init, + 3}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = { + ipa_imm_cmd_construct_ip_v6_filter_init, + 4}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = { + ipa_imm_cmd_construct_ip_v4_nat_init, + 5}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = { + ipa_imm_cmd_construct_ip_v4_routing_init, + 7}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = { + ipa_imm_cmd_construct_ip_v6_routing_init, + 8}, + [IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = { + ipa_imm_cmd_construct_hdr_init_local, + 9}, + [IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = { + ipa_imm_cmd_construct_hdr_init_system, + 10}, + [IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = { + ipa_imm_cmd_construct_register_write, + 12}, + [IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = { + ipa_imm_cmd_construct_nat_dma, + 14}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = { + ipa_imm_cmd_construct_ip_packet_init, + 16}, + [IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = { + ipa_imm_cmd_construct_dma_task_32b_addr, + 17}, + [IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = { + ipa_imm_cmd_construct_dma_shared_mem, + 19}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = { + ipa_imm_cmd_construct_ip_packet_tag_status, + 20}, + + /* IPAv4 */ + [IPA_HW_v4_0][IPA_IMM_CMD_REGISTER_WRITE] = { + ipa_imm_cmd_construct_register_write_v_4_0, + 12}, + /* NAT_DMA was renamed to TABLE_DMA for IPAv4 */ + [IPA_HW_v4_0][IPA_IMM_CMD_NAT_DMA] = { + ipa_imm_cmd_construct_dummy, + -1}, + [IPA_HW_v4_0][IPA_IMM_CMD_TABLE_DMA] = { + ipa_imm_cmd_construct_table_dma_ipav4, + 14}, + [IPA_HW_v4_0][IPA_IMM_CMD_DMA_SHARED_MEM] = { + ipa_imm_cmd_construct_dma_shared_mem_v_4_0, + 19}, + [IPA_HW_v4_0][IPA_IMM_CMD_IP_V6_CT_INIT] = { + ipa_imm_cmd_construct_ip_v6_ct_init, + 23} +}; + +/* + * ipahal_imm_cmd_init() - Build the Immediate command information table + * See ipahal_imm_cmd_objs[][] comments + */ +static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type) +{ + int i; + int j; + struct ipahal_imm_cmd_obj zero_obj; + + IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type); + + if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) { + IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type); + return -EINVAL; + } + + memset(&zero_obj, 0, sizeof(zero_obj)); + for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) { + for (j = 0; j < IPA_IMM_CMD_MAX ; j++) { + if (!memcmp(&ipahal_imm_cmd_objs[i+1][j], &zero_obj, + sizeof(struct ipahal_imm_cmd_obj))) { + memcpy(&ipahal_imm_cmd_objs[i+1][j], + &ipahal_imm_cmd_objs[i][j], + sizeof(struct ipahal_imm_cmd_obj)); + } else { + /* + * explicitly overridden immediate command. 
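+				 * (e.g. on IPA_HW_v4_0, REGISTER_WRITE and
+				 * DMA_SHARED_MEM are overridden with
+				 * v4_0-specific construct callbacks in the
+				 * table above).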
+				 * Check validity
+				 */
+				if (!ipahal_imm_cmd_objs[i+1][j].opcode) {
+					IPAHAL_ERR(
+					  "imm_cmd=%s with zero opcode ipa_ver=%d\n",
+					  ipahal_imm_cmd_name_str(j), i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_imm_cmd_objs[i+1][j].construct) {
+					IPAHAL_ERR(
+					  "imm_cmd=%s with NULL construct func ipa_ver=%d\n",
+					  ipahal_imm_cmd_name_str(j), i+1);
+					WARN_ON(1);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_imm_cmd_name_str() - returns string that represents the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name)
+{
+	if (cmd_name < 0 || cmd_name >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("requested name of invalid imm_cmd=%d\n", cmd_name);
+		return "Invalid IMM_CMD";
+	}
+
+	return ipahal_imm_cmd_name_to_str[cmd_name];
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ */
+static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
+{
+	u32 opcode;
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command imm_cmd=%u\n", cmd);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
+		ipahal_imm_cmd_name_str(cmd));
+	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+	if (opcode == -1) {
+		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	return opcode;
+}
+
+/*
+ * ipahal_construct_imm_cmd() - Construct immediate command
+ * This function builds imm cmd bulk that can be sent to IPA
+ * The command will be allocated dynamically.
+ * After done using it, call ipahal_destroy_imm_cmd() to release it
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	if (!params) {
+		IPAHAL_ERR("Input error: params=%pK\n", params);
+		ipa_assert();
+		return NULL;
+	}
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command %u\n", cmd);
+		return NULL;
+	}
+
+	IPAHAL_DBG_LOW("construct IMM_CMD:%s\n", ipahal_imm_cmd_name_str(cmd));
+	return ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].construct(
+		cmd, params, is_atomic_ctx);
+}
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-Op
+ * Core driver may want functionality to inject NOP commands to IPA
+ * to ensure e.g., pipeline clear before some other operation.
+ * The functionality given by this function can be reached by
+ * ipahal_construct_imm_cmd(). This function is a helper to the core driver
+ * to reach this NOP functionality easily.
+ * @skip_pipline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+	bool skip_pipline_clear,
+	enum ipahal_pipeline_clear_option pipline_clr_opt,
+	bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_register_write cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.skip_pipeline_clear = skip_pipline_clear;
+	cmd.pipeline_clear_options = pipline_clr_opt;
+	cmd.value_mask = 0x0;
+
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&cmd, is_atomic_ctx);
+
+	if (!cmd_pyld)
+		IPAHAL_ERR("failed to construct register_write imm cmd\n");
+
+	return cmd_pyld;
+}
+
+
+/* IPA Packet Status Logic */
+
+#define IPA_PKT_STATUS_SET_MSK(__hw_bit_msk, __shft) \
+	(status->status_mask |= \
+		((hw_status->status_mask & (__hw_bit_msk) ?
1 : 0) << (__shft))) + +static void ipa_pkt_status_parse( + const void *unparsed_status, struct ipahal_pkt_status *status) +{ + enum ipahal_pkt_status_opcode opcode = 0; + enum ipahal_pkt_status_exception exception_type = 0; + bool is_ipv6; + + struct ipa_pkt_status_hw *hw_status = + (struct ipa_pkt_status_hw *)unparsed_status; + + is_ipv6 = (hw_status->status_mask & 0x80) ? false : true; + + status->pkt_len = hw_status->pkt_len; + status->endp_src_idx = hw_status->endp_src_idx; + status->endp_dest_idx = hw_status->endp_dest_idx; + status->metadata = hw_status->metadata; + status->flt_local = hw_status->flt_local; + status->flt_hash = hw_status->flt_hash; + status->flt_global = hw_status->flt_hash; + status->flt_ret_hdr = hw_status->flt_ret_hdr; + status->flt_miss = ~(hw_status->flt_rule_id) ? false : true; + status->flt_rule_id = hw_status->flt_rule_id; + status->rt_local = hw_status->rt_local; + status->rt_hash = hw_status->rt_hash; + status->ucp = hw_status->ucp; + status->rt_tbl_idx = hw_status->rt_tbl_idx; + status->rt_miss = ~(hw_status->rt_rule_id) ? false : true; + status->rt_rule_id = hw_status->rt_rule_id; + status->nat_hit = hw_status->nat_hit; + status->nat_entry_idx = hw_status->nat_entry_idx; + status->tag_info = hw_status->tag_info; + status->seq_num = hw_status->seq_num; + status->time_of_day_ctr = hw_status->time_of_day_ctr; + status->hdr_local = hw_status->hdr_local; + status->hdr_offset = hw_status->hdr_offset; + status->frag_hit = hw_status->frag_hit; + status->frag_rule = hw_status->frag_rule; + + switch (hw_status->status_opcode) { + case 0x1: + opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET; + break; + case 0x2: + opcode = IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE; + break; + case 0x4: + opcode = IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET; + break; + case 0x8: + opcode = IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET; + break; + case 0x10: + opcode = IPAHAL_PKT_STATUS_OPCODE_LOG; + break; + case 0x20: + opcode = IPAHAL_PKT_STATUS_OPCODE_DCMP; + break; + case 0x40: + opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS; + break; + default: + IPAHAL_ERR("unsupported Status Opcode 0x%x\n", + hw_status->status_opcode); + WARN_ON(1); + } + status->status_opcode = opcode; + + switch (hw_status->nat_type) { + case 0: + status->nat_type = IPAHAL_PKT_STATUS_NAT_NONE; + break; + case 1: + status->nat_type = IPAHAL_PKT_STATUS_NAT_SRC; + break; + case 2: + status->nat_type = IPAHAL_PKT_STATUS_NAT_DST; + break; + default: + IPAHAL_ERR("unsupported Status NAT type 0x%x\n", + hw_status->nat_type); + WARN_ON(1); + } + + switch (hw_status->exception) { + case 0: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NONE; + break; + case 1: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR; + break; + case 4: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE; + break; + case 8: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH; + break; + case 16: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS; + break; + case 32: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT; + break; + case 64: + if (is_ipv6) + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT; + else + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT; + break; + default: + IPAHAL_ERR("unsupported Status Exception type 0x%x\n", + hw_status->exception); + WARN_ON(1); + } + status->exception = exception_type; + + IPA_PKT_STATUS_SET_MSK(0x1, IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT); + IPA_PKT_STATUS_SET_MSK(0x2, IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT); + IPA_PKT_STATUS_SET_MSK(0x4, 
IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x8, IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x10, IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x20, IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x40,
+		IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x80, IPAHAL_PKT_STATUS_MASK_V4_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x100,
+		IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x200, IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x400, IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x800,
+		IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x1000, IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x2000, IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x4000, IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x8000, IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT);
+	status->status_mask &= 0xFFFF;
+}
+
+/*
+ * struct ipahal_pkt_status_obj - Packet Status H/W information for
+ * specific IPA version
+ * @size: H/W size of the status packet
+ * @parse: CB that parses the H/W packet status into the abstracted structure
+ */
+struct ipahal_pkt_status_obj {
+	u32 size;
+	void (*parse)(const void *unparsed_status,
+		struct ipahal_pkt_status *status);
+};
+
+/*
+ * This table contains the info regarding packet status for IPAv3 and later
+ * Information like: size of packet status and parsing function
+ * All the information on the pkt Status on IPAv3 are statically defined below.
+ * If information is missing regarding some IPA version, the init function
+ * will fill it with the information from the previous IPA version.
+ * Information is considered missing if all of the fields are 0
+ */
+static struct ipahal_pkt_status_obj ipahal_pkt_status_objs[IPA_HW_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0] = {
+		IPA3_0_PKT_STATUS_SIZE,
+		ipa_pkt_status_parse,
+		},
+};
+
+/*
+ * ipahal_pkt_status_init() - Build the packet status information array
+ * for the different IPA versions
+ * See ipahal_pkt_status_objs[] comments
+ */
+static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	struct ipahal_pkt_status_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	/*
+	 * Since structure alignment is implementation dependent,
+	 * add test to avoid different and incompatible data layouts.
+	 *
+	 * In case new H/W has different size or structure of status packet,
+	 * add a compile time validity check for it like below (as well as
+	 * the new defines and/or the new structure in the internal header).
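+	 * For example (hypothetical names, only to illustrate the pattern):
+	 *   BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw_vX) !=
+	 *	IPAX_PKT_STATUS_SIZE);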
+ */ + BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw) != + IPA3_0_PKT_STATUS_SIZE); + + memset(&zero_obj, 0, sizeof(zero_obj)); + for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) { + if (!memcmp(&ipahal_pkt_status_objs[i+1], &zero_obj, + sizeof(struct ipahal_pkt_status_obj))) { + memcpy(&ipahal_pkt_status_objs[i+1], + &ipahal_pkt_status_objs[i], + sizeof(struct ipahal_pkt_status_obj)); + } else { + /* + * explicitly overridden Packet Status info + * Check validity + */ + if (!ipahal_pkt_status_objs[i+1].size) { + IPAHAL_ERR( + "Packet Status with zero size ipa_ver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_pkt_status_objs[i+1].parse) { + IPAHAL_ERR( + "Packet Status without Parse func ipa_ver=%d\n", + i+1); + WARN_ON(1); + } + } + } + + return 0; +} + +/* + * ipahal_pkt_status_get_size() - Get H/W size of packet status + */ +u32 ipahal_pkt_status_get_size(void) +{ + return ipahal_pkt_status_objs[ipahal_ctx->hw_type].size; +} + +/* + * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form + * @unparsed_status: Pointer to H/W format of the packet status as read from H/W + * @status: Pointer to pre-allocated buffer where the parsed info will be stored + */ +void ipahal_pkt_status_parse(const void *unparsed_status, + struct ipahal_pkt_status *status) +{ + if (!unparsed_status || !status) { + IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n", + unparsed_status, status); + return; + } + + IPAHAL_DBG_LOW("Parse Status Packet\n"); + memset(status, 0, sizeof(*status)); + ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse(unparsed_status, + status); +} + +/* + * ipahal_pkt_status_exception_str() - returns string represents exception type + * @exception: [in] The exception type + */ +const char *ipahal_pkt_status_exception_str( + enum ipahal_pkt_status_exception exception) +{ + if (exception < 0 || exception >= IPAHAL_PKT_STATUS_EXCEPTION_MAX) { + IPAHAL_ERR( + "requested string of invalid pkt_status exception=%d\n", + exception); + return "Invalid PKT_STATUS_EXCEPTION"; + } + + return ipahal_pkt_status_exception_to_str[exception]; +} + +#ifdef CONFIG_DEBUG_FS +static void ipahal_debugfs_init(void) +{ + ipahal_ctx->dent = debugfs_create_dir("ipahal", 0); + if (!ipahal_ctx->dent || IS_ERR(ipahal_ctx->dent)) { + IPAHAL_ERR("fail to create ipahal debugfs folder\n"); + goto fail; + } + + return; +fail: + debugfs_remove_recursive(ipahal_ctx->dent); + ipahal_ctx->dent = NULL; +} + +static void ipahal_debugfs_remove(void) +{ + if (!ipahal_ctx) + return; + + if (IS_ERR(ipahal_ctx->dent)) { + IPAHAL_ERR("ipahal debugfs folder was not created\n"); + return; + } + + debugfs_remove_recursive(ipahal_ctx->dent); +} +#else /* CONFIG_DEBUG_FS */ +static void ipahal_debugfs_init(void) {} +static void ipahal_debugfs_remove(void) {} +#endif /* CONFIG_DEBUG_FS */ + +/* + * ipahal_cp_hdr_to_hw_buff_v3() - copy header to hardware buffer according to + * base address and offset given. + * @base: dma base address + * @offset: offset from base address where the data will be copied + * @hdr: the header to be copied + * @hdr_len: the length of the header + */ +static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset, + u8 *const hdr, u32 hdr_len) +{ + memcpy(base + offset, hdr, hdr_len); +} + +/* + * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to + * base address and offset given. + * @type: header processing context type (no processing context, + * IPA_HDR_PROC_ETHII_TO_ETHII etc.) 
+ * @base: dma base address + * @offset: offset from base address where the data will be copied + * @hdr_len: the length of the header + * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr + * @phys_base: memory location in DDR + * @hdr_base_addr: base address in table + * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters + */ +static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type, + void *const base, u32 offset, + u32 hdr_len, bool is_hdr_proc_ctx, + dma_addr_t phys_base, u32 hdr_base_addr, + struct ipa_hdr_offset_entry *offset_entry, + struct ipa_l2tp_hdr_proc_ctx_params l2tp_params) +{ + if (type == IPA_HDR_PROC_NONE) { + struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx; + + ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = hdr_len; + ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base : + hdr_base_addr + offset_entry->offset; + IPAHAL_DBG("header address 0x%x\n", + ctx->hdr_add.hdr_addr); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } else if (type == IPA_HDR_PROC_L2TP_HEADER_ADD) { + struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *ctx; + + ctx = (struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = hdr_len; + ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base : + hdr_base_addr + offset_entry->offset; + IPAHAL_DBG("header address 0x%x\n", + ctx->hdr_add.hdr_addr); + ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->l2tp_params.tlv.length = 1; + ctx->l2tp_params.tlv.value = + IPA_HDR_UCP_L2TP_HEADER_ADD; + ctx->l2tp_params.l2tp_params.eth_hdr_retained = + l2tp_params.hdr_add_param.eth_hdr_retained; + ctx->l2tp_params.l2tp_params.input_ip_version = + l2tp_params.hdr_add_param.input_ip_version; + ctx->l2tp_params.l2tp_params.output_ip_version = + l2tp_params.hdr_add_param.output_ip_version; + + IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } else if (type == IPA_HDR_PROC_L2TP_HEADER_REMOVE) { + struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *ctx; + + ctx = (struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = hdr_len; + ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? 
phys_base : + hdr_base_addr + offset_entry->offset; + IPAHAL_DBG("header address 0x%x length %d\n", + ctx->hdr_add.hdr_addr, ctx->hdr_add.tlv.value); + ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->l2tp_params.tlv.length = 1; + ctx->l2tp_params.tlv.value = + IPA_HDR_UCP_L2TP_HEADER_REMOVE; + ctx->l2tp_params.l2tp_params.hdr_len_remove = + l2tp_params.hdr_remove_param.hdr_len_remove; + ctx->l2tp_params.l2tp_params.eth_hdr_retained = + l2tp_params.hdr_remove_param.eth_hdr_retained; + ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid = + l2tp_params.hdr_remove_param.hdr_ofst_pkt_size_valid; + ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size = + l2tp_params.hdr_remove_param.hdr_ofst_pkt_size; + ctx->l2tp_params.l2tp_params.hdr_endianness = + l2tp_params.hdr_remove_param.hdr_endianness; + IPAHAL_DBG("hdr ofst valid: %d, hdr ofst pkt size: %d\n", + ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid, + ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size); + IPAHAL_DBG("endianness: %d\n", + ctx->l2tp_params.l2tp_params.hdr_endianness); + + IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } else { + struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx; + + ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = hdr_len; + ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base : + hdr_base_addr + offset_entry->offset; + IPAHAL_DBG("header address 0x%x\n", + ctx->hdr_add.hdr_addr); + ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->cmd.length = 0; + switch (type) { + case IPA_HDR_PROC_ETHII_TO_ETHII: + ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII; + break; + case IPA_HDR_PROC_ETHII_TO_802_3: + ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3; + break; + case IPA_HDR_PROC_802_3_TO_ETHII: + ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII; + break; + case IPA_HDR_PROC_802_3_TO_802_3: + ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3; + break; + default: + IPAHAL_ERR("unknown ipa_hdr_proc_type %d", type); + WARN_ON(1); + return -EINVAL; + } + IPAHAL_DBG("command id %d\n", ctx->cmd.value); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } + + return 0; +} + +/* + * ipahal_get_proc_ctx_needed_len_v3() - calculates the needed length for + * addition of header processing context according to the type of processing + * context. + * @type: header processing context type (no processing context, + * IPA_HDR_PROC_ETHII_TO_ETHII etc.) + */ +static int ipahal_get_proc_ctx_needed_len_v3(enum ipa_hdr_proc_type type) +{ + return (type == IPA_HDR_PROC_NONE) ? 
+ sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq) : + sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq); +} + +/* + * struct ipahal_hdr_funcs - headers handling functions for specific IPA + * version + * @ipahal_cp_hdr_to_hw_buff - copy function for regular headers + */ +struct ipahal_hdr_funcs { + void (*ipahal_cp_hdr_to_hw_buff)(void *const base, u32 offset, + u8 *const hdr, u32 hdr_len); + + int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type, + void *const base, u32 offset, u32 hdr_len, + bool is_hdr_proc_ctx, dma_addr_t phys_base, + u32 hdr_base_addr, + struct ipa_hdr_offset_entry *offset_entry, + struct ipa_l2tp_hdr_proc_ctx_params l2tp_params); + + int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type); +}; + +static struct ipahal_hdr_funcs hdr_funcs; + +static void ipahal_hdr_init(enum ipa_hw_type ipa_hw_type) +{ + + IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type); + + /* + * once there are changes in HW and need to use different case, insert + * new case for the new h/w. put the default always for the latest HW + * and make sure all previous supported versions have their cases. + */ + switch (ipa_hw_type) { + case IPA_HW_v3_0: + default: + hdr_funcs.ipahal_cp_hdr_to_hw_buff = + ipahal_cp_hdr_to_hw_buff_v3; + hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff = + ipahal_cp_proc_ctx_to_hw_buff_v3; + hdr_funcs.ipahal_get_proc_ctx_needed_len = + ipahal_get_proc_ctx_needed_len_v3; + } + IPAHAL_DBG("Exit\n"); +} + +/* + * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to + * base address and offset given. + * @base: dma base address + * @offset: offset from base address where the data will be copied + * @hdr: the header to be copied + * @hdr_len: the length of the header + */ +void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr, + u32 hdr_len) +{ + IPAHAL_DBG_LOW("Entry\n"); + IPAHAL_DBG("base %pK, offset %d, hdr %pK, hdr_len %d\n", base, + offset, hdr, hdr_len); + if (!base || !hdr_len || !hdr) { + IPAHAL_ERR("failed on validating params\n"); + return; + } + + hdr_funcs.ipahal_cp_hdr_to_hw_buff(base, offset, hdr, hdr_len); + + IPAHAL_DBG_LOW("Exit\n"); +} + +/* + * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to + * base address and offset given. 
+ * @type: type of header processing context + * @base: dma base address + * @offset: offset from base address where the data will be copied + * @hdr_len: the length of the header + * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr + * @phys_base: memory location in DDR + * @hdr_base_addr: base address in table + * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters + */ +int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type, + void *const base, u32 offset, u32 hdr_len, + bool is_hdr_proc_ctx, dma_addr_t phys_base, + u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry, + struct ipa_l2tp_hdr_proc_ctx_params l2tp_params) +{ + IPAHAL_DBG( + "type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %pK\n" + , type, base, offset, hdr_len, is_hdr_proc_ctx, + hdr_base_addr, offset_entry); + + if (!base || + !hdr_len || + (is_hdr_proc_ctx && !phys_base) || + (!is_hdr_proc_ctx && !offset_entry) || + (!is_hdr_proc_ctx && !hdr_base_addr)) { + IPAHAL_ERR( + "invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n" + , hdr_len, &phys_base, hdr_base_addr + , is_hdr_proc_ctx, offset_entry); + return -EINVAL; + } + + return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset, + hdr_len, is_hdr_proc_ctx, phys_base, + hdr_base_addr, offset_entry, l2tp_params); +} + +/* + * ipahal_get_proc_ctx_needed_len() - calculates the needed length for + * addition of header processing context according to the type of processing + * context + * @type: header processing context type (no processing context, + * IPA_HDR_PROC_ETHII_TO_ETHII etc.) + */ +int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type) +{ + int res; + + IPAHAL_DBG("entry\n"); + + res = hdr_funcs.ipahal_get_proc_ctx_needed_len(type); + + IPAHAL_DBG("Exit\n"); + + return res; +} + +int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base, + struct device *ipa_pdev) +{ + int result; + + IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%pK ipa_pdev=%pK\n", + ipa_hw_type, base, ipa_pdev); + + ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL); + if (!ipahal_ctx) { + IPAHAL_ERR("kzalloc err for ipahal_ctx\n"); + result = -ENOMEM; + goto bail_err_exit; + } + + if (ipa_hw_type < IPA_HW_v3_0) { + IPAHAL_ERR("ipahal supported on IPAv3 and later only\n"); + result = -EINVAL; + goto bail_free_ctx; + } + + if (ipa_hw_type >= IPA_HW_MAX) { + IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type); + result = -EINVAL; + goto bail_free_ctx; + } + + if (!base) { + IPAHAL_ERR("invalid memory io mapping addr\n"); + result = -EINVAL; + goto bail_free_ctx; + } + + if (!ipa_pdev) { + IPAHAL_ERR("invalid IPA platform device\n"); + result = -EINVAL; + goto bail_free_ctx; + } + + ipahal_ctx->hw_type = ipa_hw_type; + ipahal_ctx->base = base; + ipahal_ctx->ipa_pdev = ipa_pdev; + + if (ipahal_reg_init(ipa_hw_type)) { + IPAHAL_ERR("failed to init ipahal reg\n"); + result = -EFAULT; + goto bail_free_ctx; + } + + if (ipahal_imm_cmd_init(ipa_hw_type)) { + IPAHAL_ERR("failed to init ipahal imm cmd\n"); + result = -EFAULT; + goto bail_free_ctx; + } + + if (ipahal_pkt_status_init(ipa_hw_type)) { + IPAHAL_ERR("failed to init ipahal pkt status\n"); + result = -EFAULT; + goto bail_free_ctx; + } + + ipahal_hdr_init(ipa_hw_type); + + if (ipahal_fltrt_init(ipa_hw_type)) { + IPAHAL_ERR("failed to init ipahal flt rt\n"); + result = -EFAULT; + goto bail_free_ctx; + } + + if (ipahal_hw_stats_init(ipa_hw_type)) { + IPAHAL_ERR("failed 
to init ipahal hw stats\n"); + result = -EFAULT; + goto bail_free_fltrt; + } + + if (ipahal_nat_init(ipa_hw_type)) { + IPAHAL_ERR("failed to init ipahal NAT\n"); + result = -EFAULT; + goto bail_free_fltrt; + } + + /* create an IPC buffer for the registers dump */ + ipahal_ctx->regdumpbuf = ipc_log_context_create(IPAHAL_IPC_LOG_PAGES, + "ipa_regs", 0); + if (ipahal_ctx->regdumpbuf == NULL) + IPAHAL_ERR("failed to create IPA regdump log, continue...\n"); + + ipahal_debugfs_init(); + + return 0; + +bail_free_fltrt: + ipahal_fltrt_destroy(); +bail_free_ctx: + if (ipahal_ctx->regdumpbuf) + ipc_log_context_destroy(ipahal_ctx->regdumpbuf); + kfree(ipahal_ctx); + ipahal_ctx = NULL; +bail_err_exit: + return result; +} + +void ipahal_destroy(void) +{ + IPAHAL_DBG("Entry\n"); + ipahal_fltrt_destroy(); + ipahal_debugfs_remove(); + kfree(ipahal_ctx); + ipahal_ctx = NULL; +} + +void ipahal_free_dma_mem(struct ipa_mem_buffer *mem) +{ + if (likely(mem)) { + dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base, + mem->phys_base); + mem->size = 0; + mem->base = NULL; + mem->phys_base = 0; + } +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h new file mode 100644 index 000000000000..2fa6b2abc3a7 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h @@ -0,0 +1,653 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPAHAL_H_ +#define _IPAHAL_H_ + +#include +#include "../../ipa_common_i.h" + +/* + * Immediate command names + * + * NOTE:: Any change to this enum, need to change to ipahal_imm_cmd_name_to_str + * array as well. + */ +enum ipahal_imm_cmd_name { + IPA_IMM_CMD_IP_V4_FILTER_INIT, + IPA_IMM_CMD_IP_V6_FILTER_INIT, + IPA_IMM_CMD_IP_V4_NAT_INIT, + IPA_IMM_CMD_IP_V4_ROUTING_INIT, + IPA_IMM_CMD_IP_V6_ROUTING_INIT, + IPA_IMM_CMD_HDR_INIT_LOCAL, + IPA_IMM_CMD_HDR_INIT_SYSTEM, + IPA_IMM_CMD_REGISTER_WRITE, + IPA_IMM_CMD_NAT_DMA, + IPA_IMM_CMD_IP_PACKET_INIT, + IPA_IMM_CMD_DMA_SHARED_MEM, + IPA_IMM_CMD_IP_PACKET_TAG_STATUS, + IPA_IMM_CMD_DMA_TASK_32B_ADDR, + IPA_IMM_CMD_TABLE_DMA, + IPA_IMM_CMD_IP_V6_CT_INIT, + IPA_IMM_CMD_MAX, +}; + +/* Immediate commands abstracted structures */ + +/* + * struct ipahal_imm_cmd_ip_v4_filter_init - IP_V4_FILTER_INIT cmd payload + * Inits IPv4 filter block. + * @hash_rules_addr: Addr in sys mem where ipv4 hashable flt tbl starts + * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should + * be copied to + */ +struct ipahal_imm_cmd_ip_v4_filter_init { + u64 hash_rules_addr; + u64 nhash_rules_addr; + u32 hash_rules_size; + u32 hash_local_addr; + u32 nhash_rules_size; + u32 nhash_local_addr; +}; + +/* + * struct ipahal_imm_cmd_ip_v6_filter_init - IP_V6_FILTER_INIT cmd payload + * Inits IPv6 filter block. 
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable flt tbl starts + * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should + * be copied to + */ +struct ipahal_imm_cmd_ip_v6_filter_init { + u64 hash_rules_addr; + u64 nhash_rules_addr; + u32 hash_rules_size; + u32 hash_local_addr; + u32 nhash_rules_size; + u32 nhash_local_addr; +}; + +/* + * struct ipahal_imm_cmd_nat_ipv6ct_init_common - NAT/IPv6CT table init command + * common part + * @base_table_addr: Address in sys/shared mem where the base table starts + * @expansion_table_addr: Address in sys/shared mem where the expansion table + * starts. Entries that result in a hash collision are located in this table. + * @base_table_addr_shared: base_table_addr in shared mem (if not, then sys) + * @expansion_table_addr_shared: expansion_table_addr in + * shared mem (if not, then sys) + * @size_base_table: Num of entries in the base table + * @size_expansion_table: Num of entries in the expansion table + * @table_index: For future support of multiple tables + */ +struct ipahal_imm_cmd_nat_ipv6ct_init_common { + u64 base_table_addr; + u64 expansion_table_addr; + bool base_table_addr_shared; + bool expansion_table_addr_shared; + u16 size_base_table; + u16 size_expansion_table; + u8 table_index; +}; + +/* + * struct ipahal_imm_cmd_ip_v4_nat_init - IP_V4_NAT_INIT cmd payload + * Inits IPv4 NAT block. Initializes the NAT table with its dimensions, location, + * cache address and other related parameters. + * @table_init: table initialization parameters + * @index_table_addr: Addr in sys/shared mem where the index table, which points + * to the NAT table, starts + * @index_table_expansion_addr: Addr in sys/shared mem where the expansion index + * table starts + * @index_table_addr_shared: index_table_addr in shared mem (if not, then sys) + * @index_table_expansion_addr_shared: index_table_expansion_addr in + * shared mem (if not, then sys) + * @public_addr_info: Public IP addresses info suitable to the IPA H/W version + * IPA H/W >= 4.0 - PDN config table offset in SMEM + * IPA H/W < 4.0 - The public IP address + */ +struct ipahal_imm_cmd_ip_v4_nat_init { + struct ipahal_imm_cmd_nat_ipv6ct_init_common table_init; + u64 index_table_addr; + u64 index_table_expansion_addr; + bool index_table_addr_shared; + bool index_table_expansion_addr_shared; + u32 public_addr_info; +}; + +/* + * struct ipahal_imm_cmd_ip_v6_ct_init - IP_V6_CONN_TRACK_INIT cmd payload + * Inits IPv6CT block. Initializes the IPv6CT table with its dimensions, location, + * cache address and other related parameters.
+ * @table_init: table initialization parameters + */ +struct ipahal_imm_cmd_ip_v6_ct_init { + struct ipahal_imm_cmd_nat_ipv6ct_init_common table_init; +}; + +/* + * struct ipahal_imm_cmd_ip_v4_routing_init - IP_V4_ROUTING_INIT cmd payload + * Inits IPv4 routing table/structure - with the rules and other related params + * @hash_rules_addr: Addr in sys mem where ipv4 hashable rt tbl starts + * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should + * be copied to + */ +struct ipahal_imm_cmd_ip_v4_routing_init { + u64 hash_rules_addr; + u64 nhash_rules_addr; + u32 hash_rules_size; + u32 hash_local_addr; + u32 nhash_rules_size; + u32 nhash_local_addr; +}; + +/* + * struct ipahal_imm_cmd_ip_v6_routing_init - IP_V6_ROUTING_INIT cmd payload + * Inits IPv6 routing table/structure - with the rules and other related params + * @hash_rules_addr: Addr in sys mem where ipv6 hashable rt tbl starts + * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should + * be copied to + */ +struct ipahal_imm_cmd_ip_v6_routing_init { + u64 hash_rules_addr; + u64 nhash_rules_addr; + u32 hash_rules_size; + u32 hash_local_addr; + u32 nhash_rules_size; + u32 nhash_local_addr; +}; + +/* + * struct ipahal_imm_cmd_hdr_init_local - HDR_INIT_LOCAL cmd payload + * Inits hdr table within local mem with the hdrs and their length. + * @hdr_table_addr: Word address in sys mem where the table starts (SRC) + * @size_hdr_table: Size of the above (in bytes) + * @hdr_addr: header address in IPA sram (used as DST for memory copy) + * @rsvd: reserved + */ +struct ipahal_imm_cmd_hdr_init_local { + u64 hdr_table_addr; + u32 size_hdr_table; + u32 hdr_addr; +}; + +/* + * struct ipahal_imm_cmd_hdr_init_system - HDR_INIT_SYSTEM cmd payload + * Inits hdr table within sys mem with the hdrs and their length. + * @hdr_table_addr: Word address in system memory where the hdrs tbl starts. + */ +struct ipahal_imm_cmd_hdr_init_system { + u64 hdr_table_addr; +}; + +/* + * struct ipahal_imm_cmd_table_dma - TABLE_DMA cmd payload + * Perform DMA operation on NAT and IPV6 connection tracking related mem + * addresses. Copy data into different locations within IPv6CT and NAT + * associated tbls. (For add/remove NAT rules) + * @offset: offset in bytes from base addr to write 'data' to + * @data: data to be written + * @table_index: NAT tbl index. Defines the tbl on which to perform DMA op. + * @base_addr: Base addr to which the DMA operation should be performed. + */ +struct ipahal_imm_cmd_table_dma { + u32 offset; + u16 data; + u8 table_index; + u8 base_addr; +}; + +/* + * struct ipahal_imm_cmd_ip_packet_init - IP_PACKET_INIT cmd payload + * Configuration for specific IP pkt. Shall be called prior to an IP pkt + * data. Pkt will not go through IP pkt processing. 
+ * @destination_pipe_index: Destination pipe index (in case routing + * is enabled, this field will overwrite the rt rule) + */ +struct ipahal_imm_cmd_ip_packet_init { + u32 destination_pipe_index; +}; + +/* + * enum ipa_pipeline_clear_option - Values for pipeline clear waiting options + * @IPAHAL_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue + * shall not be serviced until HPS is clear of packets or immediate commands. + * The high priority Rx queue / Q6ZIP group shall still be serviced normally. + * + * @IPAHAL_SRC_GRP_CLEAR: Wait for originating source group to be clear + * (for no packet contexts allocated to the originating source group). + * The source group / Rx queue shall not be serviced until all previously + * allocated packet contexts are released. All other source groups/queues shall + * be serviced normally. + * + * @IPAHAL_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear. + * All groups / Rx queues shall not be serviced until IPA pipeline is fully + * clear. This should be used for debug only. + */ +enum ipahal_pipeline_clear_option { + IPAHAL_HPS_CLEAR, + IPAHAL_SRC_GRP_CLEAR, + IPAHAL_FULL_PIPELINE_CLEAR +}; + +/* + * struct ipahal_imm_cmd_register_write - REGISTER_WRITE cmd payload + * Write value to register. Allows reg changes to be synced with data packet + * and other immediate commands. Can be used to access the sram + * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr + * @value: value to write to register + * @value_mask: mask specifying which value bits to write to the register + * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait) + * @pipeline_clear_option: options for pipeline clear waiting + */ +struct ipahal_imm_cmd_register_write { + u32 offset; + u32 value; + u32 value_mask; + bool skip_pipeline_clear; + enum ipahal_pipeline_clear_option pipeline_clear_options; +}; + +/* + * struct ipahal_imm_cmd_dma_shared_mem - DMA_SHARED_MEM cmd payload + * Perform mem copy into or out of the SW area of IPA local mem + * @system_addr: Address in system memory + * @size: Size in bytes of data to copy. Expected size is up to 2K bytes + * @local_addr: Address in IPA local memory + * @clear_after_read: Clear local memory at the end of a read operation allows + * atomic read and clear if HPS is clear. Ignore for writes. + * @is_read: Read operation from local memory? If not, then write. + * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait) + * @pipeline_clear_option: options for pipeline clear waiting + */ +struct ipahal_imm_cmd_dma_shared_mem { + u64 system_addr; + u32 size; + u32 local_addr; + bool clear_after_read; + bool is_read; + bool skip_pipeline_clear; + enum ipahal_pipeline_clear_option pipeline_clear_options; +}; + +/* + * struct ipahal_imm_cmd_ip_packet_tag_status - IP_PACKET_TAG_STATUS cmd payload + * This cmd is used for to allow SW to track HW processing by setting a TAG + * value that is passed back to SW inside Packet Status information. + * TAG info will be provided as part of Packet Status info generated for + * the next pkt transferred over the pipe. + * This immediate command must be followed by a packet in the same transfer. + * @tag: Tag that is provided back to SW + */ +struct ipahal_imm_cmd_ip_packet_tag_status { + u64 tag; +}; + +/* + * struct ipahal_imm_cmd_dma_task_32b_addr - IPA_DMA_TASK_32B_ADDR cmd payload + * Used by clients using 32bit addresses. Used to perform DMA operation on + * multiple descriptors. 
+ * The Opcode is dynamic, where it holds the number of buffers to process + * @cmplt: Complete flag: If true, IPA interrupts SW when the entire + * DMA related data was completely xfered to its destination. + * @eof: End Of Frame flag: If true, IPA asserts the EOT to the + * dest client. This is used for aggr sequence + * @flsh: Flush flag: If true, pkt will go through the IPA blocks but + * will not be xfered to dest client but rather will be discarded + * @lock: Lock pipe flag: If true, IPA will stop processing descriptors + * from other EPs in the same src grp (RX queue) + * @unlock: Unlock pipe flag: If true, IPA will stop exclusively + * servicing current EP out of the src EPs of the grp (RX queue) + * @size1: Size of buffer1 data + * @addr1: Pointer to buffer1 data + * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs, + * only the first one needs to have this field set. It will be ignored + * in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK + * must contain this field (2 or more buffers) or EOT. + */ +struct ipahal_imm_cmd_dma_task_32b_addr { + bool cmplt; + bool eof; + bool flsh; + bool lock; + bool unlock; + u32 size1; + u32 addr1; + u32 packet_size; +}; + +/* + * struct ipahal_imm_cmd_pyld - Immediate cmd payload information + * @len: length of the buffer + * @opcode: opcode of the immediate command + * @data: buffer containing the immediate command payload. Buffer goes + * back to back with this structure + */ +struct ipahal_imm_cmd_pyld { + u16 len; + u16 opcode; + u8 data[0]; +}; + + +/* Immediate command Function APIs */ + +/* + * ipahal_imm_cmd_name_str() - returns string that represents the imm cmd + * @cmd_name: [in] Immediate command name + */ +const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name); + +/* + * ipahal_construct_imm_cmd() - Construct immediate command + * This function builds imm cmd bulk that can be sent to IPA + * The command will be allocated dynamically. + * When done using it, call ipahal_destroy_imm_cmd() to release it + */ +struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx); + +/* + * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-Op + * Core driver may want functionality to inject NOP commands to IPA + * to ensure e.g., PIPELINE clear before some other operation. + * The functionality given by this function can be reached by + * ipahal_construct_imm_cmd(). This function is a helper for the core driver + * to reach this NOP functionality easily. + * @skip_pipline_clear: if to skip pipeline clear waiting (don't wait) + * @pipline_clr_opt: options for pipeline clear waiting + * @is_atomic_ctx: is called in atomic context or can sleep? + */ +struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd( + bool skip_pipline_clear, + enum ipahal_pipeline_clear_option pipline_clr_opt, + bool is_atomic_ctx); + +/* + * ipahal_destroy_imm_cmd() - Destroy/Release bulk that was built + * by the construction functions + */ +static inline void ipahal_destroy_imm_cmd(struct ipahal_imm_cmd_pyld *pyld) +{ + kfree(pyld); +} + + +/* IPA Status packet Structures and Function APIs */ + +/* + * enum ipahal_pkt_status_opcode - Packet Status Opcode + * @IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS: Packet Status generated as part of + * IPA second processing pass for a packet (i.e. IPA XLAT processing for + * the translated packet).
+ */ +enum ipahal_pkt_status_opcode { + IPAHAL_PKT_STATUS_OPCODE_PACKET = 0, + IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE, + IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET, + IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET, + IPAHAL_PKT_STATUS_OPCODE_LOG, + IPAHAL_PKT_STATUS_OPCODE_DCMP, + IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS, +}; + +/* + * enum ipahal_pkt_status_exception - Packet Status exception type + * @IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH: formerly IHL exception. + * + * Note: IPTYPE, PACKET_LENGTH and PACKET_THRESHOLD exceptions means that + * partial / no IP processing took place and corresponding Status Mask + * fields should be ignored. Flt and rt info is not valid. + * + * NOTE:: Any change to this enum, need to change to + * ipahal_pkt_status_exception_to_str array as well. + */ +enum ipahal_pkt_status_exception { + IPAHAL_PKT_STATUS_EXCEPTION_NONE = 0, + IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR, + IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE, + IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH, + IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD, + IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS, + IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT, + /* + * NAT and IPv6CT have the same value at HW. + * NAT for IPv4 and IPv6CT for IPv6 exceptions + */ + IPAHAL_PKT_STATUS_EXCEPTION_NAT, + IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT, + IPAHAL_PKT_STATUS_EXCEPTION_MAX, +}; + +/* + * enum ipahal_pkt_status_mask - Packet Status bitmask shift values of + * the contained flags. This bitmask indicates flags on the properties of + * the packet as well as IPA processing it may had. + * @FRAG_PROCESS: Frag block processing flag: Was pkt processed by frag block? + * Also means the frag info is valid unless exception or first frag + * @FILT_PROCESS: Flt block processing flag: Was pkt processed by flt block? + * Also means that flt info is valid. + * @NAT_PROCESS: NAT block processing flag: Was pkt processed by NAT block? + * Also means that NAT info is valid, unless exception. + * @ROUTE_PROCESS: Rt block processing flag: Was pkt processed by rt block? + * Also means that rt info is valid, unless exception. + * @TAG_VALID: Flag specifying if TAG and TAG info valid? + * @FRAGMENT: Flag specifying if pkt is IP fragment. + * @FIRST_FRAGMENT: Flag specifying if pkt is first fragment. In this case, frag + * info is invalid + * @V4: Flag specifying pkt is IPv4 or IPv6 + * @CKSUM_PROCESS: CSUM block processing flag: Was pkt processed by csum block? + * If so, csum trailer exists + * @AGGR_PROCESS: Aggr block processing flag: Was pkt processed by aggr block? + * @DEST_EOT: Flag specifying if EOT was asserted for the pkt on dest endp + * @DEAGGR_PROCESS: Deaggr block processing flag: Was pkt processed by deaggr + * block? + * @DEAGG_FIRST: Flag specifying if this is the first pkt in deaggr frame + * @SRC_EOT: Flag specifying if EOT asserted by src endp when sending the buffer + * @PREV_EOT: Flag specifying if EOT was sent just before the pkt as part of + * aggr hard-byte-limit + * @BYTE_LIMIT: Flag specifying if pkt is over a configured byte limit. 
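+ * A parsed status can be checked for any of these flags with the + * IPAHAL_PKT_STATUS_MASK_FLAG_VAL() macro defined below, e.g. + * IPAHAL_PKT_STATUS_MASK_FLAG_VAL(IPAHAL_PKT_STATUS_MASK_V4_SHFT, &status) + * evaluates to true when the parsed packet is IPv4.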
+ */ +enum ipahal_pkt_status_mask { + IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT = 0, + IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, + IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT, + IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT, + IPAHAL_PKT_STATUS_MASK_V4_SHFT, + IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT, + IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT, + IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT, + IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT, + IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT, +}; + +/* + * Returns a boolean value representing a property of a packet. + * @__flag_shft: The shift value of the flag of the needed property in the + * status bitmask. See enum ipahal_pkt_status_mask + * @__status: Pointer to abstracted status structure + */ +#define IPAHAL_PKT_STATUS_MASK_FLAG_VAL(__flag_shft, __status) \ + (((__status)->status_mask) & ((u32)0x1<<(__flag_shft)) ? true : false) + +/* + * enum ipahal_pkt_status_nat_type - Type of NAT + */ +enum ipahal_pkt_status_nat_type { + IPAHAL_PKT_STATUS_NAT_NONE, + IPAHAL_PKT_STATUS_NAT_SRC, + IPAHAL_PKT_STATUS_NAT_DST, +}; + +/* + * struct ipahal_pkt_status - IPA status packet abstracted payload. + * This structure describes the status packet fields for the + * following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET, + * IPA_STATUS_SUSPENDED_PACKET. + * Other status types have a different status packet structure. + * @tag_info: S/W defined value provided via immediate command + * @status_opcode: The Type of the status (Opcode). + * @exception: The first exception that took place. + * In case of exception, src endp and pkt len are always valid. + * @status_mask: Bit mask for flags on several properties of the packet + * and the processing it may have gone through in IPA. See enum ipahal_pkt_status_mask + * @pkt_len: Pkt pyld len including hdr and retained hdr if used. Does + * not include padding or checksum trailer len. + * @metadata: meta data value used by packet + * @flt_local: Filter table location flag: Does the matching flt rule belong to + * a flt tbl that resides in lcl memory? (if not, then system mem) + * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl? + * @flt_global: Global filter rule flag: Does the matching flt rule belong to + * the global flt tbl? (if not, then the per endp tables) + * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule + * specify to retain the header? + * Starting IPA4.5, this will be true only if the packet has an L2 header. + * @flt_miss: Filtering miss flag: Was there a filtering rule miss? + * In case of a miss, all flt info is to be ignored + * @rt_local: Route table location flag: Does the matching rt rule belong to + * a rt tbl that resides in lcl memory? (if not, then system mem) + * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl? + * @ucp: UC Processing flag + * @rt_miss: Routing miss flag: Was there a routing rule miss? + * @nat_hit: NAT hit flag: Was there a NAT hit? + * @nat_type: Defines the type of the NAT operation: + * @time_of_day_ctr: running counter from IPA clock + * @hdr_local: Header table location flag: In header insertion, was the header + * taken from the table residing in local memory? (If not, then system mem) + * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @flt_rule_id: The ID of the matching filter rule (if no miss). + * This info can be combined with endp_src_idx to locate the exact rule. + * @rt_rule_id: The ID of the matching rt rule. (if no miss). This info + * can be combined with rt_tbl_idx to locate the exact rule. + * @nat_entry_idx: Index of the NAT entry used of NAT processing + * @hdr_offset: Offset of used header in the header table + * @endp_src_idx: Source end point index. + * @endp_dest_idx: Destination end point index. + * Not valid in case of exception + * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match + * @seq_num: Per source endp unique packet sequence number + * @frag_rule: Frag rule index in H/W frag table in case of frag hit + */ +struct ipahal_pkt_status { + u64 tag_info; + enum ipahal_pkt_status_opcode status_opcode; + enum ipahal_pkt_status_exception exception; + u32 status_mask; + u32 pkt_len; + u32 metadata; + bool flt_local; + bool flt_hash; + bool flt_global; + bool flt_ret_hdr; + bool flt_miss; + bool rt_local; + bool rt_hash; + bool ucp; + bool rt_miss; + bool nat_hit; + enum ipahal_pkt_status_nat_type nat_type; + u32 time_of_day_ctr; + bool hdr_local; + bool frag_hit; + u16 flt_rule_id; + u16 rt_rule_id; + u16 nat_entry_idx; + u16 hdr_offset; + u8 endp_src_idx; + u8 endp_dest_idx; + u8 rt_tbl_idx; + u8 seq_num; + u8 frag_rule; +}; + +/* + * ipahal_pkt_status_get_size() - Get H/W size of packet status + */ +u32 ipahal_pkt_status_get_size(void); + +/* + * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form + * @unparsed_status: Pointer to H/W format of the packet status as read from H/W + * @status: Pointer to pre-allocated buffer where the parsed info will be stored + */ +void ipahal_pkt_status_parse(const void *unparsed_status, + struct ipahal_pkt_status *status); + +/* + * ipahal_pkt_status_exception_str() - returns string represents exception type + * @exception: [in] The exception type + */ +const char *ipahal_pkt_status_exception_str( + enum ipahal_pkt_status_exception exception); + +/* + * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to + * base address and offset given. + * @base: dma base address + * @offset: offset from base address where the data will be copied + * @hdr: the header to be copied + * @hdr_len: the length of the header + */ +void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len); + +/* + * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to + * base address and offset given. + * @type: type of header processing context + * @base: dma base address + * @offset: offset from base address where the data will be copied + * @hdr_len: the length of the header + * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr + * @phys_base: memory location in DDR + * @hdr_base_addr: base address in table + * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters + */ +int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type, + void *base, u32 offset, u32 hdr_len, + bool is_hdr_proc_ctx, dma_addr_t phys_base, + u32 hdr_base_addr, + struct ipa_hdr_offset_entry *offset_entry, + struct ipa_l2tp_hdr_proc_ctx_params l2tp_params); + +/* + * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition + * of header processing context according to the type of processing context + * @type: header processing context type (no processing context, + * IPA_HDR_PROC_ETHII_TO_ETHII etc.) 
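+ * With the v3 implementation wired in by ipahal_hdr_init() this is + * sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq) for IPA_HDR_PROC_NONE and + * sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq) for types that also + * carry a processing command TLV.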
+ */ +int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type); + +int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base, + struct device *ipa_pdev); +void ipahal_destroy(void); +void ipahal_free_dma_mem(struct ipa_mem_buffer *mem); + +#endif /* _IPAHAL_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c new file mode 100644 index 000000000000..7ea005be140d --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c @@ -0,0 +1,4086 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include "ipahal.h" +#include "ipahal_fltrt.h" +#include "ipahal_fltrt_i.h" +#include "ipahal_i.h" +#include "../../ipa_common_i.h" + +/* + * struct ipahal_fltrt_obj - Flt/Rt H/W information for specific IPA version + * @support_hash: Is hashable tables supported + * @tbl_width: Width of table in bytes + * @sysaddr_alignment: System table address alignment + * @lcladdr_alignment: Local table offset alignment + * @blk_sz_alignment: Rules block size alignment + * @rule_start_alignment: Rule start address alignment + * @tbl_hdr_width: Width of the header structure in bytes + * @tbl_addr_mask: Masking for Table address + * @rule_max_prio: Max possible priority of a rule + * @rule_min_prio: Min possible priority of a rule + * @low_rule_id: Low value of Rule ID that can be used + * @rule_id_bit_len: Rule is high (MSB) bit len + * @rule_buf_size: Max size rule may utilize. + * @write_val_to_hdr: Write address or offset to header entry + * @create_flt_bitmap: Create bitmap in H/W format using given bitmap + * @create_tbl_addr: Given raw table address, create H/W formated one + * @parse_tbl_addr: Parse the given H/W address (hdr format) + * @rt_generate_hw_rule: Generate RT rule in H/W format + * @flt_generate_hw_rule: Generate FLT rule in H/W format + * @flt_generate_eq: Generate flt equation attributes from rule attributes + * @rt_parse_hw_rule: Parse rt rule read from H/W + * @flt_parse_hw_rule: Parse flt rule read from H/W + * @eq_bitfield: Array of the bit fields of the support equations. 
+ * 0xFF means the equation is not supported + */ +struct ipahal_fltrt_obj { + bool support_hash; + u32 tbl_width; + u32 sysaddr_alignment; + u32 lcladdr_alignment; + u32 blk_sz_alignment; + u32 rule_start_alignment; + u32 tbl_hdr_width; + u32 tbl_addr_mask; + int rule_max_prio; + int rule_min_prio; + u32 low_rule_id; + u32 rule_id_bit_len; + u32 rule_buf_size; + u8* (*write_val_to_hdr)(u64 val, u8 *hdr); + u64 (*create_flt_bitmap)(u64 ep_bitmap); + u64 (*create_tbl_addr)(bool is_sys, u64 addr); + void (*parse_tbl_addr)(u64 hwaddr, u64 *addr, bool *is_sys); + int (*rt_generate_hw_rule)(struct ipahal_rt_rule_gen_params *params, + u32 *hw_len, u8 *buf); + int (*flt_generate_hw_rule)(struct ipahal_flt_rule_gen_params *params, + u32 *hw_len, u8 *buf); + int (*flt_generate_eq)(enum ipa_ip_type ipt, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb); + int (*rt_parse_hw_rule)(u8 *addr, struct ipahal_rt_rule_entry *rule); + int (*flt_parse_hw_rule)(u8 *addr, struct ipahal_flt_rule_entry *rule); + u8 eq_bitfield[IPA_EQ_MAX]; +}; + + +static u64 ipa_fltrt_create_flt_bitmap(u64 ep_bitmap) +{ + /* At IPA3, a global configuration is possible but not used */ + return (ep_bitmap << 1) & ~0x1; +} + +static u64 ipa_fltrt_create_tbl_addr(bool is_sys, u64 addr) +{ + if (is_sys) { + if (addr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) { + IPAHAL_ERR( + "sys addr is not aligned accordingly addr=0x%pad\n", + &addr); + ipa_assert(); + return 0; + } + } else { + if (addr & IPA3_0_HW_TBL_LCLADDR_ALIGNMENT) { + IPAHAL_ERR("addr/ofst isn't lcl addr aligned %llu\n", + addr); + ipa_assert(); + return 0; + } + /* + * for local tables (at sram), offsets are used as table + * addresses. The offset needs to be in 8B units + * (local address aligned) and left shifted to its place. + * The local bit needs to be enabled.
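+ * In short, the returned value is + * (addr / (IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1)) * + * (IPA3_0_HW_TBL_ADDR_MASK + 1) + 1; ipa_fltrt_parse_tbl_addr() below + * reverses this packing.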
+ */ + addr /= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1; + addr *= IPA3_0_HW_TBL_ADDR_MASK + 1; + addr += 1; + } + + return addr; +} + +static void ipa_fltrt_parse_tbl_addr(u64 hwaddr, u64 *addr, bool *is_sys) +{ + IPAHAL_DBG_LOW("Parsing hwaddr 0x%llx\n", hwaddr); + + *is_sys = !(hwaddr & 0x1); + hwaddr &= (~0ULL - 1); + if (hwaddr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) { + IPAHAL_ERR( + "sys addr is not aligned accordingly addr=0x%pad\n", + &hwaddr); + ipa_assert(); + return; + } + + if (!*is_sys) { + hwaddr /= IPA3_0_HW_TBL_ADDR_MASK + 1; + hwaddr *= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1; + } + + *addr = hwaddr; +} + +/* Update these tables of the number of equations changes */ +static const int ipa3_0_ofst_meq32[] = { IPA_OFFSET_MEQ32_0, + IPA_OFFSET_MEQ32_1}; +static const int ipa3_0_ofst_meq128[] = { IPA_OFFSET_MEQ128_0, + IPA_OFFSET_MEQ128_1}; +static const int ipa3_0_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0, + IPA_IHL_OFFSET_RANGE16_1}; +static const int ipa3_0_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0, + IPA_IHL_OFFSET_MEQ32_1}; + +static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt, + const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule); +static int ipa_fltrt_generate_hw_rule_bdy_from_eq( + const struct ipa_ipfltri_rule_eq *attrib, u8 **buf); +static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb); +static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb); +static int ipa_flt_generate_eq(enum ipa_ip_type ipt, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb); +static int ipa_rt_parse_hw_rule(u8 *addr, + struct ipahal_rt_rule_entry *rule); +static int ipa_flt_parse_hw_rule(u8 *addr, + struct ipahal_flt_rule_entry *rule); +static int ipa_flt_parse_hw_rule_ipav4(u8 *addr, + struct ipahal_flt_rule_entry *rule); + +#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \ + (ARRAY_SIZE(__eq_array) <= (__eq_index)) + +#define IPA_GET_RULE_EQ_BIT_PTRN(__eq) \ + (BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].eq_bitfield[(__eq)])) + +#define IPA_IS_RULE_EQ_VALID(__eq) \ + (ipahal_fltrt_objs[ipahal_ctx->hw_type].eq_bitfield[(__eq)] != 0xFF) + +/* + * ipa_fltrt_rule_generation_err_check() - check basic validity on the rule + * attribs before starting building it + * checks if not not using ipv4 attribs on ipv6 and vice-versa + * @ip: IP address type + * @attrib: IPA rule attribute + * + * Return: 0 on success, -EPERM on failure + */ +static int ipa_fltrt_rule_generation_err_check( + enum ipa_ip_type ipt, const struct ipa_rule_attrib *attrib) +{ + if (ipt == IPA_IP_v4) { + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR || + attrib->attrib_mask & IPA_FLT_TC || + attrib->attrib_mask & IPA_FLT_FLOW_LABEL) { + IPAHAL_ERR_RL("v6 attrib's specified for v4 rule\n"); + return -EPERM; + } + } else if (ipt == IPA_IP_v6) { + if (attrib->attrib_mask & IPA_FLT_TOS || + attrib->attrib_mask & IPA_FLT_PROTOCOL) { + IPAHAL_ERR_RL("v4 attrib's specified for v6 rule\n"); + return -EPERM; + } + } else { + IPAHAL_ERR_RL("unsupported ip %d\n", ipt); + return -EPERM; + } + + return 0; +} + +static int ipa_rt_gen_hw_rule(struct ipahal_rt_rule_gen_params *params, + u32 *hw_len, u8 *buf) +{ + struct ipa3_0_rt_rule_hw_hdr *rule_hdr; + u8 *start; + u16 en_rule = 0; + + start = buf; + rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)buf; + + ipa_assert_on(params->dst_pipe_idx & ~0x1F); + rule_hdr->u.hdr.pipe_dest_idx = 
params->dst_pipe_idx; + switch (params->hdr_type) { + case IPAHAL_RT_RULE_HDR_PROC_CTX: + rule_hdr->u.hdr.system = !params->hdr_lcl; + rule_hdr->u.hdr.proc_ctx = 1; + ipa_assert_on(params->hdr_ofst & 31); + rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 5; + break; + case IPAHAL_RT_RULE_HDR_RAW: + rule_hdr->u.hdr.system = !params->hdr_lcl; + rule_hdr->u.hdr.proc_ctx = 0; + ipa_assert_on(params->hdr_ofst & 3); + rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 2; + break; + case IPAHAL_RT_RULE_HDR_NONE: + rule_hdr->u.hdr.system = !params->hdr_lcl; + rule_hdr->u.hdr.proc_ctx = 0; + rule_hdr->u.hdr.hdr_offset = 0; + break; + default: + IPAHAL_ERR("Invalid HDR type %d\n", params->hdr_type); + WARN_ON_RATELIMIT_IPA(1); + return -EINVAL; + } + + ipa_assert_on(params->priority & ~0x3FF); + rule_hdr->u.hdr.priority = params->priority; + rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0; + ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + rule_hdr->u.hdr.rule_id = params->id; + + buf += sizeof(struct ipa3_0_rt_rule_hw_hdr); + + if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, ¶ms->rule->attrib, + &buf, &en_rule)) { + IPAHAL_ERR("fail to generate hw rule\n"); + return -EPERM; + } + rule_hdr->u.hdr.en_rule = en_rule; + + IPAHAL_DBG_LOW("en_rule 0x%x\n", en_rule); + ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr); + + if (*hw_len == 0) { + *hw_len = buf - start; + } else if (*hw_len != (buf - start)) { + IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n", + *hw_len, (buf - start)); + return -EPERM; + } + + return 0; +} + +static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params, + u32 *hw_len, u8 *buf) +{ + struct ipa3_0_flt_rule_hw_hdr *rule_hdr; + u8 *start; + u16 en_rule = 0; + + start = buf; + rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)buf; + + switch (params->rule->action) { + case IPA_PASS_TO_ROUTING: + rule_hdr->u.hdr.action = 0x0; + break; + case IPA_PASS_TO_SRC_NAT: + rule_hdr->u.hdr.action = 0x1; + break; + case IPA_PASS_TO_DST_NAT: + rule_hdr->u.hdr.action = 0x2; + break; + case IPA_PASS_TO_EXCEPTION: + rule_hdr->u.hdr.action = 0x3; + break; + default: + IPAHAL_ERR_RL("Invalid Rule Action %d\n", params->rule->action); + WARN_ON_RATELIMIT_IPA(1); + return -EINVAL; + } + ipa_assert_on(params->rt_tbl_idx & ~0x1F); + rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx; + rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 
0x1 : 0x0; + rule_hdr->u.hdr.rsvd1 = 0; + rule_hdr->u.hdr.rsvd2 = 0; + rule_hdr->u.hdr.rsvd3 = 0; + + ipa_assert_on(params->priority & ~0x3FF); + rule_hdr->u.hdr.priority = params->priority; + ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + rule_hdr->u.hdr.rule_id = params->id; + + buf += sizeof(struct ipa3_0_flt_rule_hw_hdr); + + if (params->rule->eq_attrib_type) { + if (ipa_fltrt_generate_hw_rule_bdy_from_eq( + ¶ms->rule->eq_attrib, &buf)) { + IPAHAL_ERR_RL("fail to generate hw rule from eq\n"); + return -EPERM; + } + en_rule = params->rule->eq_attrib.rule_eq_bitmap; + } else { + if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, + ¶ms->rule->attrib, &buf, &en_rule)) { + IPAHAL_ERR_RL("fail to generate hw rule\n"); + return -EPERM; + } + } + rule_hdr->u.hdr.en_rule = en_rule; + + IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n", + en_rule, + rule_hdr->u.hdr.action, + rule_hdr->u.hdr.rt_tbl_idx, + rule_hdr->u.hdr.retain_hdr); + IPAHAL_DBG_LOW("priority=%d, rule_id=%d\n", + rule_hdr->u.hdr.priority, + rule_hdr->u.hdr.rule_id); + + ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr); + + if (*hw_len == 0) { + *hw_len = buf - start; + } else if (*hw_len != (buf - start)) { + IPAHAL_ERR_RL("hw_len differs b/w passed=0x%x calc=%td\n", + *hw_len, (buf - start)); + return -EPERM; + } + + return 0; +} + +static int ipa_flt_gen_hw_rule_ipav4(struct ipahal_flt_rule_gen_params *params, + u32 *hw_len, u8 *buf) +{ + struct ipa4_0_flt_rule_hw_hdr *rule_hdr; + u8 *start; + u16 en_rule = 0; + + start = buf; + rule_hdr = (struct ipa4_0_flt_rule_hw_hdr *)buf; + + switch (params->rule->action) { + case IPA_PASS_TO_ROUTING: + rule_hdr->u.hdr.action = 0x0; + break; + case IPA_PASS_TO_SRC_NAT: + rule_hdr->u.hdr.action = 0x1; + break; + case IPA_PASS_TO_DST_NAT: + rule_hdr->u.hdr.action = 0x2; + break; + case IPA_PASS_TO_EXCEPTION: + rule_hdr->u.hdr.action = 0x3; + break; + default: + IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action); + WARN_ON_RATELIMIT_IPA(1); + return -EINVAL; + } + + ipa_assert_on(params->rt_tbl_idx & ~0x1F); + rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx; + rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 
0x1 : 0x0; + + ipa_assert_on(params->rule->pdn_idx & ~0xF); + rule_hdr->u.hdr.pdn_idx = params->rule->pdn_idx; + rule_hdr->u.hdr.set_metadata = params->rule->set_metadata; + rule_hdr->u.hdr.rsvd2 = 0; + rule_hdr->u.hdr.rsvd3 = 0; + + ipa_assert_on(params->priority & ~0x3FF); + rule_hdr->u.hdr.priority = params->priority; + ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + rule_hdr->u.hdr.rule_id = params->id; + + buf += sizeof(struct ipa4_0_flt_rule_hw_hdr); + + if (params->rule->eq_attrib_type) { + if (ipa_fltrt_generate_hw_rule_bdy_from_eq( + ¶ms->rule->eq_attrib, &buf)) { + IPAHAL_ERR("fail to generate hw rule from eq\n"); + return -EPERM; + } + en_rule = params->rule->eq_attrib.rule_eq_bitmap; + } else { + if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, + ¶ms->rule->attrib, &buf, &en_rule)) { + IPAHAL_ERR("fail to generate hw rule\n"); + return -EPERM; + } + } + rule_hdr->u.hdr.en_rule = en_rule; + + IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n", + en_rule, + rule_hdr->u.hdr.action, + rule_hdr->u.hdr.rt_tbl_idx, + rule_hdr->u.hdr.retain_hdr); + IPAHAL_DBG_LOW("priority=%d, rule_id=%d, pdn=%d, set_metadata=%d\n", + rule_hdr->u.hdr.priority, + rule_hdr->u.hdr.rule_id, + rule_hdr->u.hdr.pdn_idx, + rule_hdr->u.hdr.set_metadata); + + ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr); + + if (*hw_len == 0) { + *hw_len = buf - start; + } else if (*hw_len != (buf - start)) { + IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n", + *hw_len, (buf - start)); + return -EPERM; + } + + return 0; +} + +/* + * This array contains the FLT/RT info for IPAv3 and later. + * All the information on IPAv3 are statically defined below. + * If information is missing regarding on some IPA version, + * the init function will fill it with the information from the previous + * IPA version. + * Information is considered missing if all of the fields are 0. 
+ */ +static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = { + /* IPAv3 */ + [IPA_HW_v3_0] = { + true, + IPA3_0_HW_TBL_WIDTH, + IPA3_0_HW_TBL_SYSADDR_ALIGNMENT, + IPA3_0_HW_TBL_LCLADDR_ALIGNMENT, + IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT, + IPA3_0_HW_RULE_START_ALIGNMENT, + IPA3_0_HW_TBL_HDR_WIDTH, + IPA3_0_HW_TBL_ADDR_MASK, + IPA3_0_RULE_MAX_PRIORITY, + IPA3_0_RULE_MIN_PRIORITY, + IPA3_0_LOW_RULE_ID, + IPA3_0_RULE_ID_BIT_LEN, + IPA3_0_HW_RULE_BUF_SIZE, + ipa_write_64, + ipa_fltrt_create_flt_bitmap, + ipa_fltrt_create_tbl_addr, + ipa_fltrt_parse_tbl_addr, + ipa_rt_gen_hw_rule, + ipa_flt_gen_hw_rule, + ipa_flt_generate_eq, + ipa_rt_parse_hw_rule, + ipa_flt_parse_hw_rule, + { + [IPA_TOS_EQ] = 0, + [IPA_PROTOCOL_EQ] = 1, + [IPA_TC_EQ] = 2, + [IPA_OFFSET_MEQ128_0] = 3, + [IPA_OFFSET_MEQ128_1] = 4, + [IPA_OFFSET_MEQ32_0] = 5, + [IPA_OFFSET_MEQ32_1] = 6, + [IPA_IHL_OFFSET_MEQ32_0] = 7, + [IPA_IHL_OFFSET_MEQ32_1] = 8, + [IPA_METADATA_COMPARE] = 9, + [IPA_IHL_OFFSET_RANGE16_0] = 10, + [IPA_IHL_OFFSET_RANGE16_1] = 11, + [IPA_IHL_OFFSET_EQ_32] = 12, + [IPA_IHL_OFFSET_EQ_16] = 13, + [IPA_FL_EQ] = 14, + [IPA_IS_FRAG] = 15, + [IPA_IS_PURE_ACK] = 0xFF, + }, + }, + + /* IPAv4 */ + [IPA_HW_v4_0] = { + true, + IPA3_0_HW_TBL_WIDTH, + IPA3_0_HW_TBL_SYSADDR_ALIGNMENT, + IPA3_0_HW_TBL_LCLADDR_ALIGNMENT, + IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT, + IPA3_0_HW_RULE_START_ALIGNMENT, + IPA3_0_HW_TBL_HDR_WIDTH, + IPA3_0_HW_TBL_ADDR_MASK, + IPA3_0_RULE_MAX_PRIORITY, + IPA3_0_RULE_MIN_PRIORITY, + IPA3_0_LOW_RULE_ID, + IPA3_0_RULE_ID_BIT_LEN, + IPA3_0_HW_RULE_BUF_SIZE, + ipa_write_64, + ipa_fltrt_create_flt_bitmap, + ipa_fltrt_create_tbl_addr, + ipa_fltrt_parse_tbl_addr, + ipa_rt_gen_hw_rule, + ipa_flt_gen_hw_rule_ipav4, + ipa_flt_generate_eq, + ipa_rt_parse_hw_rule, + ipa_flt_parse_hw_rule_ipav4, + { + [IPA_TOS_EQ] = 0, + [IPA_PROTOCOL_EQ] = 1, + [IPA_TC_EQ] = 2, + [IPA_OFFSET_MEQ128_0] = 3, + [IPA_OFFSET_MEQ128_1] = 4, + [IPA_OFFSET_MEQ32_0] = 5, + [IPA_OFFSET_MEQ32_1] = 6, + [IPA_IHL_OFFSET_MEQ32_0] = 7, + [IPA_IHL_OFFSET_MEQ32_1] = 8, + [IPA_METADATA_COMPARE] = 9, + [IPA_IHL_OFFSET_RANGE16_0] = 10, + [IPA_IHL_OFFSET_RANGE16_1] = 11, + [IPA_IHL_OFFSET_EQ_32] = 12, + [IPA_IHL_OFFSET_EQ_16] = 13, + [IPA_FL_EQ] = 14, + [IPA_IS_FRAG] = 15, + [IPA_IS_PURE_ACK] = 0xFF, + }, + }, + + /* IPAv4.2 */ + [IPA_HW_v4_2] = { + false, + IPA3_0_HW_TBL_WIDTH, + IPA3_0_HW_TBL_SYSADDR_ALIGNMENT, + IPA3_0_HW_TBL_LCLADDR_ALIGNMENT, + IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT, + IPA3_0_HW_RULE_START_ALIGNMENT, + IPA3_0_HW_TBL_HDR_WIDTH, + IPA3_0_HW_TBL_ADDR_MASK, + IPA3_0_RULE_MAX_PRIORITY, + IPA3_0_RULE_MIN_PRIORITY, + IPA3_0_LOW_RULE_ID, + IPA3_0_RULE_ID_BIT_LEN, + IPA3_0_HW_RULE_BUF_SIZE, + ipa_write_64, + ipa_fltrt_create_flt_bitmap, + ipa_fltrt_create_tbl_addr, + ipa_fltrt_parse_tbl_addr, + ipa_rt_gen_hw_rule, + ipa_flt_gen_hw_rule_ipav4, + ipa_flt_generate_eq, + ipa_rt_parse_hw_rule, + ipa_flt_parse_hw_rule_ipav4, + { + [IPA_TOS_EQ] = 0, + [IPA_PROTOCOL_EQ] = 1, + [IPA_TC_EQ] = 2, + [IPA_OFFSET_MEQ128_0] = 3, + [IPA_OFFSET_MEQ128_1] = 4, + [IPA_OFFSET_MEQ32_0] = 5, + [IPA_OFFSET_MEQ32_1] = 6, + [IPA_IHL_OFFSET_MEQ32_0] = 7, + [IPA_IHL_OFFSET_MEQ32_1] = 8, + [IPA_METADATA_COMPARE] = 9, + [IPA_IHL_OFFSET_RANGE16_0] = 10, + [IPA_IHL_OFFSET_RANGE16_1] = 11, + [IPA_IHL_OFFSET_EQ_32] = 12, + [IPA_IHL_OFFSET_EQ_16] = 13, + [IPA_FL_EQ] = 14, + [IPA_IS_FRAG] = 15, + [IPA_IS_PURE_ACK] = 0xFF, + }, + }, + + /* IPAv4.5 */ + [IPA_HW_v4_5] = { + true, + IPA3_0_HW_TBL_WIDTH, + IPA3_0_HW_TBL_SYSADDR_ALIGNMENT, + IPA3_0_HW_TBL_LCLADDR_ALIGNMENT, + 
IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT, + IPA3_0_HW_RULE_START_ALIGNMENT, + IPA3_0_HW_TBL_HDR_WIDTH, + IPA3_0_HW_TBL_ADDR_MASK, + IPA3_0_RULE_MAX_PRIORITY, + IPA3_0_RULE_MIN_PRIORITY, + IPA3_0_LOW_RULE_ID, + IPA3_0_RULE_ID_BIT_LEN, + IPA3_0_HW_RULE_BUF_SIZE, + ipa_write_64, + ipa_fltrt_create_flt_bitmap, + ipa_fltrt_create_tbl_addr, + ipa_fltrt_parse_tbl_addr, + ipa_rt_gen_hw_rule, + ipa_flt_gen_hw_rule_ipav4, + ipa_flt_generate_eq, + ipa_rt_parse_hw_rule, + ipa_flt_parse_hw_rule_ipav4, + { + [IPA_TOS_EQ] = 0xFF, + [IPA_PROTOCOL_EQ] = 1, + [IPA_TC_EQ] = 2, + [IPA_OFFSET_MEQ128_0] = 3, + [IPA_OFFSET_MEQ128_1] = 4, + [IPA_OFFSET_MEQ32_0] = 5, + [IPA_OFFSET_MEQ32_1] = 6, + [IPA_IHL_OFFSET_MEQ32_0] = 7, + [IPA_IHL_OFFSET_MEQ32_1] = 8, + [IPA_METADATA_COMPARE] = 9, + [IPA_IHL_OFFSET_RANGE16_0] = 10, + [IPA_IHL_OFFSET_RANGE16_1] = 11, + [IPA_IHL_OFFSET_EQ_32] = 12, + [IPA_IHL_OFFSET_EQ_16] = 13, + [IPA_FL_EQ] = 14, + [IPA_IS_FRAG] = 15, + [IPA_IS_PURE_ACK] = 0, + }, + }, +}; + +static int ipa_flt_generate_eq(enum ipa_ip_type ipt, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb) +{ + if (ipa_fltrt_rule_generation_err_check(ipt, attrib)) + return -EPERM; + + if (ipt == IPA_IP_v4) { + if (ipa_flt_generate_eq_ip4(ipt, attrib, eq_atrb)) { + IPAHAL_ERR("failed to build ipv4 flt eq rule\n"); + return -EPERM; + } + } else if (ipt == IPA_IP_v6) { + if (ipa_flt_generate_eq_ip6(ipt, attrib, eq_atrb)) { + IPAHAL_ERR("failed to build ipv6 flt eq rule\n"); + return -EPERM; + } + } else { + IPAHAL_ERR("unsupported ip %d\n", ipt); + return -EPERM; + } + + /* + * default "rule" means no attributes set -> map to + * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0 + */ + if (attrib->attrib_mask == 0) { + eq_atrb->rule_eq_bitmap = 0; + eq_atrb->rule_eq_bitmap |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_OFFSET_MEQ32_0); + eq_atrb->offset_meq_32[0].offset = 0; + eq_atrb->offset_meq_32[0].mask = 0; + eq_atrb->offset_meq_32[0].value = 0; + } + + return 0; +} + +static void ipa_fltrt_generate_mac_addr_hw_rule(u8 **extra, u8 **rest, + u8 hdr_mac_addr_offset, + const uint8_t mac_addr_mask[ETH_ALEN], + const uint8_t mac_addr[ETH_ALEN]) +{ + int i; + + *extra = ipa_write_8(hdr_mac_addr_offset, *extra); + + /* LSB MASK and ADDR */ + *rest = ipa_write_64(0, *rest); + *rest = ipa_write_64(0, *rest); + + /* MSB MASK and ADDR */ + *rest = ipa_write_16(0, *rest); + for (i = 5; i >= 0; i--) + *rest = ipa_write_8(mac_addr_mask[i], *rest); + *rest = ipa_write_16(0, *rest); + for (i = 5; i >= 0; i--) + *rest = ipa_write_8(mac_addr[i], *rest); +} + +static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule, + const struct ipa_rule_attrib *attrib, + u8 **extra_wrds, u8 **rest_wrds) +{ + u8 *extra = *extra_wrds; + u8 *rest = *rest_wrds; + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + int rc = 0; + bool tos_done = false; + + if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK) { + if (!IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK)) { + IPAHAL_ERR("is_pure_ack eq not supported\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_PURE_ACK); + extra = ipa_write_8(0, extra); + } + + if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) { + if (!IPA_IS_RULE_EQ_VALID(IPA_TOS_EQ)) { + IPAHAL_DBG("tos eq not supported\n"); + } else { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ); + extra = ipa_write_8(attrib->u.v4.tos, extra); + tos_done = true; + } + } + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ); + extra = 
ipa_write_8(attrib->u.v4.protocol, extra); + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -14, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -8, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -22, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -16, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* 0 => Take the first word. 
offset of TOS in v4 header is 1 */ + extra = ipa_write_8(0, extra); + rest = ipa_write_32((attrib->tos_mask << 16), rest); + rest = ipa_write_32((attrib->tos_value << 16), rest); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* 12 => offset of src ip in v4 header */ + extra = ipa_write_8(12, extra); + rest = ipa_write_32(attrib->u.v4.src_addr_mask, rest); + rest = ipa_write_32(attrib->u.v4.src_addr, rest); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* 16 => offset of dst ip in v4 header */ + extra = ipa_write_8(16, extra); + rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest); + rest = ipa_write_32(attrib->u.v4.dst_addr, rest); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* -2 => offset of ether type in L2 hdr */ + extra = ipa_write_8((u8)-2, extra); + rest = ipa_write_16(0, rest); + rest = ipa_write_16(htons(attrib->ether_type), rest); + rest = ipa_write_16(0, rest); + rest = ipa_write_16(htons(attrib->ether_type), rest); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_DBG("ran out of meq32 eq\n"); + } else { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* + * 0 => Take the first word. 
+ * offset of TOS in v4 header is 1 + */ + extra = ipa_write_8(0, extra); + rest = ipa_write_32(0xFF << 16, rest); + rest = ipa_write_32((attrib->u.v4.tos << 16), rest); + ofst_meq32++; + tos_done = true; + } + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 0 => offset of type after v4 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_32(0xFF, rest); + rest = ipa_write_32(attrib->type, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 1 => offset of code after v4 header */ + extra = ipa_write_8(1, extra); + rest = ipa_write_32(0xFF, rest); + rest = ipa_write_32(attrib->code, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 0 => offset of SPI after v4 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_32(0xFFFFFFFF, rest); + rest = ipa_write_32(attrib->spi, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate first ihl meq eq */ + extra = ipa_write_8(8, extra); + rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest); + rest = ipa_write_8(attrib->dst_mac_addr[3], rest); + rest = ipa_write_8(attrib->dst_mac_addr[2], rest); + rest = ipa_write_8(attrib->dst_mac_addr[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr[0], rest); + /* populate second ihl meq eq */ + extra = ipa_write_8(12, extra); + rest = ipa_write_16(0, rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest); + rest = ipa_write_16(0, rest); + rest = ipa_write_8(attrib->dst_mac_addr[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr[4], rest); + ihl_ofst_meq32 += 2; + } + + if (attrib->attrib_mask & IPA_FLT_TCP_SYN) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 12 => offset of SYN after v4 header */ + extra = ipa_write_8(12, extra); + rest = ipa_write_32(0x20000, rest); + rest = ipa_write_32(0x20000, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_DBG("ran out of ihl_meq32 eq\n"); + } else { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + 
/* + * 0 => Take the first word. offset of TOS in + * v4 header is 1. MSB bit asserted at IHL means + * to ignore packet IHL and do offset inside IPA header + */ + extra = ipa_write_8(0x80, extra); + rest = ipa_write_32(0xFF << 16, rest); + rest = ipa_write_32((attrib->u.v4.tos << 16), rest); + ihl_ofst_meq32++; + tos_done = true; + } + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE); + rest = ipa_write_32(attrib->meta_data_mask, rest); + rest = ipa_write_32(attrib->meta_data, rest); + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAHAL_ERR("bad src port range param\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 0 => offset of src port after v4 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_16(attrib->src_port_hi, rest); + rest = ipa_write_16(attrib->src_port_lo, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAHAL_ERR("bad dst port range param\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 2 => offset of dst port after v4 header */ + extra = ipa_write_8(2, extra); + rest = ipa_write_16(attrib->dst_port_hi, rest); + rest = ipa_write_16(attrib->dst_port_lo, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 0 => offset of src port after v4 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_16(attrib->src_port, rest); + rest = ipa_write_16(attrib->src_port, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 2 => offset of dst port after v4 header */ + extra = ipa_write_8(2, extra); + rest = ipa_write_16(attrib->dst_port, rest); + rest = ipa_write_16(attrib->dst_port, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG); + + if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) { + IPAHAL_ERR("could not find equation for tos\n"); + goto err; + } + + goto done; + +err: + rc = -EPERM; +done: + *extra_wrds = extra; + *rest_wrds = rest; + return rc; +} + +static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule, + const struct ipa_rule_attrib *attrib, + u8 **extra_wrds, u8 **rest_wrds) +{ + u8 *extra = *extra_wrds; + u8 *rest = *rest_wrds; + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + int rc = 0; + + /* v6 code below assumes no extension headers TODO: fix this */ + if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK) { + if (!IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK)) { + IPAHAL_ERR("is_pure_ack eq not supported\n"); + goto err; + } + *en_rule |= 
IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_PURE_ACK); + extra = ipa_write_8(0, extra); + } + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ); + extra = ipa_write_8(attrib->u.v6.next_hdr, extra); + } + + if (attrib->attrib_mask & IPA_FLT_TC) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ); + extra = ipa_write_8(attrib->u.v6.tc, extra); + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + /* 8 => offset of src ip in v6 header */ + extra = ipa_write_8(8, extra); + rest = ipa_write_32(attrib->u.v6.src_addr_mask[3], rest); + rest = ipa_write_32(attrib->u.v6.src_addr_mask[2], rest); + rest = ipa_write_32(attrib->u.v6.src_addr[3], rest); + rest = ipa_write_32(attrib->u.v6.src_addr[2], rest); + rest = ipa_write_32(attrib->u.v6.src_addr_mask[1], rest); + rest = ipa_write_32(attrib->u.v6.src_addr_mask[0], rest); + rest = ipa_write_32(attrib->u.v6.src_addr[1], rest); + rest = ipa_write_32(attrib->u.v6.src_addr[0], rest); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + /* 24 => offset of dst ip in v6 header */ + extra = ipa_write_8(24, extra); + rest = ipa_write_32(attrib->u.v6.dst_addr_mask[3], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr_mask[2], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr[3], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr[2], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr_mask[1], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr_mask[0], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr[1], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr[0], rest); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + /* 0 => offset of TOS in v6 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_64(0, rest); + rest = ipa_write_64(0, rest); + rest = ipa_write_32(0, rest); + rest = ipa_write_32((attrib->tos_mask << 20), rest); + rest = ipa_write_32(0, rest); + rest = ipa_write_32((attrib->tos_value << 20), rest); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -14, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -8, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + 
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -22, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -16, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* -2 => offset of ether type in L2 hdr */ + extra = ipa_write_8((u8)-2, extra); + rest = ipa_write_16(0, rest); + rest = ipa_write_16(htons(attrib->ether_type), rest); + rest = ipa_write_16(0, rest); + rest = ipa_write_16(htons(attrib->ether_type), rest); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 0 => offset of type after v6 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_32(0xFF, rest); + rest = ipa_write_32(attrib->type, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 1 => offset of code after v6 header */ + extra = ipa_write_8(1, extra); + rest = ipa_write_32(0xFF, rest); + rest = ipa_write_32(attrib->code, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 0 => offset of SPI after v6 header FIXME */ + extra = ipa_write_8(0, extra); + rest = ipa_write_32(0xFFFFFFFF, rest); + rest = ipa_write_32(attrib->spi, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate first ihl meq eq */ + extra = ipa_write_8(8, extra); + rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest); + rest = ipa_write_8(attrib->dst_mac_addr[3], rest); + rest = ipa_write_8(attrib->dst_mac_addr[2], rest); + rest = 
ipa_write_8(attrib->dst_mac_addr[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr[0], rest); + /* populate second ihl meq eq */ + extra = ipa_write_8(12, extra); + rest = ipa_write_16(0, rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest); + rest = ipa_write_16(0, rest); + rest = ipa_write_8(attrib->dst_mac_addr[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr[4], rest); + ihl_ofst_meq32 += 2; + } + + if (attrib->attrib_mask & IPA_FLT_TCP_SYN) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 12 => offset of SYN after v4 header */ + extra = ipa_write_8(12, extra); + rest = ipa_write_32(0x20000, rest); + rest = ipa_write_32(0x20000, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + + /* populate TCP protocol eq */ + if (attrib->ether_type == 0x0800) { + extra = ipa_write_8(30, extra); + rest = ipa_write_32(0xFF0000, rest); + rest = ipa_write_32(0x60000, rest); + } else { + extra = ipa_write_8(26, extra); + rest = ipa_write_32(0xFF00, rest); + rest = ipa_write_32(0x600, rest); + } + + /* populate TCP SYN eq */ + if (attrib->ether_type == 0x0800) { + extra = ipa_write_8(54, extra); + rest = ipa_write_32(0x20000, rest); + rest = ipa_write_32(0x20000, rest); + } else { + extra = ipa_write_8(74, extra); + rest = ipa_write_32(0x20000, rest); + rest = ipa_write_32(0x20000, rest); + } + ihl_ofst_meq32 += 2; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 22 => offset of IP type after v6 header */ + extra = ipa_write_8(22, extra); + rest = ipa_write_32(0xF0000000, rest); + if (attrib->type == 0x40) + rest = ipa_write_32(0x40000000, rest); + else + rest = ipa_write_32(0x60000000, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 38 => offset of inner IPv4 addr */ + extra = ipa_write_8(38, extra); + rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest); + rest = ipa_write_32(attrib->u.v4.dst_addr, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE); + rest = ipa_write_32(attrib->meta_data_mask, rest); + rest = ipa_write_32(attrib->meta_data, rest); + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 0 => offset of src port after v6 header */ + extra = ipa_write_8(0, 
extra); + rest = ipa_write_16(attrib->src_port, rest); + rest = ipa_write_16(attrib->src_port, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 2 => offset of dst port after v6 header */ + extra = ipa_write_8(2, extra); + rest = ipa_write_16(attrib->dst_port, rest); + rest = ipa_write_16(attrib->dst_port, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAHAL_ERR("bad src port range param\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 0 => offset of src port after v6 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_16(attrib->src_port_hi, rest); + rest = ipa_write_16(attrib->src_port_lo, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAHAL_ERR("bad dst port range param\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 2 => offset of dst port after v6 header */ + extra = ipa_write_8(2, extra); + rest = ipa_write_16(attrib->dst_port_hi, rest); + rest = ipa_write_16(attrib->dst_port_lo, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 20 => offset of Ethertype after v4 header */ + if (attrib->ether_type == 0x0800) { + extra = ipa_write_8(21, extra); + rest = ipa_write_16(0x0045, rest); + rest = ipa_write_16(0x0045, rest); + } else { + extra = ipa_write_8(20, extra); + rest = ipa_write_16(attrib->ether_type, rest); + rest = ipa_write_16(attrib->ether_type, rest); + } + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ); + rest = ipa_write_32(attrib->u.v6.flow_label & 0xFFFFF, + rest); + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG); + + goto done; + +err: + rc = -EPERM; +done: + *extra_wrds = extra; + *rest_wrds = rest; + return rc; +} + +static u8 *ipa_fltrt_copy_mem(u8 *src, u8 *dst, int cnt) +{ + while (cnt--) + *dst++ = *src++; + + return dst; +} + +/* + * ipa_fltrt_generate_hw_rule_bdy() - generate HW rule body (w/o header) + * @ip: IP address type + * @attrib: IPA rule attribute + * @buf: output buffer. 
Advance it after building the rule + * @en_rule: enable rule + * + * Return codes: + * 0: success + * -EPERM: wrong input + */ +static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt, + const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule) +{ + int sz; + int rc = 0; + u8 *extra_wrd_buf; + u8 *rest_wrd_buf; + u8 *extra_wrd_start; + u8 *rest_wrd_start; + u8 *extra_wrd_i; + u8 *rest_wrd_i; + + sz = IPA3_0_HW_TBL_WIDTH * 2 + IPA3_0_HW_RULE_START_ALIGNMENT; + extra_wrd_buf = kzalloc(sz, GFP_KERNEL); + if (!extra_wrd_buf) { + rc = -ENOMEM; + goto fail_extra_alloc; + } + + sz = IPA3_0_HW_RULE_BUF_SIZE + IPA3_0_HW_RULE_START_ALIGNMENT; + rest_wrd_buf = kzalloc(sz, GFP_KERNEL); + if (!rest_wrd_buf) { + rc = -ENOMEM; + goto fail_rest_alloc; + } + + extra_wrd_start = extra_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT; + extra_wrd_start = (u8 *)((long)extra_wrd_start & + ~IPA3_0_HW_RULE_START_ALIGNMENT); + + rest_wrd_start = rest_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT; + rest_wrd_start = (u8 *)((long)rest_wrd_start & + ~IPA3_0_HW_RULE_START_ALIGNMENT); + + extra_wrd_i = extra_wrd_start; + rest_wrd_i = rest_wrd_start; + + rc = ipa_fltrt_rule_generation_err_check(ipt, attrib); + if (rc) { + IPAHAL_ERR_RL("rule generation err check failed\n"); + goto fail_err_check; + } + + if (ipt == IPA_IP_v4) { + if (ipa_fltrt_generate_hw_rule_bdy_ip4(en_rule, attrib, + &extra_wrd_i, &rest_wrd_i)) { + IPAHAL_ERR_RL("failed to build ipv4 hw rule\n"); + rc = -EPERM; + goto fail_err_check; + } + + } else if (ipt == IPA_IP_v6) { + if (ipa_fltrt_generate_hw_rule_bdy_ip6(en_rule, attrib, + &extra_wrd_i, &rest_wrd_i)) { + IPAHAL_ERR_RL("failed to build ipv6 hw rule\n"); + rc = -EPERM; + goto fail_err_check; + } + } else { + IPAHAL_ERR_RL("unsupported ip %d\n", ipt); + goto fail_err_check; + } + + /* + * default "rule" means no attributes set -> map to + * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0 + */ + if (attrib->attrib_mask == 0) { + IPAHAL_DBG_LOW("building default rule\n"); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(ipa3_0_ofst_meq32[0]); + extra_wrd_i = ipa_write_8(0, extra_wrd_i); /* offset */ + rest_wrd_i = ipa_write_32(0, rest_wrd_i); /* mask */ + rest_wrd_i = ipa_write_32(0, rest_wrd_i); /* val */ + } + + IPAHAL_DBG_LOW("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start); + IPAHAL_DBG_LOW("extra_word_2 0x%llx\n", + *(u64 *)(extra_wrd_start + IPA3_0_HW_TBL_WIDTH)); + + extra_wrd_i = ipa_pad_to_64(extra_wrd_i); + sz = extra_wrd_i - extra_wrd_start; + IPAHAL_DBG_LOW("extra words params sz %d\n", sz); + *buf = ipa_fltrt_copy_mem(extra_wrd_start, *buf, sz); + + rest_wrd_i = ipa_pad_to_64(rest_wrd_i); + sz = rest_wrd_i - rest_wrd_start; + IPAHAL_DBG_LOW("non extra words params sz %d\n", sz); + *buf = ipa_fltrt_copy_mem(rest_wrd_start, *buf, sz); + +fail_err_check: + kfree(rest_wrd_buf); +fail_rest_alloc: + kfree(extra_wrd_buf); +fail_extra_alloc: + return rc; +} + + +/** + * ipa_fltrt_calc_extra_wrd_bytes()- Calculate the number of extra words for eq + * @attrib: equation attribute + * + * Return value: 0 on success, negative otherwise + */ +static int ipa_fltrt_calc_extra_wrd_bytes( + const struct ipa_ipfltri_rule_eq *attrib) +{ + int num = 0; + + /* + * tos_eq_present field has two meanings: + * tos equation for IPA ver < 4.5 (as the field name reveals) + * pure_ack equation for IPA ver >= 4.5 + * In both cases it needs one extra word. 
+ */ + if (attrib->tos_eq_present) + num++; + if (attrib->protocol_eq_present) + num++; + if (attrib->tc_eq_present) + num++; + num += attrib->num_offset_meq_128; + num += attrib->num_offset_meq_32; + num += attrib->num_ihl_offset_meq_32; + num += attrib->num_ihl_offset_range_16; + if (attrib->ihl_offset_eq_32_present) + num++; + if (attrib->ihl_offset_eq_16_present) + num++; + + IPAHAL_DBG_LOW("extra bytes number %d\n", num); + + return num; +} + +static int ipa_fltrt_generate_hw_rule_bdy_from_eq( + const struct ipa_ipfltri_rule_eq *attrib, u8 **buf) +{ + uint8_t num_offset_meq_32 = attrib->num_offset_meq_32; + uint8_t num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16; + uint8_t num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32; + uint8_t num_offset_meq_128 = attrib->num_offset_meq_128; + int i; + int extra_bytes; + u8 *extra; + u8 *rest; + + extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(attrib); + /* only 3 eq does not have extra word param, 13 out of 16 is the number + * of equations that needs extra word param + */ + if (extra_bytes > 13) { + IPAHAL_ERR_RL("too much extra bytes\n"); + return -EPERM; + } else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) { + /* two extra words */ + extra = *buf; + rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH * 2; + } else if (extra_bytes > 0) { + /* single exra word */ + extra = *buf; + rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH; + } else { + /* no extra words */ + extra = NULL; + rest = *buf; + } + + /* + * tos_eq_present field has two meanings: + * tos equation for IPA ver < 4.5 (as the field name reveals) + * pure_ack equation for IPA ver >= 4.5 + * In both cases it needs one extra word. + */ + if (attrib->tos_eq_present) { + if (IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK)) { + extra = ipa_write_8(0, extra); + } else if (IPA_IS_RULE_EQ_VALID(IPA_TOS_EQ)) { + extra = ipa_write_8(attrib->tos_eq, extra); + } else { + IPAHAL_ERR("no support for pure_ack and tos eqs\n"); + return -EPERM; + } + } + + if (attrib->protocol_eq_present) + extra = ipa_write_8(attrib->protocol_eq, extra); + + if (attrib->tc_eq_present) + extra = ipa_write_8(attrib->tc_eq, extra); + + if (num_offset_meq_128) { + extra = ipa_write_8(attrib->offset_meq_128[0].offset, extra); + for (i = 0; i < 8; i++) + rest = ipa_write_8(attrib->offset_meq_128[0].mask[i], + rest); + for (i = 0; i < 8; i++) + rest = ipa_write_8(attrib->offset_meq_128[0].value[i], + rest); + for (i = 8; i < 16; i++) + rest = ipa_write_8(attrib->offset_meq_128[0].mask[i], + rest); + for (i = 8; i < 16; i++) + rest = ipa_write_8(attrib->offset_meq_128[0].value[i], + rest); + num_offset_meq_128--; + } + + if (num_offset_meq_128) { + extra = ipa_write_8(attrib->offset_meq_128[1].offset, extra); + for (i = 0; i < 8; i++) + rest = ipa_write_8(attrib->offset_meq_128[1].mask[i], + rest); + for (i = 0; i < 8; i++) + rest = ipa_write_8(attrib->offset_meq_128[1].value[i], + rest); + for (i = 8; i < 16; i++) + rest = ipa_write_8(attrib->offset_meq_128[1].mask[i], + rest); + for (i = 8; i < 16; i++) + rest = ipa_write_8(attrib->offset_meq_128[1].value[i], + rest); + num_offset_meq_128--; + } + + if (num_offset_meq_32) { + extra = ipa_write_8(attrib->offset_meq_32[0].offset, extra); + rest = ipa_write_32(attrib->offset_meq_32[0].mask, rest); + rest = ipa_write_32(attrib->offset_meq_32[0].value, rest); + num_offset_meq_32--; + } + + if (num_offset_meq_32) { + extra = ipa_write_8(attrib->offset_meq_32[1].offset, extra); + rest = ipa_write_32(attrib->offset_meq_32[1].mask, rest); + rest = ipa_write_32(attrib->offset_meq_32[1].value, rest); + 
num_offset_meq_32--; + } + + if (num_ihl_offset_meq_32) { + extra = ipa_write_8(attrib->ihl_offset_meq_32[0].offset, + extra); + + rest = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, rest); + rest = ipa_write_32(attrib->ihl_offset_meq_32[0].value, rest); + num_ihl_offset_meq_32--; + } + + if (num_ihl_offset_meq_32) { + extra = ipa_write_8(attrib->ihl_offset_meq_32[1].offset, + extra); + + rest = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, rest); + rest = ipa_write_32(attrib->ihl_offset_meq_32[1].value, rest); + num_ihl_offset_meq_32--; + } + + if (attrib->metadata_meq32_present) { + rest = ipa_write_32(attrib->metadata_meq32.mask, rest); + rest = ipa_write_32(attrib->metadata_meq32.value, rest); + } + + if (num_ihl_offset_range_16) { + extra = ipa_write_8(attrib->ihl_offset_range_16[0].offset, + extra); + + rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_high, + rest); + rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_low, + rest); + num_ihl_offset_range_16--; + } + + if (num_ihl_offset_range_16) { + extra = ipa_write_8(attrib->ihl_offset_range_16[1].offset, + extra); + + rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_high, + rest); + rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_low, + rest); + num_ihl_offset_range_16--; + } + + if (attrib->ihl_offset_eq_32_present) { + extra = ipa_write_8(attrib->ihl_offset_eq_32.offset, extra); + rest = ipa_write_32(attrib->ihl_offset_eq_32.value, rest); + } + + if (attrib->ihl_offset_eq_16_present) { + extra = ipa_write_8(attrib->ihl_offset_eq_16.offset, extra); + rest = ipa_write_16(attrib->ihl_offset_eq_16.value, rest); + rest = ipa_write_16(0, rest); + } + + if (attrib->fl_eq_present) + rest = ipa_write_32(attrib->fl_eq & 0xFFFFF, rest); + + extra = ipa_pad_to_64(extra); + rest = ipa_pad_to_64(rest); + *buf = rest; + + return 0; +} + +static void ipa_flt_generate_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb, + u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN], + const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128) +{ + int i; + + eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset; + + /* LSB MASK and ADDR */ + memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 8); + memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 8); + + /* MSB MASK and ADDR */ + memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 2); + for (i = 0; i <= 5; i++) + eq_atrb->offset_meq_128[ofst_meq128].mask[15 - i] = + mac_addr_mask[i]; + + memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 2); + for (i = 0; i <= 5; i++) + eq_atrb->offset_meq_128[ofst_meq128].value[15 - i] = + mac_addr[i]; +} + +static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb) +{ + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + u16 eq_bitmap = 0; + u16 *en_rule = &eq_bitmap; + bool tos_done = false; + + if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK) { + if (!IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK)) { + IPAHAL_ERR("is_pure_ack eq not supported\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_PURE_ACK); + /* + * Starting IPA 4.5, where PURE ACK equation supported + * and TOS equation support removed, field tos_eq_present + * represent pure_ack presence. 
+ */ + eq_atrb->tos_eq_present = 1; + eq_atrb->tos_eq = 0; + } + + if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) { + if (!IPA_IS_RULE_EQ_VALID(IPA_TOS_EQ)) { + IPAHAL_DBG("tos eq not supported\n"); + } else { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ); + eq_atrb->tos_eq_present = 1; + eq_atrb->tos_eq = attrib->u.v4.tos; + } + } + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ); + eq_atrb->protocol_eq_present = 1; + eq_atrb->protocol_eq = attrib->u.v4.protocol; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -14, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -8, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -22, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -16, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate the first ihl meq 32 eq */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + (attrib->dst_mac_addr_mask[3] & 0xFF) | + ((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + (attrib->dst_mac_addr[3] & 0xFF) | + ((attrib->dst_mac_addr[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr[0] << 24) & 0xFF000000); + /* populate the second ihl meq 32 eq */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask = + 
((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value = + ((attrib->dst_mac_addr[5] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr[4] << 24) & 0xFF000000); + ihl_ofst_meq32 += 2; + } + + if (attrib->attrib_mask & IPA_FLT_TCP_SYN) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 12 => offset of SYN after v4 header */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 12; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0x20000; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = 0x20000; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + eq_atrb->offset_meq_32[ofst_meq32].offset = 0; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->tos_mask << 16; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->tos_value << 16; + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + eq_atrb->offset_meq_32[ofst_meq32].offset = 12; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->u.v4.src_addr_mask; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->u.v4.src_addr; + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + eq_atrb->offset_meq_32[ofst_meq32].offset = 16; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->u.v4.dst_addr_mask; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->u.v4.dst_addr; + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + eq_atrb->offset_meq_32[ofst_meq32].offset = -2; + eq_atrb->offset_meq_32[ofst_meq32].mask = + htons(attrib->ether_type); + eq_atrb->offset_meq_32[ofst_meq32].value = + htons(attrib->ether_type); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_DBG("ran out of meq32 eq\n"); + } else { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* + * offset 0 => Take the first word. 
+ * offset of TOS in v4 header is 1 + */ + eq_atrb->offset_meq_32[ofst_meq32].offset = 0; + eq_atrb->offset_meq_32[ofst_meq32].mask = + 0xFF << 16; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->u.v4.tos << 16; + ofst_meq32++; + tos_done = true; + } + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->type; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->code; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xFFFFFFFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->spi; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_DBG("ran out of ihl_meq32 eq\n"); + } else { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* + * 0 => Take the first word. offset of TOS in + * v4 header is 1. 
MSB bit asserted at IHL means + * to ignore packet IHL and do offset inside IPA header + */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = + 0x80; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xFF << 16; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->u.v4.tos << 16; + ihl_ofst_meq32++; + tos_done = true; + } + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_METADATA_COMPARE); + eq_atrb->metadata_meq32_present = 1; + eq_atrb->metadata_meq32.offset = 0; + eq_atrb->metadata_meq32.mask = attrib->meta_data_mask; + eq_atrb->metadata_meq32.value = attrib->meta_data; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAHAL_ERR("bad src port range param\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAHAL_ERR("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG); + eq_atrb->ipv4_frag_eq_present = 1; + } + + if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) { + IPAHAL_ERR("could not find equation for tos\n"); + return -EPERM; + } + + eq_atrb->rule_eq_bitmap = *en_rule; + eq_atrb->num_offset_meq_32 = ofst_meq32; + eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16; + eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32; + eq_atrb->num_offset_meq_128 = ofst_meq128; + + return 0; +} + +static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip, + const struct 
ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb) +{ + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + u16 eq_bitmap = 0; + u16 *en_rule = &eq_bitmap; + + if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK) { + if (!IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK)) { + IPAHAL_ERR("is_pure_ack eq not supported\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_PURE_ACK); + /* + * Starting IPA 4.5, where PURE ACK equation supported + * and TOS equation support removed, field tos_eq_present + * represent pure_ack presenence. + */ + eq_atrb->tos_eq_present = 1; + eq_atrb->tos_eq = 0; + } + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_PROTOCOL_EQ); + eq_atrb->protocol_eq_present = 1; + eq_atrb->protocol_eq = attrib->u.v6.next_hdr; + } + + if (attrib->attrib_mask & IPA_FLT_TC) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_TC_EQ); + eq_atrb->tc_eq_present = 1; + eq_atrb->tc_eq = attrib->u.v6.tc; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + /* use the same word order as in ipa v2 */ + eq_atrb->offset_meq_128[ofst_meq128].offset = 8; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0) + = attrib->u.v6.src_addr_mask[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4) + = attrib->u.v6.src_addr_mask[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8) + = attrib->u.v6.src_addr_mask[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = attrib->u.v6.src_addr_mask[3]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0) + = attrib->u.v6.src_addr[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4) + = attrib->u.v6.src_addr[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8) + = attrib->u.v6.src_addr[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = attrib->u.v6.src_addr[3]; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + eq_atrb->offset_meq_128[ofst_meq128].offset = 24; + /* use the same word order as in ipa v2 */ + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0) + = attrib->u.v6.dst_addr_mask[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4) + = attrib->u.v6.dst_addr_mask[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8) + = attrib->u.v6.dst_addr_mask[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = attrib->u.v6.dst_addr_mask[3]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0) + = attrib->u.v6.dst_addr[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4) + = attrib->u.v6.dst_addr[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8) + = attrib->u.v6.dst_addr[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = attrib->u.v6.dst_addr[3]; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + eq_atrb->offset_meq_128[ofst_meq128].offset = 0; + 
memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 12); + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = attrib->tos_mask << 20; + memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 12); + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = attrib->tos_value << 20; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -14, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -8, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -22, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -16, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate the first ihl meq 32 eq */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + (attrib->dst_mac_addr_mask[3] & 0xFF) | + ((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + (attrib->dst_mac_addr[3] & 0xFF) | + ((attrib->dst_mac_addr[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr[0] << 24) & 0xFF000000); + /* populate the second ihl meq 32 eq */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask = + ((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value = + ((attrib->dst_mac_addr[5] << 16) & 0xFF0000) | + 
((attrib->dst_mac_addr[4] << 24) & 0xFF000000); + ihl_ofst_meq32 += 2; + } + + if (attrib->attrib_mask & IPA_FLT_TCP_SYN) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 12 => offset of SYN after v4 header */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 12; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0x20000; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = 0x20000; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + + /* populate TCP protocol eq */ + if (attrib->ether_type == 0x0800) { + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 30; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xFF0000; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + 0x60000; + } else { + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 26; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xFF00; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + 0x600; + } + + /* populate TCP SYN eq */ + if (attrib->ether_type == 0x0800) { + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 54; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0x20000; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + 0x20000; + } else { + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 74; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0x20000; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + 0x20000; + } + ihl_ofst_meq32 += 2; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 22 => offset of inner IP type after v6 header */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 22; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xF0000000; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + (u32)attrib->type << 24; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 38 => offset of inner IPv4 addr */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 38; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + attrib->u.v4.dst_addr_mask; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->u.v4.dst_addr; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR_RL("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + eq_atrb->offset_meq_32[ofst_meq32].offset = -2; + eq_atrb->offset_meq_32[ofst_meq32].mask = + htons(attrib->ether_type); + eq_atrb->offset_meq_32[ofst_meq32].value = + htons(attrib->ether_type); + ofst_meq32++; + } 
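/*
 * Editor's aside (illustrative only, not part of this patch): every
 * attribute block in these eq-generation routines follows one pattern --
 * check that a free equation slot of the needed type remains, set that
 * equation's bit in the en_rule bitmap, fill the equation entry, and
 * advance the per-type counter. The standalone sketch below mimics that
 * pattern with hypothetical stand-ins (slot_candidates, RAN_OUT_OF_EQ,
 * EQ_BIT and the bit values 5/6 are invented here); the real
 * IPA_IS_RAN_OUT_OF_EQ(), IPA_GET_RULE_EQ_BIT_PTRN() macros and the
 * ipa3_0_*[] candidate arrays are defined elsewhere in this driver.
 */
#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))
/* stand-in for IPA_IS_RAN_OUT_OF_EQ(): all candidates already claimed */
#define RAN_OUT_OF_EQ(arr, idx)	((idx) >= ARRAY_SIZE(arr))
/* stand-in for IPA_GET_RULE_EQ_BIT_PTRN(): eq number -> bitmap bit */
#define EQ_BIT(eq)		(1u << (eq))

/* stand-in for a candidate array such as ipa3_0_ihl_ofst_meq32[] */
static const uint8_t slot_candidates[] = { 5, 6 };

int main(void)
{
	uint16_t en_rule = 0;	/* accumulated rule equation bitmap */
	uint8_t used = 0;	/* per-type counter, e.g. ihl_ofst_meq32 */
	int attr;

	/* pretend three attributes each request an equation of this type */
	for (attr = 0; attr < 3; attr++) {
		if (RAN_OUT_OF_EQ(slot_candidates, used)) {
			printf("attr %d: ran out of eq slots\n", attr);
			continue;
		}
		en_rule |= EQ_BIT(slot_candidates[used]);
		used++;
	}
	printf("en_rule bitmap = 0x%x, slots used = %u\n",
	       en_rule, (unsigned int)used);
	return 0;
}
/*
 * With two candidate slots, the third request is rejected, which is the
 * same behaviour as the "ran out of ... eq" error paths above and below.
 */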
+ + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->type; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->code; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xFFFFFFFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->spi; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_METADATA_COMPARE); + eq_atrb->metadata_meq32_present = 1; + eq_atrb->metadata_meq32.offset = 0; + eq_atrb->metadata_meq32.mask = attrib->meta_data_mask; + eq_atrb->metadata_meq32.value = attrib->meta_data; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAHAL_ERR_RL("bad src port range param\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR_RL("ran out 
of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAHAL_ERR_RL("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + if (attrib->ether_type == 0x0800) { + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset + = 21; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = 0x0045; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = 0x0045; + } else { + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = + 20; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->ether_type; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->ether_type; + } + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + if (attrib->ether_type == 0x0800) { + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset + = 21; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = 0x0045; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = 0x0045; + } else { + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = + 20; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->ether_type; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->ether_type; + } + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ); + eq_atrb->fl_eq_present = 1; + eq_atrb->fl_eq = attrib->u.v6.flow_label; + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_IS_FRAG); + eq_atrb->ipv4_frag_eq_present = 1; + } + + eq_atrb->rule_eq_bitmap = *en_rule; + eq_atrb->num_offset_meq_32 = ofst_meq32; + eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16; + eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32; + eq_atrb->num_offset_meq_128 = ofst_meq128; + + return 0; +} + +static int ipa_fltrt_parse_hw_rule_eq(u8 *addr, u32 hdr_sz, + struct ipa_ipfltri_rule_eq *atrb, u32 *rule_size) +{ + u16 eq_bitmap; + int extra_bytes; + u8 *extra; + u8 *rest; + int i; + u8 dummy_extra_wrd; + + if (!addr || !atrb || !rule_size) { + IPAHAL_ERR("Input error: addr=%pK atrb=%pK rule_size=%pK\n", + addr, atrb, rule_size); + return -EINVAL; + } + + eq_bitmap = atrb->rule_eq_bitmap; + + IPAHAL_DBG_LOW("eq_bitmap=0x%x\n", eq_bitmap); + + if (IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK) && + (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_PURE_ACK))) { + /* + * tos_eq_present field represents pure_ack when pure + * ack equation valid (started IPA 4.5). In this case + * tos equation should not be supported. 
+ */ + atrb->tos_eq_present = true; + } + if (IPA_IS_RULE_EQ_VALID(IPA_TOS_EQ) && + (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ))) { + atrb->tos_eq_present = true; + } + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ)) + atrb->protocol_eq_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ)) + atrb->tc_eq_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_0)) + atrb->num_offset_meq_128++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_1)) + atrb->num_offset_meq_128++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_0)) + atrb->num_offset_meq_32++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_1)) + atrb->num_offset_meq_32++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_0)) + atrb->num_ihl_offset_meq_32++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_1)) + atrb->num_ihl_offset_meq_32++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE)) + atrb->metadata_meq32_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_0)) + atrb->num_ihl_offset_range_16++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_1)) + atrb->num_ihl_offset_range_16++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_32)) + atrb->ihl_offset_eq_32_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_16)) + atrb->ihl_offset_eq_16_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ)) + atrb->fl_eq_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG)) + atrb->ipv4_frag_eq_present = true; + + extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(atrb); + /* only 3 eq does not have extra word param, 13 out of 16 is the number + * of equations that needs extra word param + */ + if (extra_bytes > 13) { + IPAHAL_ERR("too much extra bytes\n"); + return -EPERM; + } else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) { + /* two extra words */ + extra = addr + hdr_sz; + rest = extra + IPA3_0_HW_TBL_HDR_WIDTH * 2; + } else if (extra_bytes > 0) { + /* single extra word */ + extra = addr + hdr_sz; + rest = extra + IPA3_0_HW_TBL_HDR_WIDTH; + } else { + /* no extra words */ + dummy_extra_wrd = 0; + extra = &dummy_extra_wrd; + rest = addr + hdr_sz; + } + IPAHAL_DBG_LOW("addr=0x%pK extra=0x%pK rest=0x%pK\n", + addr, extra, rest); + + if (IPA_IS_RULE_EQ_VALID(IPA_TOS_EQ) && atrb->tos_eq_present) + atrb->tos_eq = *extra++; + if (IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK) && atrb->tos_eq_present) { + atrb->tos_eq = 0; + extra++; + } + if (atrb->protocol_eq_present) + atrb->protocol_eq = *extra++; + if (atrb->tc_eq_present) + atrb->tc_eq = *extra++; + + if (atrb->num_offset_meq_128 > 0) { + atrb->offset_meq_128[0].offset = *extra++; + for (i = 0; i < 8; i++) + atrb->offset_meq_128[0].mask[i] = *rest++; + for (i = 0; i < 8; i++) + atrb->offset_meq_128[0].value[i] = *rest++; + for (i = 8; i < 16; i++) + atrb->offset_meq_128[0].mask[i] = *rest++; + for (i = 8; i < 16; i++) + atrb->offset_meq_128[0].value[i] = *rest++; + } + if (atrb->num_offset_meq_128 > 1) { + atrb->offset_meq_128[1].offset = *extra++; + for (i = 0; i < 8; i++) + atrb->offset_meq_128[1].mask[i] = *rest++; + for (i = 0; i < 8; i++) + atrb->offset_meq_128[1].value[i] = *rest++; + for (i = 8; i < 16; i++) + atrb->offset_meq_128[1].mask[i] = *rest++; + for (i = 8; i < 16; i++) + atrb->offset_meq_128[1].value[i] = *rest++; + } + + if (atrb->num_offset_meq_32 > 0) { + atrb->offset_meq_32[0].offset = *extra++; + atrb->offset_meq_32[0].mask = 
*((u32 *)rest); + rest += 4; + atrb->offset_meq_32[0].value = *((u32 *)rest); + rest += 4; + } + if (atrb->num_offset_meq_32 > 1) { + atrb->offset_meq_32[1].offset = *extra++; + atrb->offset_meq_32[1].mask = *((u32 *)rest); + rest += 4; + atrb->offset_meq_32[1].value = *((u32 *)rest); + rest += 4; + } + + if (atrb->num_ihl_offset_meq_32 > 0) { + atrb->ihl_offset_meq_32[0].offset = *extra++; + atrb->ihl_offset_meq_32[0].mask = *((u32 *)rest); + rest += 4; + atrb->ihl_offset_meq_32[0].value = *((u32 *)rest); + rest += 4; + } + if (atrb->num_ihl_offset_meq_32 > 1) { + atrb->ihl_offset_meq_32[1].offset = *extra++; + atrb->ihl_offset_meq_32[1].mask = *((u32 *)rest); + rest += 4; + atrb->ihl_offset_meq_32[1].value = *((u32 *)rest); + rest += 4; + } + + if (atrb->metadata_meq32_present) { + atrb->metadata_meq32.mask = *((u32 *)rest); + rest += 4; + atrb->metadata_meq32.value = *((u32 *)rest); + rest += 4; + } + + if (atrb->num_ihl_offset_range_16 > 0) { + atrb->ihl_offset_range_16[0].offset = *extra++; + atrb->ihl_offset_range_16[0].range_high = *((u16 *)rest); + rest += 2; + atrb->ihl_offset_range_16[0].range_low = *((u16 *)rest); + rest += 2; + } + if (atrb->num_ihl_offset_range_16 > 1) { + atrb->ihl_offset_range_16[1].offset = *extra++; + atrb->ihl_offset_range_16[1].range_high = *((u16 *)rest); + rest += 2; + atrb->ihl_offset_range_16[1].range_low = *((u16 *)rest); + rest += 2; + } + + if (atrb->ihl_offset_eq_32_present) { + atrb->ihl_offset_eq_32.offset = *extra++; + atrb->ihl_offset_eq_32.value = *((u32 *)rest); + rest += 4; + } + + if (atrb->ihl_offset_eq_16_present) { + atrb->ihl_offset_eq_16.offset = *extra++; + atrb->ihl_offset_eq_16.value = *((u16 *)rest); + rest += 4; + } + + if (atrb->fl_eq_present) { + atrb->fl_eq = *((u32 *)rest); + atrb->fl_eq &= 0xfffff; + rest += 4; + } + + IPAHAL_DBG_LOW("before rule alignment rest=0x%pK\n", rest); + rest = (u8 *)(((unsigned long)rest + IPA3_0_HW_RULE_START_ALIGNMENT) & + ~IPA3_0_HW_RULE_START_ALIGNMENT); + IPAHAL_DBG_LOW("after rule alignment rest=0x%pK\n", rest); + + *rule_size = rest - addr; + IPAHAL_DBG_LOW("rule_size=0x%x\n", *rule_size); + + return 0; +} + +static int ipa_rt_parse_hw_rule(u8 *addr, struct ipahal_rt_rule_entry *rule) +{ + struct ipa3_0_rt_rule_hw_hdr *rule_hdr; + struct ipa_ipfltri_rule_eq *atrb; + + IPAHAL_DBG_LOW("Entry\n"); + + rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)addr; + atrb = &rule->eq_attrib; + + IPAHAL_DBG_LOW("read hdr 0x%llx\n", rule_hdr->u.word); + + if (rule_hdr->u.word == 0) { + /* table terminator - empty table */ + rule->rule_size = 0; + return 0; + } + + rule->dst_pipe_idx = rule_hdr->u.hdr.pipe_dest_idx; + if (rule_hdr->u.hdr.proc_ctx) { + rule->hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX; + rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 5; + } else { + rule->hdr_type = IPAHAL_RT_RULE_HDR_RAW; + rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 2; + } + rule->hdr_lcl = !rule_hdr->u.hdr.system; + + rule->priority = rule_hdr->u.hdr.priority; + rule->retain_hdr = rule_hdr->u.hdr.retain_hdr; + rule->id = rule_hdr->u.hdr.rule_id; + + atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule; + return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr), + atrb, &rule->rule_size); +} + +static int ipa_flt_parse_hw_rule(u8 *addr, struct ipahal_flt_rule_entry *rule) +{ + struct ipa3_0_flt_rule_hw_hdr *rule_hdr; + struct ipa_ipfltri_rule_eq *atrb; + + IPAHAL_DBG_LOW("Entry\n"); + + rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)addr; + atrb = &rule->rule.eq_attrib; + + if (rule_hdr->u.word == 0) { + /* table termintator - empty 
table */ + rule->rule_size = 0; + return 0; + } + + switch (rule_hdr->u.hdr.action) { + case 0x0: + rule->rule.action = IPA_PASS_TO_ROUTING; + break; + case 0x1: + rule->rule.action = IPA_PASS_TO_SRC_NAT; + break; + case 0x2: + rule->rule.action = IPA_PASS_TO_DST_NAT; + break; + case 0x3: + rule->rule.action = IPA_PASS_TO_EXCEPTION; + break; + default: + IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action); + WARN_ON_RATELIMIT_IPA(1); + rule->rule.action = rule_hdr->u.hdr.action; + } + + rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx; + rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr; + rule->priority = rule_hdr->u.hdr.priority; + rule->id = rule_hdr->u.hdr.rule_id; + + atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule; + rule->rule.eq_attrib_type = 1; + return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr), + atrb, &rule->rule_size); +} + +static int ipa_flt_parse_hw_rule_ipav4(u8 *addr, + struct ipahal_flt_rule_entry *rule) +{ + struct ipa4_0_flt_rule_hw_hdr *rule_hdr; + struct ipa_ipfltri_rule_eq *atrb; + + IPAHAL_DBG_LOW("Entry\n"); + + rule_hdr = (struct ipa4_0_flt_rule_hw_hdr *)addr; + atrb = &rule->rule.eq_attrib; + + if (rule_hdr->u.word == 0) { + /* table termintator - empty table */ + rule->rule_size = 0; + return 0; + } + + switch (rule_hdr->u.hdr.action) { + case 0x0: + rule->rule.action = IPA_PASS_TO_ROUTING; + break; + case 0x1: + rule->rule.action = IPA_PASS_TO_SRC_NAT; + break; + case 0x2: + rule->rule.action = IPA_PASS_TO_DST_NAT; + break; + case 0x3: + rule->rule.action = IPA_PASS_TO_EXCEPTION; + break; + default: + IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action); + WARN_ON_RATELIMIT_IPA(1); + rule->rule.action = rule_hdr->u.hdr.action; + } + + rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx; + rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr; + rule->priority = rule_hdr->u.hdr.priority; + rule->id = rule_hdr->u.hdr.rule_id; + rule->rule.pdn_idx = rule_hdr->u.hdr.pdn_idx; + rule->rule.set_metadata = rule_hdr->u.hdr.set_metadata; + + atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule; + rule->rule.eq_attrib_type = 1; + return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr), + atrb, &rule->rule_size); +} + +/* + * ipahal_fltrt_init() - Build the FLT/RT information table + * See ipahal_fltrt_objs[] comments + * + * Note: As global variables are initialized with zero, any un-overridden + * register entry will be zero. By this we recognize them. 
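To illustrate that convention with a minimal, self-contained sketch (the structure, array and sizes are invented here, not the driver's actual table): an all-zero entry is treated as "not overridden" and simply inherits the previous hardware version's parameters.

#include <string.h>

#define HW_MAX 3

struct hal_obj { unsigned int tbl_width; unsigned int tbl_hdr_width; };

static struct hal_obj objs[HW_MAX] = {
	[0] = { .tbl_width = 8, .tbl_hdr_width = 8 },	/* explicitly overridden */
	/* [1] intentionally left all-zero */
	[2] = { .tbl_width = 8, .tbl_hdr_width = 8 },	/* explicitly overridden */
};

static void inherit_unset(void)
{
	const struct hal_obj zero = { 0 };
	int v;

	for (v = 0; v + 1 < HW_MAX; v++)
		if (!memcmp(&objs[v + 1], &zero, sizeof(zero)))
			objs[v + 1] = objs[v];	/* all-zero entry: inherit previous */
}

ipahal_fltrt_init() below applies the same scheme to ipahal_fltrt_objs[] and, for every explicitly overridden entry, also sanity-checks its widths, alignments and callbacks.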
+ */ +int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type) +{ + struct ipahal_fltrt_obj zero_obj; + int i; + struct ipa_mem_buffer *mem; + int rc = -EFAULT; + u32 eq_bits; + u8 *eq_bitfield; + + IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type); + + if (ipa_hw_type >= IPA_HW_MAX) { + IPAHAL_ERR("Invalid H/W type\n"); + return -EFAULT; + } + + memset(&zero_obj, 0, sizeof(zero_obj)); + for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) { + if (!memcmp(&ipahal_fltrt_objs[i+1], &zero_obj, + sizeof(struct ipahal_fltrt_obj))) { + memcpy(&ipahal_fltrt_objs[i+1], + &ipahal_fltrt_objs[i], + sizeof(struct ipahal_fltrt_obj)); + } else { + /* + * explicitly overridden FLT RT info + * Check validity + */ + if (!ipahal_fltrt_objs[i+1].tbl_width) { + IPAHAL_ERR( + "Zero tbl width ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].sysaddr_alignment) { + IPAHAL_ERR( + "No tbl sysaddr alignment ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].lcladdr_alignment) { + IPAHAL_ERR( + "No tbl lcladdr alignment ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].blk_sz_alignment) { + IPAHAL_ERR( + "No blk sz alignment ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].rule_start_alignment) { + IPAHAL_ERR( + "No rule start alignment ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].tbl_hdr_width) { + IPAHAL_ERR( + "Zero tbl hdr width ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].tbl_addr_mask) { + IPAHAL_ERR( + "Zero tbl hdr width ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (ipahal_fltrt_objs[i+1].rule_id_bit_len < 2) { + IPAHAL_ERR( + "Too little bits for rule_id ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].rule_buf_size) { + IPAHAL_ERR( + "zero rule buf size ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].write_val_to_hdr) { + IPAHAL_ERR( + "No write_val_to_hdr CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].create_flt_bitmap) { + IPAHAL_ERR( + "No create_flt_bitmap CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].create_tbl_addr) { + IPAHAL_ERR( + "No create_tbl_addr CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].parse_tbl_addr) { + IPAHAL_ERR( + "No parse_tbl_addr CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].rt_generate_hw_rule) { + IPAHAL_ERR( + "No rt_generate_hw_rule CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].flt_generate_hw_rule) { + IPAHAL_ERR( + "No flt_generate_hw_rule CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].flt_generate_eq) { + IPAHAL_ERR( + "No flt_generate_eq CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].rt_parse_hw_rule) { + IPAHAL_ERR( + "No rt_parse_hw_rule CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].flt_parse_hw_rule) { + IPAHAL_ERR( + "No flt_parse_hw_rule CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + } + } + + eq_bits = 0; + eq_bitfield = ipahal_fltrt_objs[ipa_hw_type].eq_bitfield; + for (i = 0; i < IPA_EQ_MAX; i++) { + if (!IPA_IS_RULE_EQ_VALID(i)) + continue; + + if (eq_bits & IPA_GET_RULE_EQ_BIT_PTRN(eq_bitfield[i])) { + IPAHAL_ERR("more than eq with same bit. 
eq=%d\n", i); + WARN_ON(1); + return -EFAULT; + } + eq_bits |= IPA_GET_RULE_EQ_BIT_PTRN(eq_bitfield[i]); + } + + mem = &ipahal_ctx->empty_fltrt_tbl; + + /* setup an empty table in system memory; This will + * be used, for example, to delete a rt tbl safely + */ + mem->size = ipahal_fltrt_objs[ipa_hw_type].tbl_width; + mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAHAL_ERR("DMA buff alloc fail %d bytes for empty tbl\n", + mem->size); + return -ENOMEM; + } + + if (mem->phys_base & + ipahal_fltrt_objs[ipa_hw_type].sysaddr_alignment) { + IPAHAL_ERR("Empty table buf is not address aligned 0x%pad\n", + &mem->phys_base); + rc = -EFAULT; + goto clear_empty_tbl; + } + + memset(mem->base, 0, mem->size); + IPAHAL_DBG("empty table allocated in system memory"); + + return 0; + +clear_empty_tbl: + dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base, + mem->phys_base); + return rc; +} + +void ipahal_fltrt_destroy(void) +{ + IPAHAL_DBG("Entry\n"); + + if (ipahal_ctx && ipahal_ctx->empty_fltrt_tbl.base) + dma_free_coherent(ipahal_ctx->ipa_pdev, + ipahal_ctx->empty_fltrt_tbl.size, + ipahal_ctx->empty_fltrt_tbl.base, + ipahal_ctx->empty_fltrt_tbl.phys_base); +} + +/* Get the H/W table (flt/rt) header width */ +u32 ipahal_get_hw_tbl_hdr_width(void) +{ + return ipahal_fltrt_objs[ipahal_ctx->hw_type].tbl_hdr_width; +} + +/* Get the H/W local table (SRAM) address alignment + * Tables headers references to local tables via offsets in SRAM + * This function return the alignment of the offset that IPA expects + */ +u32 ipahal_get_lcl_tbl_addr_alignment(void) +{ + return ipahal_fltrt_objs[ipahal_ctx->hw_type].lcladdr_alignment; +} + +/* + * Rule priority is used to distinguish rules order + * at the integrated table consisting from hashable and + * non-hashable tables. Max priority are rules that once are + * scanned by IPA, IPA will not look for further rules and use it. + */ +int ipahal_get_rule_max_priority(void) +{ + return ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_max_prio; +} + +/* Given a priority, calc and return the next lower one if it is in + * legal range. + */ +int ipahal_rule_decrease_priority(int *prio) +{ + struct ipahal_fltrt_obj *obj; + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!prio) { + IPAHAL_ERR("Invalid Input\n"); + return -EINVAL; + } + + /* Priority logic is reverse. 0 priority considred max priority */ + if (*prio > obj->rule_min_prio || *prio < obj->rule_max_prio) { + IPAHAL_ERR("Invalid given priority %d\n", *prio); + return -EINVAL; + } + + *prio += 1; + + if (*prio > obj->rule_min_prio) { + IPAHAL_ERR("Cannot decrease priority. Already on min\n"); + *prio -= 1; + return -EFAULT; + } + + return 0; +} + +/* Does the given ID represents rule miss? + * Rule miss ID, is always the max ID possible in the bit-pattern + */ +bool ipahal_is_rule_miss_id(u32 id) +{ + return (id == + ((1U << ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len) + -1)); +} + +/* Get rule ID with high bit only asserted + * Used e.g. to create groups of IDs according to this bit + */ +u32 ipahal_get_rule_id_hi_bit(void) +{ + return BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len - 1); +} + +/* Get the low value possible to be used for rule-id */ +u32 ipahal_get_low_rule_id(void) +{ + return ipahal_fltrt_objs[ipahal_ctx->hw_type].low_rule_id; +} + +/* + * ipahal_rt_generate_empty_img() - Generate empty route image + * Creates routing header buffer for the given tables number. 
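Because priority 0 is the maximum and ipahal_rule_decrease_priority() moves toward the minimum legal value, a caller handing out strictly decreasing priorities might look roughly like this (the rules[] array and num_rules are assumptions of the sketch, not driver symbols):

int prio = ipahal_get_rule_max_priority();
int i;

for (i = 0; i < num_rules; i++) {
	rules[i].priority = prio;
	if (ipahal_rule_decrease_priority(&prio))
		break;	/* already at the minimum priority */
}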
+ * For each table, make it point to the empty table on DDR. + * @tbls_num: Number of tables. For each will have an entry in the header + * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check + * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check + * @mem: mem object that points to DMA mem representing the hdr structure + * @atomic: should DMA allocation be executed with atomic flag + */ +int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size, + u32 nhash_hdr_size, struct ipa_mem_buffer *mem, bool atomic) +{ + int i; + u64 addr; + struct ipahal_fltrt_obj *obj; + int flag; + + IPAHAL_DBG("Entry\n"); + + flag = atomic ? GFP_ATOMIC : GFP_KERNEL; + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!tbls_num || !nhash_hdr_size || !mem) { + IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%pK\n", + tbls_num, nhash_hdr_size, mem); + return -EINVAL; + } + if (obj->support_hash && !hash_hdr_size) { + IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size); + return -EINVAL; + } + + if (nhash_hdr_size < (tbls_num * obj->tbl_hdr_width)) { + IPAHAL_ERR("No enough spc at non-hash hdr blk for all tbls\n"); + WARN_ON(1); + return -EINVAL; + } + if (obj->support_hash && + (hash_hdr_size < (tbls_num * obj->tbl_hdr_width))) { + IPAHAL_ERR("No enough spc at hash hdr blk for all tbls\n"); + WARN_ON(1); + return -EINVAL; + } + + mem->size = tbls_num * obj->tbl_hdr_width; + mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size, + &mem->phys_base, flag); + if (!mem->base) { + IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + addr = obj->create_tbl_addr(true, + ipahal_ctx->empty_fltrt_tbl.phys_base); + for (i = 0; i < tbls_num; i++) + obj->write_val_to_hdr(addr, + mem->base + i * obj->tbl_hdr_width); + + return 0; +} + +/* + * ipahal_flt_generate_empty_img() - Generate empty filter image + * Creates filter header buffer for the given tables number. + * For each table, make it point to the empty table on DDR. + * @tbls_num: Number of tables. For each will have an entry in the header + * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check + * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check + * @ep_bitmap: Bitmap representing the EP that has flt tables. The format + * should be: bit0->EP0, bit1->EP1 + * If bitmap is zero -> create tbl without bitmap entry + * @mem: mem object that points to DMA mem representing the hdr structure + * @atomic: should DMA allocation be executed with atomic flag + */ +int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size, + u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem, + bool atomic) +{ + int flt_spc; + u64 flt_bitmap; + int i; + u64 addr; + struct ipahal_fltrt_obj *obj; + int flag; + + IPAHAL_DBG("Entry - ep_bitmap 0x%llx\n", ep_bitmap); + + flag = atomic ? 
GFP_ATOMIC : GFP_KERNEL; + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!tbls_num || !nhash_hdr_size || !mem) { + IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%pK\n", + tbls_num, nhash_hdr_size, mem); + return -EINVAL; + } + if (obj->support_hash && !hash_hdr_size) { + IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size); + return -EINVAL; + } + + if (obj->support_hash) { + flt_spc = hash_hdr_size; + /* bitmap word */ + if (ep_bitmap) + flt_spc -= obj->tbl_hdr_width; + flt_spc /= obj->tbl_hdr_width; + if (tbls_num > flt_spc) { + IPAHAL_ERR("space for hash flt hdr is too small\n"); + WARN_ON(1); + return -EPERM; + } + } + + flt_spc = nhash_hdr_size; + /* bitmap word */ + if (ep_bitmap) + flt_spc -= obj->tbl_hdr_width; + flt_spc /= obj->tbl_hdr_width; + if (tbls_num > flt_spc) { + IPAHAL_ERR("space for non-hash flt hdr is too small\n"); + WARN_ON(1); + return -EPERM; + } + + mem->size = tbls_num * obj->tbl_hdr_width; + if (ep_bitmap) + mem->size += obj->tbl_hdr_width; + mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size, + &mem->phys_base, flag); + if (!mem->base) { + IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + if (ep_bitmap) { + flt_bitmap = obj->create_flt_bitmap(ep_bitmap); + IPAHAL_DBG("flt bitmap 0x%llx\n", flt_bitmap); + obj->write_val_to_hdr(flt_bitmap, mem->base); + } + + addr = obj->create_tbl_addr(true, + ipahal_ctx->empty_fltrt_tbl.phys_base); + + if (ep_bitmap) { + for (i = 1; i <= tbls_num; i++) + obj->write_val_to_hdr(addr, + mem->base + i * obj->tbl_hdr_width); + } else { + for (i = 0; i < tbls_num; i++) + obj->write_val_to_hdr(addr, + mem->base + i * obj->tbl_hdr_width); + } + + return 0; +} + +/* + * ipa_fltrt_alloc_init_tbl_hdr() - allocate and initialize buffers for + * flt/rt tables headers to be filled into sram. 
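For reference, a routing-side caller of the empty-image helper above might do something like the following (the table count and SRAM header-block sizes are placeholders, not values taken from this patch):

struct ipa_mem_buffer hdr_mem;
int rc;

/* e.g. 20 routing tables; 256-byte hashable and non-hashable SRAM header blocks */
rc = ipahal_rt_generate_empty_img(20, 256, 256, &hdr_mem, false);
if (rc)
	return rc;
/* ... copy hdr_mem into the SRAM header region, then release it ... */
ipahal_free_dma_mem(&hdr_mem);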
Init each table to point + to empty system table + @params: Allocate IN and OUT params + * + * Return: 0 on success, negative on failure + */ +static int ipa_fltrt_alloc_init_tbl_hdr( + struct ipahal_fltrt_alloc_imgs_params *params) +{ + u64 addr; + int i; + struct ipahal_fltrt_obj *obj; + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!params) { + IPAHAL_ERR_RL("Input error: params=%pK\n", params); + return -EINVAL; + } + + params->nhash_hdr.size = params->tbls_num * obj->tbl_hdr_width; + params->nhash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, + params->nhash_hdr.size, + &params->nhash_hdr.phys_base, GFP_KERNEL); + if (!params->nhash_hdr.base) { + IPAHAL_ERR_RL("fail to alloc DMA buff of size %d\n", + params->nhash_hdr.size); + goto nhash_alloc_fail; + } + + if (obj->support_hash) { + params->hash_hdr.size = params->tbls_num * obj->tbl_hdr_width; + params->hash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, + params->hash_hdr.size, &params->hash_hdr.phys_base, + GFP_KERNEL); + if (!params->hash_hdr.base) { + IPAHAL_ERR_RL("fail to alloc DMA buff of size %d\n", + params->hash_hdr.size); + goto hash_alloc_fail; + } + } + + addr = obj->create_tbl_addr(true, + ipahal_ctx->empty_fltrt_tbl.phys_base); + for (i = 0; i < params->tbls_num; i++) { + obj->write_val_to_hdr(addr, + params->nhash_hdr.base + i * obj->tbl_hdr_width); + if (obj->support_hash) + obj->write_val_to_hdr(addr, + params->hash_hdr.base + + i * obj->tbl_hdr_width); + } + + return 0; + +hash_alloc_fail: + ipahal_free_dma_mem(&params->nhash_hdr); +nhash_alloc_fail: + return -ENOMEM; +} + +/* + * ipa_fltrt_alloc_lcl_bdy() - allocate and initialize buffers for + * local flt/rt tables bodies to be filled into sram + * @params: Allocate IN and OUT params + * + * Return: 0 on success, negative on failure + */ +static int ipa_fltrt_alloc_lcl_bdy( + struct ipahal_fltrt_alloc_imgs_params *params) +{ + struct ipahal_fltrt_obj *obj; + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + /* The HAL allocates larger sizes than the given effective ones + * for alignments and border indications + */ + IPAHAL_DBG_LOW("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n", + params->total_sz_lcl_hash_tbls, + params->total_sz_lcl_nhash_tbls); + + IPAHAL_DBG_LOW("lcl tbl bdy count: hash=%u nhash=%u\n", + params->num_lcl_hash_tbls, + params->num_lcl_nhash_tbls); + + /* Align the sizes to coop with termination word + * and H/W local table start offset alignment + */ + if (params->nhash_bdy.size) { + params->nhash_bdy.size = params->total_sz_lcl_nhash_tbls; + /* for table terminator */ + params->nhash_bdy.size += obj->tbl_width * + params->num_lcl_nhash_tbls; + /* align the start of local rule-set */ + params->nhash_bdy.size += obj->lcladdr_alignment * + params->num_lcl_nhash_tbls; + /* SRAM block size alignment */ + params->nhash_bdy.size += obj->blk_sz_alignment; + params->nhash_bdy.size &= ~(obj->blk_sz_alignment); + + IPAHAL_DBG_LOW("nhash lcl tbl bdy total h/w size = %u\n", + params->nhash_bdy.size); + + params->nhash_bdy.base = dma_zalloc_coherent( + ipahal_ctx->ipa_pdev, params->nhash_bdy.size, + &params->nhash_bdy.phys_base, GFP_KERNEL); + if (!params->nhash_bdy.base) { + IPAHAL_ERR("fail to alloc DMA buff of size %d\n", + params->nhash_bdy.size); + return -ENOMEM; + } + } + + if (!obj->support_hash && params->hash_bdy.size) { + IPAHAL_ERR("No HAL Hash tbls support - Will be ignored\n"); + WARN_ON(1); + } + + if (obj->support_hash && params->hash_bdy.size) { + params->hash_bdy.size = params->total_sz_lcl_hash_tbls; + /* for table 
terminator */ + params->hash_bdy.size += obj->tbl_width * + params->num_lcl_hash_tbls; + /* align the start of local rule-set */ + params->hash_bdy.size += obj->lcladdr_alignment * + params->num_lcl_hash_tbls; + /* SRAM block size alignment */ + params->hash_bdy.size += obj->blk_sz_alignment; + params->hash_bdy.size &= ~(obj->blk_sz_alignment); + + IPAHAL_DBG_LOW("hash lcl tbl bdy total h/w size = %u\n", + params->hash_bdy.size); + + params->hash_bdy.base = dma_zalloc_coherent( + ipahal_ctx->ipa_pdev, params->hash_bdy.size, + &params->hash_bdy.phys_base, GFP_KERNEL); + if (!params->hash_bdy.base) { + IPAHAL_ERR("fail to alloc DMA buff of size %d\n", + params->hash_bdy.size); + goto hash_bdy_fail; + } + } + + return 0; + +hash_bdy_fail: + if (params->nhash_bdy.size) + ipahal_free_dma_mem(&params->nhash_bdy); + + return -ENOMEM; +} + +/* + * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures + * Used usually during commit. + * Allocates header structures and init them to point to empty DDR table + * Allocate body structures for local bodies tables + * @params: Parameters for IN and OUT regard the allocation. + */ +int ipahal_fltrt_allocate_hw_tbl_imgs( + struct ipahal_fltrt_alloc_imgs_params *params) +{ + IPAHAL_DBG_LOW("Entry\n"); + + /* Input validation */ + if (!params) { + IPAHAL_ERR_RL("Input err: no params\n"); + return -EINVAL; + } + if (params->ipt >= IPA_IP_MAX) { + IPAHAL_ERR_RL("Input err: Invalid ip type %d\n", params->ipt); + return -EINVAL; + } + + if (ipa_fltrt_alloc_init_tbl_hdr(params)) { + IPAHAL_ERR_RL("fail to alloc and init tbl hdr\n"); + return -ENOMEM; + } + + if (ipa_fltrt_alloc_lcl_bdy(params)) { + IPAHAL_ERR_RL("fail to alloc tbl bodies\n"); + goto bdy_alloc_fail; + } + + return 0; + +bdy_alloc_fail: + ipahal_free_dma_mem(&params->nhash_hdr); + if (params->hash_hdr.size) + ipahal_free_dma_mem(&params->hash_hdr); + return -ENOMEM; +} + +/* + * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl + * @tbl_mem: IN/OUT param. size for effective table size. Pointer, for the + * allocated memory. + * + * The size is adapted for needed alignments/borders. + */ +int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem) +{ + struct ipahal_fltrt_obj *obj; + + IPAHAL_DBG_LOW("Entry\n"); + + if (!tbl_mem) { + IPAHAL_ERR("Input err\n"); + return -EINVAL; + } + + if (!tbl_mem->size) { + IPAHAL_ERR("Input err: zero table size\n"); + return -EINVAL; + } + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + /* add word for rule-set terminator */ + tbl_mem->size += obj->tbl_width; + + tbl_mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, tbl_mem->size, + &tbl_mem->phys_base, GFP_KERNEL); + if (!tbl_mem->base) { + IPAHAL_ERR("fail to alloc DMA buf of size %d\n", + tbl_mem->size); + return -ENOMEM; + } + if (tbl_mem->phys_base & obj->sysaddr_alignment) { + IPAHAL_ERR("sys rt tbl address is not aligned\n"); + goto align_err; + } + + memset(tbl_mem->base, 0, tbl_mem->size); + + return 0; + +align_err: + ipahal_free_dma_mem(tbl_mem); + return -EPERM; +} + +/* + * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address + * Given table addr/offset, adapt it to IPA H/W format and write it + * to given header index.
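To make the body-size padding above concrete: with the IPA3.0 constants defined later in this patch (tbl_width = 8, lcladdr_alignment = 7, blk_sz_alignment = 127), two local non-hashable tables with a combined effective size of 100 bytes would be sized roughly as follows (illustrative arithmetic only):

size = 100;	/* total_sz_lcl_nhash_tbls                    */
size += 8 * 2;	/* one 8-byte terminator per table     -> 116 */
size += 7 * 2;	/* worst-case rule-set start alignment -> 130 */
size += 127;	/* pad up to the SRAM block alignment  -> 257 */
size &= ~127;	/* final allocation size               -> 256 */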
+ * @addr: Address or offset to be used + * @hdr_base: base address of header structure to write the address + * @hdr_idx: index of the address in the header structure + * @is_sys: Is it system address or local offset + */ +int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx, + bool is_sys) +{ + struct ipahal_fltrt_obj *obj; + u64 hwaddr; + u8 *hdr; + + IPAHAL_DBG_LOW("Entry\n"); + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!addr || !hdr_base) { + IPAHAL_ERR("Input err: addr=0x%llx hdr_base=%pK\n", + addr, hdr_base); + return -EINVAL; + } + + hdr = (u8 *)hdr_base; + hdr += hdr_idx * obj->tbl_hdr_width; + hwaddr = obj->create_tbl_addr(is_sys, addr); + obj->write_val_to_hdr(hwaddr, hdr); + + return 0; +} + +/* + * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read it's + * content (physical address or offset) and parse it. + * @hdr_base: base sram address of the header structure. + * @hdr_idx: index of the header entry line in the header structure. + * @addr: The parsed address - Out parameter + * @is_sys: Is this system or local address - Out parameter + */ +int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr, + bool *is_sys) +{ + struct ipahal_fltrt_obj *obj; + u64 hwaddr; + u8 *hdr; + + IPAHAL_DBG_LOW("Entry\n"); + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!addr || !hdr_base || !is_sys) { + IPAHAL_ERR("Input err: addr=%pK hdr_base=%pK is_sys=%pK\n", + addr, hdr_base, is_sys); + return -EINVAL; + } + + hdr = (u8 *)hdr_base; + hdr += hdr_idx * obj->tbl_hdr_width; + hwaddr = *((u64 *)hdr); + obj->parse_tbl_addr(hwaddr, addr, is_sys); + return 0; +} + +/* + * ipahal_rt_generate_hw_rule() - generates the routing hardware rule + * @params: Params for the rule creation. + * @hw_len: Size of the H/W rule to be returned + * @buf: Buffer to build the rule in. If buf is NULL, then the rule will + * be built in internal temp buf. This is used e.g. to get the rule size + * only. + */ +int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params, + u32 *hw_len, u8 *buf) +{ + struct ipahal_fltrt_obj *obj; + u8 *tmp = NULL; + int rc; + + IPAHAL_DBG_LOW("Entry\n"); + + if (!params || !hw_len) { + IPAHAL_ERR("Input err: params=%pK hw_len=%pK\n", + params, hw_len); + return -EINVAL; + } + if (!params->rule) { + IPAHAL_ERR("Input err: invalid rule\n"); + return -EINVAL; + } + if (params->ipt >= IPA_IP_MAX) { + IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt); + return -EINVAL; + } + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (buf == NULL) { + tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + buf = tmp; + } else { + if ((long)buf & obj->rule_start_alignment) { + IPAHAL_ERR("buff is not rule start aligned\n"); + return -EPERM; + } + } + + rc = obj->rt_generate_hw_rule(params, hw_len, buf); + if (!tmp && !rc) { + /* write the rule-set terminator */ + memset(buf + *hw_len, 0, obj->tbl_width); + } + + kfree(tmp); + + return rc; +} + +/* + * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule. + * @params: Params for the rule creation. + * @hw_len: Size of the H/W rule to be returned + * @buf: Buffer to build the rule in. If buf is NULL, then the rule will + * be built in internal temp buf. This is used e.g. to get the rule size + * only. 
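The NULL-buffer convention described above is the usual way to size a rule before committing it; a hedged caller sketch (parameter setup elided, tbl_base and offset are assumptions):

struct ipahal_rt_rule_gen_params gen = { 0 };
u32 rule_sz;

/* fill gen.ipt, gen.dst_pipe_idx, gen.hdr_type, gen.hdr_ofst,
 * gen.priority, gen.id and gen.rule before calling
 */
if (ipahal_rt_generate_hw_rule(&gen, &rule_sz, NULL))
	return -EPERM;		/* first pass only computes rule_sz */
/* second pass builds the rule into a rule-start-aligned buffer */
if (ipahal_rt_generate_hw_rule(&gen, &rule_sz, tbl_base + offset))
	return -EPERM;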
+ */ +int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params, + u32 *hw_len, u8 *buf) +{ + struct ipahal_fltrt_obj *obj; + u8 *tmp = NULL; + int rc; + + IPAHAL_DBG_LOW("Entry\n"); + + if (!params || !hw_len) { + IPAHAL_ERR("Input err: params=%pK hw_len=%pK\n", + params, hw_len); + return -EINVAL; + } + if (!params->rule) { + IPAHAL_ERR("Input err: invalid rule\n"); + return -EINVAL; + } + if (params->ipt >= IPA_IP_MAX) { + IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt); + return -EINVAL; + } + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (buf == NULL) { + tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL); + if (!tmp) { + IPAHAL_ERR("failed to alloc %u bytes\n", + obj->rule_buf_size); + return -ENOMEM; + } + buf = tmp; + } else + if ((long)buf & obj->rule_start_alignment) { + IPAHAL_ERR("buff is not rule rule start aligned\n"); + return -EPERM; + } + + rc = obj->flt_generate_hw_rule(params, hw_len, buf); + if (!tmp && !rc) { + /* write the rule-set terminator */ + memset(buf + *hw_len, 0, obj->tbl_width); + } + + kfree(tmp); + + return rc; + +} + +/* + * ipahal_flt_generate_equation() - generate flt rule in equation form + * Will build equation form flt rule from given info. + * @ipt: IP family + * @attrib: Rule attribute to be generated + * @eq_atrb: Equation form generated rule + * Note: Usage example: Pass the generated form to other sub-systems + * for inter-subsystems rules exchange. + */ +int ipahal_flt_generate_equation(enum ipa_ip_type ipt, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb) +{ + IPAHAL_DBG_LOW("Entry\n"); + + if (ipt >= IPA_IP_MAX) { + IPAHAL_ERR_RL("Input err: Invalid ip type %d\n", ipt); + return -EINVAL; + } + + if (!attrib || !eq_atrb) { + IPAHAL_ERR_RL("Input err: attrib=%pK eq_atrb=%pK\n", + attrib, eq_atrb); + return -EINVAL; + } + + return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_generate_eq(ipt, + attrib, eq_atrb); + +} + +/* + * ipahal_rt_parse_hw_rule() - Parse H/W formated rt rule + * Given the rule address, read the rule info from H/W and parse it. + * @rule_addr: Rule address (virtual memory) + * @rule: Out parameter for parsed rule info + */ +int ipahal_rt_parse_hw_rule(u8 *rule_addr, + struct ipahal_rt_rule_entry *rule) +{ + IPAHAL_DBG_LOW("Entry\n"); + + if (!rule_addr || !rule) { + IPAHAL_ERR("Input err: rule_addr=%pK rule=%pK\n", + rule_addr, rule); + return -EINVAL; + } + + return ipahal_fltrt_objs[ipahal_ctx->hw_type].rt_parse_hw_rule( + rule_addr, rule); +} + +/* + * ipahal_flt_parse_hw_rule() - Parse H/W formated flt rule + * Given the rule address, read the rule info from H/W and parse it. + * @rule_addr: Rule address (virtual memory) + * @rule: Out parameter for parsed rule info + */ +int ipahal_flt_parse_hw_rule(u8 *rule_addr, + struct ipahal_flt_rule_entry *rule) +{ + IPAHAL_DBG_LOW("Entry\n"); + + if (!rule_addr || !rule) { + IPAHAL_ERR("Input err: rule_addr=%pK rule=%pK\n", + rule_addr, rule); + return -EINVAL; + } + + return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_parse_hw_rule( + rule_addr, rule); +} + diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h new file mode 100644 index 000000000000..b084b1eba334 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h @@ -0,0 +1,284 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _IPAHAL_FLTRT_H_ +#define _IPAHAL_FLTRT_H_ + +/* + * struct ipahal_fltrt_alloc_imgs_params - Params for tbls imgs allocations + * The allocation logic will allocate DMA memory representing the header. + * If the bodies are local (SRAM) the allocation will allocate + * a DMA buffers that would contain the content of these local tables in raw + * @ipt: IP version type + * @tbls_num: Number of tables to represent by the header + * @num_lcl_hash_tbls: Number of local (sram) hashable tables + * @num_lcl_nhash_tbls: Number of local (sram) non-hashable tables + * @total_sz_lcl_hash_tbls: Total size of local hashable tables + * @total_sz_lcl_nhash_tbls: Total size of local non-hashable tables + * @hash_hdr/nhash_hdr: OUT params for the header structures + * @hash_bdy/nhash_bdy: OUT params for the local body structures + */ +struct ipahal_fltrt_alloc_imgs_params { + enum ipa_ip_type ipt; + u32 tbls_num; + u32 num_lcl_hash_tbls; + u32 num_lcl_nhash_tbls; + u32 total_sz_lcl_hash_tbls; + u32 total_sz_lcl_nhash_tbls; + + /* OUT PARAMS */ + struct ipa_mem_buffer hash_hdr; + struct ipa_mem_buffer nhash_hdr; + struct ipa_mem_buffer hash_bdy; + struct ipa_mem_buffer nhash_bdy; +}; + +/* + * enum ipahal_rt_rule_hdr_type - Header type used in rt rules + * @IPAHAL_RT_RULE_HDR_NONE: No header is used + * @IPAHAL_RT_RULE_HDR_RAW: Raw header is used + * @IPAHAL_RT_RULE_HDR_PROC_CTX: Header Processing context is used + */ +enum ipahal_rt_rule_hdr_type { + IPAHAL_RT_RULE_HDR_NONE, + IPAHAL_RT_RULE_HDR_RAW, + IPAHAL_RT_RULE_HDR_PROC_CTX, +}; + +/* + * struct ipahal_rt_rule_gen_params - Params for generating rt rule + * @ipt: IP family version + * @dst_pipe_idx: Destination pipe index + * @hdr_type: Header type to be used + * @hdr_lcl: Does header on local or system table? + * @hdr_ofst: Offset of the header in the header table + * @priority: Rule priority + * @id: Rule ID + * @rule: Rule info + */ +struct ipahal_rt_rule_gen_params { + enum ipa_ip_type ipt; + int dst_pipe_idx; + enum ipahal_rt_rule_hdr_type hdr_type; + bool hdr_lcl; + u32 hdr_ofst; + u32 priority; + u32 id; + const struct ipa_rt_rule *rule; +}; + +/* + * struct ipahal_rt_rule_entry - Rt rule info parsed from H/W + * @dst_pipe_idx: Destination pipe index + * @hdr_lcl: Does the references header located in sram or system mem? 
+ * @hdr_ofst: Offset of the header in the header table + * @hdr_type: Header type to be used + * @priority: Rule priority + * @retain_hdr: to retain the removed header in header removal + * @id: Rule ID + * @eq_attrib: Equations and their params in the rule + * @rule_size: Rule size in memory + */ +struct ipahal_rt_rule_entry { + int dst_pipe_idx; + bool hdr_lcl; + u32 hdr_ofst; + enum ipahal_rt_rule_hdr_type hdr_type; + u32 priority; + bool retain_hdr; + u32 id; + struct ipa_ipfltri_rule_eq eq_attrib; + u32 rule_size; +}; + +/* + * struct ipahal_flt_rule_gen_params - Params for generating flt rule + * @ipt: IP family version + * @rt_tbl_idx: Routing table the rule pointing to + * @priority: Rule priority + * @id: Rule ID + * @rule: Rule info + */ +struct ipahal_flt_rule_gen_params { + enum ipa_ip_type ipt; + u32 rt_tbl_idx; + u32 priority; + u32 id; + const struct ipa_flt_rule *rule; +}; + +/* + * struct ipahal_flt_rule_entry - Flt rule info parsed from H/W + * @rule: Rule info + * @priority: Rule priority + * @id: Rule ID + * @rule_size: Rule size in memory + */ +struct ipahal_flt_rule_entry { + struct ipa_flt_rule rule; + u32 priority; + u32 id; + u32 rule_size; +}; + +/* Get the H/W table (flt/rt) header width */ +u32 ipahal_get_hw_tbl_hdr_width(void); + +/* Get the H/W local table (SRAM) address alignment + * Tables headers references to local tables via offsets in SRAM + * This function return the alignment of the offset that IPA expects + */ +u32 ipahal_get_lcl_tbl_addr_alignment(void); + +/* + * Rule priority is used to distinguish rules order + * at the integrated table consisting from hashable and + * non-hashable tables. Max priority are rules that once are + * scanned by IPA, IPA will not look for further rules and use it. + */ +int ipahal_get_rule_max_priority(void); + +/* Given a priority, calc and return the next lower one if it is in + * legal range. + */ +int ipahal_rule_decrease_priority(int *prio); + +/* Does the given ID represents rule miss? */ +bool ipahal_is_rule_miss_id(u32 id); + +/* Get rule ID with high bit only asserted + * Used e.g. to create groups of IDs according to this bit + */ +u32 ipahal_get_rule_id_hi_bit(void); + +/* Get the low value possible to be used for rule-id */ +u32 ipahal_get_low_rule_id(void); + +/* + * ipahal_rt_generate_empty_img() - Generate empty route image + * Creates routing header buffer for the given tables number. + * For each table, make it point to the empty table on DDR. + * @tbls_num: Number of tables. For each will have an entry in the header + * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check + * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check + * @mem: mem object that points to DMA mem representing the hdr structure + * @atomic: should DMA allocation be executed with atomic flag + */ +int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size, + u32 nhash_hdr_size, struct ipa_mem_buffer *mem, bool atomic); + +/* + * ipahal_flt_generate_empty_img() - Generate empty filter image + * Creates filter header buffer for the given tables number. + * For each table, make it point to the empty table on DDR. + * @tbls_num: Number of tables. For each will have an entry in the header + * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check + * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check + * @ep_bitmap: Bitmap representing the EP that has flt tables. 
The format + * should be: bit0->EP0, bit1->EP1 + * @mem: mem object that points to DMA mem representing the hdr structure + * @atomic: should DMA allocation be executed with atomic flag + */ +int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size, + u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem, + bool atomic); + +/* + * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures + * Used usually during commit. + * Allocates header structures and init them to point to empty DDR table + * Allocate body strucutres for local bodies tables + * @params: Parameters for IN and OUT regard the allocation. + */ +int ipahal_fltrt_allocate_hw_tbl_imgs( + struct ipahal_fltrt_alloc_imgs_params *params); + +/* + * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl + * @tbl_mem: IN/OUT param. size for effective table size. Pointer, for the + * allocated memory. + * + * The size is adapted for needed alignments/borders. + */ +int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem); + +/* + * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address + * Given table addr/offset, adapt it to IPA H/W format and write it + * to given header index. + * @addr: Address or offset to be used + * @hdr_base: base address of header structure to write the address + * @hdr_idx: index of the address in the header structure + * @is_sys: Is it system address or local offset + */ +int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx, + bool is_sys); + +/* + * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read it's + * content (physical address or offset) and parse it. + * @hdr_base: base sram address of the header structure. + * @hdr_idx: index of the header entry line in the header structure. + * @addr: The parsed address - Out parameter + * @is_sys: Is this system or local address - Out parameter + */ +int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr, + bool *is_sys); + +/* + * ipahal_rt_generate_hw_rule() - generates the routing hardware rule. + * @params: Params for the rule creation. + * @hw_len: Size of the H/W rule to be returned + * @buf: Buffer to build the rule in. If buf is NULL, then the rule will + * be built in internal temp buf. This is used e.g. to get the rule size + * only. + */ +int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params, + u32 *hw_len, u8 *buf); + +/* + * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule. + * @params: Params for the rule creation. + * @hw_len: Size of the H/W rule to be returned + * @buf: Buffer to build the rule in. If buf is NULL, then the rule will + * be built in internal temp buf. This is used e.g. to get the rule size + * only. + */ +int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params, + u32 *hw_len, u8 *buf); + +/* + * ipahal_flt_generate_equation() - generate flt rule in equation form + * Will build equation form flt rule from given info. + * @ipt: IP family + * @attrib: Rule attribute to be generated + * @eq_atrb: Equation form generated rule + * Note: Usage example: Pass the generated form to other sub-systems + * for inter-subsystems rules exchange. + */ +int ipahal_flt_generate_equation(enum ipa_ip_type ipt, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb); + +/* + * ipahal_rt_parse_hw_rule() - Parse H/W formated rt rule + * Given the rule address, read the rule info from H/W and parse it. 
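A usage sketch for ipahal_flt_generate_equation() above (attribute values invented; IPA_IP_v4 is assumed from the ipa.h enum added elsewhere in this patch):

struct ipa_rule_attrib attrib = { 0 };
struct ipa_ipfltri_rule_eq eq = { 0 };

attrib.attrib_mask = IPA_FLT_DST_PORT;
attrib.dst_port = 5001;
if (ipahal_flt_generate_equation(IPA_IP_v4, &attrib, &eq))
	return -EPERM;
/* eq.rule_eq_bitmap and eq.ihl_offset_range_16[0] now describe the rule,
 * ready to be handed to another subsystem
 */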
+ * @rule_addr: Rule address (virtual memory) + * @rule: Out parameter for parsed rule info + */ +int ipahal_rt_parse_hw_rule(u8 *rule_addr, + struct ipahal_rt_rule_entry *rule); + +/* + * ipahal_flt_parse_hw_rule() - Parse H/W formated flt rule + * Given the rule address, read the rule info from H/W and parse it. + * @rule_addr: Rule address (virtual memory) + * @rule: Out parameter for parsed rule info + */ +int ipahal_flt_parse_hw_rule(u8 *rule_addr, + struct ipahal_flt_rule_entry *rule); + + +#endif /* _IPAHAL_FLTRT_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h new file mode 100644 index 000000000000..0062ff191bf7 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPAHAL_FLTRT_I_H_ +#define _IPAHAL_FLTRT_I_H_ + +/* + * enum ipa_fltrt_equations - RULE equations + * These are names values to the equations that can be used + * The HAL layer holds mapping between these names and H/W + * presentation. + */ +enum ipa_fltrt_equations { + IPA_TOS_EQ, + IPA_PROTOCOL_EQ, + IPA_TC_EQ, + IPA_OFFSET_MEQ128_0, + IPA_OFFSET_MEQ128_1, + IPA_OFFSET_MEQ32_0, + IPA_OFFSET_MEQ32_1, + IPA_IHL_OFFSET_MEQ32_0, + IPA_IHL_OFFSET_MEQ32_1, + IPA_METADATA_COMPARE, + IPA_IHL_OFFSET_RANGE16_0, + IPA_IHL_OFFSET_RANGE16_1, + IPA_IHL_OFFSET_EQ_32, + IPA_IHL_OFFSET_EQ_16, + IPA_FL_EQ, + IPA_IS_FRAG, + IPA_IS_PURE_ACK, + IPA_EQ_MAX, +}; + +/* Width and Alignment values for H/W structures. + * Specific for IPA version. + */ +#define IPA3_0_HW_TBL_SYSADDR_ALIGNMENT (127) +#define IPA3_0_HW_TBL_LCLADDR_ALIGNMENT (7) +#define IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT (127) +#define IPA3_0_HW_TBL_WIDTH (8) +#define IPA3_0_HW_TBL_HDR_WIDTH (8) +#define IPA3_0_HW_TBL_ADDR_MASK (127) +#define IPA3_0_HW_RULE_BUF_SIZE (256) +#define IPA3_0_HW_RULE_START_ALIGNMENT (7) + + +/* + * Rules Priority. + * Needed due to rules classification to hashable and non-hashable. + * Higher priority is lower in number. i.e. 0 is highest priority + */ +#define IPA3_0_RULE_MAX_PRIORITY (0) +#define IPA3_0_RULE_MIN_PRIORITY (1023) + +/* + * RULE ID, bit length (e.g. 10 bits). + */ +#define IPA3_0_RULE_ID_BIT_LEN (10) +#define IPA3_0_LOW_RULE_ID (1) + +/** + * struct ipa3_0_rt_rule_hw_hdr - HW header of IPA routing rule + * @word: routing rule header properties + * @en_rule: enable rule - Equation bit fields + * @pipe_dest_idx: destination pipe index + * @system: Is referenced header is lcl or sys memory + * @hdr_offset: header offset + * @proc_ctx: whether hdr_offset points to header table or to + * header processing context table + * @priority: Rule priority. Added to distinguish rules order + * at the integrated table consisting from hashable and + * non-hashable parts + * @rsvd1: reserved bits + * @retain_hdr: added to add back to the packet the header removed + * as part of header removal. This will be done as part of + * header insertion block. 
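Plugging the IPA3.0 values above into the rule-ID helpers defined earlier in this patch gives, for illustration:

/* IPA3_0_RULE_ID_BIT_LEN == 10, IPA3_0_LOW_RULE_ID == 1 */
ipahal_is_rule_miss_id(1023);	/* true: (1 << 10) - 1 is the rule-miss ID */
ipahal_get_rule_id_hi_bit();	/* 512:  BIT(10 - 1)                       */
ipahal_get_low_rule_id();	/* 1:    lowest usable rule ID             */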
+ * @rule_id: rule ID that will be returned in the packet status + * @rsvd2: reserved bits + */ +struct ipa3_0_rt_rule_hw_hdr { + union { + u64 word; + struct { + u64 en_rule:16; + u64 pipe_dest_idx:5; + u64 system:1; + u64 hdr_offset:9; + u64 proc_ctx:1; + u64 priority:10; + u64 rsvd1:5; + u64 retain_hdr:1; + u64 rule_id:10; + u64 rsvd2:6; + } hdr; + } u; +}; + +/** + * struct ipa3_0_flt_rule_hw_hdr - HW header of IPA filter rule + * @word: filtering rule properties + * @en_rule: enable rule + * @action: post filtering action + * @rt_tbl_idx: index in routing table + * @retain_hdr: added to add back to the packet the header removed + * as part of header removal. This will be done as part of + * header insertion block. + * @rsvd1: reserved bits + * @priority: Rule priority. Added to distinguish rules order + * at the integrated table consisting from hashable and + * non-hashable parts + * @rsvd2: reserved bits + * @rule_id: rule ID that will be returned in the packet status + * @rsvd3: reserved bits + */ +struct ipa3_0_flt_rule_hw_hdr { + union { + u64 word; + struct { + u64 en_rule:16; + u64 action:5; + u64 rt_tbl_idx:5; + u64 retain_hdr:1; + u64 rsvd1:5; + u64 priority:10; + u64 rsvd2:6; + u64 rule_id:10; + u64 rsvd3:6; + } hdr; + } u; +}; + +/** + * struct ipa4_0_flt_rule_hw_hdr - HW header of IPA filter rule + * @word: filtering rule properties + * @en_rule: enable rule + * @action: post filtering action + * @rt_tbl_idx: index in routing table + * @retain_hdr: added to add back to the packet the header removed + * as part of header removal. This will be done as part of + * header insertion block. + * @pdn_idx: in case of go to src nat action possible to input the pdn index to + * the NAT block + * @set_metadata: enable metadata replacement in the NAT block + * @priority: Rule priority. Added to distinguish rules order + * at the integrated table consisting from hashable and + * non-hashable parts + * @rsvd2: reserved bits + * @rule_id: rule ID that will be returned in the packet status + * @rsvd3: reserved bits + */ +struct ipa4_0_flt_rule_hw_hdr { + union { + u64 word; + struct { + u64 en_rule : 16; + u64 action : 5; + u64 rt_tbl_idx : 5; + u64 retain_hdr : 1; + u64 pdn_idx : 4; + u64 set_metadata : 1; + u64 priority : 10; + u64 rsvd2 : 6; + u64 rule_id : 10; + u64 rsvd3 : 6; + } hdr; + } u; +}; + +int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type); +void ipahal_fltrt_destroy(void); + +#endif /* _IPAHAL_FLTRT_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c new file mode 100644 index 000000000000..50d8ddefd32c --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c @@ -0,0 +1,540 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include "ipahal_hw_stats.h" +#include "ipahal_hw_stats_i.h" +#include "ipahal_i.h" + +struct ipahal_hw_stats_obj { + struct ipahal_stats_init_pyld *(*generate_init_pyld)(void *params, + bool is_atomic_ctx); + int (*get_offset)(void *params, struct ipahal_stats_offset *out); + int (*parse_stats)(void *init_params, void *raw_stats, + void *parsed_stats); +}; + +static int _count_ones(u32 number) +{ + int count = 0; + + while (number) { + count++; + number = number & (number - 1); + } + + return count; +} + +static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_quota( + void *params, bool is_atomic_ctx) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_stats_init_quota *in = + (struct ipahal_stats_init_quota *)params; + int entries = _count_ones(in->enabled_bitmask); + + IPAHAL_DBG_LOW("entries = %d\n", entries); + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + + entries * sizeof(struct ipahal_stats_quota_hw), is_atomic_ctx); + if (!pyld) { + IPAHAL_ERR("no mem\n"); + return NULL; + } + + pyld->len = entries * sizeof(struct ipahal_stats_quota_hw); + return pyld; +} + +static int ipahal_get_offset_quota(void *params, + struct ipahal_stats_offset *out) +{ + struct ipahal_stats_get_offset_quota *in = + (struct ipahal_stats_get_offset_quota *)params; + int entries = _count_ones(in->init.enabled_bitmask); + + IPAHAL_DBG_LOW("\n"); + out->offset = 0; + out->size = entries * sizeof(struct ipahal_stats_quota_hw); + + return 0; +} + +static int ipahal_parse_stats_quota(void *init_params, void *raw_stats, + void *parsed_stats) +{ + struct ipahal_stats_init_quota *init = + (struct ipahal_stats_init_quota *)init_params; + struct ipahal_stats_quota_hw *raw_hw = + (struct ipahal_stats_quota_hw *)raw_stats; + struct ipahal_stats_quota_all *out = + (struct ipahal_stats_quota_all *)parsed_stats; + int stat_idx = 0; + int i; + + memset(out, 0, sizeof(*out)); + IPAHAL_DBG_LOW("\n"); + for (i = 0; i < IPAHAL_MAX_PIPES; i++) { + if (init->enabled_bitmask & (1 << i)) { + IPAHAL_DBG_LOW("pipe %d stat_idx %d\n", i, stat_idx); + out->stats[i].num_ipv4_bytes = + raw_hw[stat_idx].num_ipv4_bytes; + out->stats[i].num_ipv4_pkts = + raw_hw[stat_idx].num_ipv4_pkts; + out->stats[i].num_ipv6_pkts = + raw_hw[stat_idx].num_ipv6_pkts; + out->stats[i].num_ipv6_bytes = + raw_hw[stat_idx].num_ipv6_bytes; + stat_idx++; + } + } + + return 0; +} + +static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_tethering( + void *params, bool is_atomic_ctx) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_stats_init_tethering *in = + (struct ipahal_stats_init_tethering *)params; + int hdr_entries = _count_ones(in->prod_bitmask); + int entries = 0; + int i; + void *pyld_ptr; + u32 incremental_offset; + + IPAHAL_DBG_LOW("prod entries = %d\n", hdr_entries); + for (i = 0; i < sizeof(in->prod_bitmask) * 8; i++) { + if (in->prod_bitmask & (1 << i)) { + if (in->cons_bitmask[i] == 0) { + IPAHAL_ERR("no cons bitmask for prod %d\n", i); + return NULL; + } + entries += _count_ones(in->cons_bitmask[i]); + } + } + IPAHAL_DBG_LOW("sum all entries = %d\n", entries); + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + + hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) + + entries * sizeof(struct ipahal_stats_tethering_hw), + is_atomic_ctx); + if (!pyld) + return NULL; + + pyld->len = hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) + + entries * sizeof(struct ipahal_stats_tethering_hw); + + pyld_ptr = pyld->data; + incremental_offset = + (hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw)) + / 8; + for 
(i = 0; i < sizeof(in->prod_bitmask) * 8; i++) { + if (in->prod_bitmask & (1 << i)) { + struct ipahal_stats_tethering_hdr_hw *hdr = pyld_ptr; + + hdr->dst_mask = in->cons_bitmask[i]; + hdr->offset = incremental_offset; + IPAHAL_DBG_LOW("hdr->dst_mask=0x%x\n", hdr->dst_mask); + IPAHAL_DBG_LOW("hdr->offset=0x%x\n", hdr->offset); + /* add the stats entry */ + incremental_offset += _count_ones(in->cons_bitmask[i]) * + sizeof(struct ipahal_stats_tethering_hw) / 8; + pyld_ptr += sizeof(*hdr); + } + } + + return pyld; +} + +static int ipahal_get_offset_tethering(void *params, + struct ipahal_stats_offset *out) +{ + struct ipahal_stats_get_offset_tethering *in = + (struct ipahal_stats_get_offset_tethering *)params; + int entries = 0; + int i; + + for (i = 0; i < sizeof(in->init.prod_bitmask) * 8; i++) { + if (in->init.prod_bitmask & (1 << i)) { + if (in->init.cons_bitmask[i] == 0) { + IPAHAL_ERR("no cons bitmask for prod %d\n", i); + return -EPERM; + } + entries += _count_ones(in->init.cons_bitmask[i]); + } + } + IPAHAL_DBG_LOW("sum all entries = %d\n", entries); + + /* skip the header */ + out->offset = _count_ones(in->init.prod_bitmask) * + sizeof(struct ipahal_stats_tethering_hdr_hw); + out->size = entries * sizeof(struct ipahal_stats_tethering_hw); + + return 0; +} + +static int ipahal_parse_stats_tethering(void *init_params, void *raw_stats, + void *parsed_stats) +{ + struct ipahal_stats_init_tethering *init = + (struct ipahal_stats_init_tethering *)init_params; + struct ipahal_stats_tethering_hw *raw_hw = + (struct ipahal_stats_tethering_hw *)raw_stats; + struct ipahal_stats_tethering_all *out = + (struct ipahal_stats_tethering_all *)parsed_stats; + int i, j; + int stat_idx = 0; + + memset(out, 0, sizeof(*out)); + IPAHAL_DBG_LOW("\n"); + for (i = 0; i < IPAHAL_MAX_PIPES; i++) { + for (j = 0; j < IPAHAL_MAX_PIPES; j++) { + if ((init->prod_bitmask & (1 << i)) && + init->cons_bitmask[i] & (1 << j)) { + IPAHAL_DBG_LOW("prod %d cons %d\n", i, j); + IPAHAL_DBG_LOW("stat_idx %d\n", stat_idx); + out->stats[i][j].num_ipv4_bytes = + raw_hw[stat_idx].num_ipv4_bytes; + IPAHAL_DBG_LOW("num_ipv4_bytes %lld\n", + out->stats[i][j].num_ipv4_bytes); + out->stats[i][j].num_ipv4_pkts = + raw_hw[stat_idx].num_ipv4_pkts; + IPAHAL_DBG_LOW("num_ipv4_pkts %lld\n", + out->stats[i][j].num_ipv4_pkts); + out->stats[i][j].num_ipv6_pkts = + raw_hw[stat_idx].num_ipv6_pkts; + IPAHAL_DBG_LOW("num_ipv6_pkts %lld\n", + out->stats[i][j].num_ipv6_pkts); + out->stats[i][j].num_ipv6_bytes = + raw_hw[stat_idx].num_ipv6_bytes; + IPAHAL_DBG_LOW("num_ipv6_bytes %lld\n", + out->stats[i][j].num_ipv6_bytes); + stat_idx++; + } + } + } + + return 0; +} + +static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_flt_rt( + void *params, bool is_atomic_ctx) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_stats_init_flt_rt *in = + (struct ipahal_stats_init_flt_rt *)params; + int hdr_entries; + int num_rules = 0; + int i, start_entry; + void *pyld_ptr; + u32 incremental_offset; + + for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) + num_rules += _count_ones(in->rule_id_bitmask[i]); + + if (num_rules == 0) { + IPAHAL_ERR("no rule ids provided\n"); + return NULL; + } + IPAHAL_DBG_LOW("num_rules = %d\n", num_rules); + + hdr_entries = IPAHAL_MAX_RULE_ID_32; + for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) { + if (in->rule_id_bitmask[i] != 0) + break; + hdr_entries--; + } + start_entry = i; + + for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= start_entry; i--) { + if (in->rule_id_bitmask[i] != 0) + break; + hdr_entries--; + } + 
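	/*
	 * Worked example (bitmask values invented): if only rule_id_bitmask[0]
	 * and rule_id_bitmask[1] are non-zero, the two trimming loops above
	 * leave start_entry = 0 and hdr_entries = 2, so the payload carries
	 * exactly two ipahal_stats_flt_rt_hdr_hw words ahead of the per-rule
	 * counters.
	 */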
IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries); + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + + hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) + + num_rules * sizeof(struct ipahal_stats_flt_rt_hw), + is_atomic_ctx); + if (!pyld) { + IPAHAL_ERR("no mem\n"); + return NULL; + } + + pyld->len = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) + + num_rules * sizeof(struct ipahal_stats_flt_rt_hw); + + pyld_ptr = pyld->data; + incremental_offset = + (hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw)) + / 8; + for (i = start_entry; i < hdr_entries; i++) { + struct ipahal_stats_flt_rt_hdr_hw *hdr = pyld_ptr; + + hdr->en_mask = in->rule_id_bitmask[i]; + hdr->cnt_offset = incremental_offset; + /* add the stats entry */ + incremental_offset += _count_ones(in->rule_id_bitmask[i]) * + sizeof(struct ipahal_stats_flt_rt_hw) / 8; + pyld_ptr += sizeof(*hdr); + } + + return pyld; +} + +static int ipahal_get_offset_flt_rt(void *params, + struct ipahal_stats_offset *out) +{ + struct ipahal_stats_get_offset_flt_rt *in = + (struct ipahal_stats_get_offset_flt_rt *)params; + int i; + int hdr_entries; + int skip_rules = 0; + int start_entry; + int rule_bit = in->rule_id % 32; + int rule_idx = in->rule_id / 32; + + if (rule_idx >= IPAHAL_MAX_RULE_ID_32) { + IPAHAL_ERR("invalid rule_id %d\n", in->rule_id); + return -EPERM; + } + + hdr_entries = IPAHAL_MAX_RULE_ID_32; + for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) { + if (in->init.rule_id_bitmask[i] != 0) + break; + hdr_entries--; + } + + if (hdr_entries == 0) { + IPAHAL_ERR("no rule ids provided\n"); + return -EPERM; + } + start_entry = i; + + for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= 0; i--) { + if (in->init.rule_id_bitmask[i] != 0) + break; + hdr_entries--; + } + IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries); + + /* skip the header */ + out->offset = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw); + + /* skip the previous rules */ + for (i = start_entry; i < rule_idx; i++) + skip_rules += _count_ones(in->init.rule_id_bitmask[i]); + + for (i = 0; i < rule_bit; i++) + if (in->init.rule_id_bitmask[rule_idx] & (1 << i)) + skip_rules++; + + out->offset += skip_rules * sizeof(struct ipahal_stats_flt_rt_hw); + out->size = sizeof(struct ipahal_stats_flt_rt_hw); + + return 0; +} + +static int ipahal_parse_stats_flt_rt(void *init_params, void *raw_stats, + void *parsed_stats) +{ + struct ipahal_stats_flt_rt_hw *raw_hw = + (struct ipahal_stats_flt_rt_hw *)raw_stats; + struct ipahal_stats_flt_rt *out = + (struct ipahal_stats_flt_rt *)parsed_stats; + + memset(out, 0, sizeof(*out)); + IPAHAL_DBG_LOW("\n"); + out->num_packets = raw_hw->num_packets; + out->num_packets_hash = raw_hw->num_packets_hash; + + return 0; +} + +static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_drop( + void *params, bool is_atomic_ctx) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_stats_init_drop *in = + (struct ipahal_stats_init_drop *)params; + int entries = _count_ones(in->enabled_bitmask); + + IPAHAL_DBG_LOW("entries = %d\n", entries); + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + + entries * sizeof(struct ipahal_stats_drop_hw), is_atomic_ctx); + if (!pyld) + return NULL; + + pyld->len = entries * sizeof(struct ipahal_stats_drop_hw); + + return pyld; +} + +static int ipahal_get_offset_drop(void *params, + struct ipahal_stats_offset *out) +{ + struct ipahal_stats_get_offset_drop *in = + (struct ipahal_stats_get_offset_drop *)params; + int entries = _count_ones(in->init.enabled_bitmask); + + IPAHAL_DBG_LOW("\n"); + out->offset = 0; + out->size = 
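/*
 * Offset example for ipahal_get_offset_flt_rt() above (hypothetical masks):
 * with rule_id_bitmask[1] = 0x3 and rule_id_bitmask[2] = 0x10 and all other
 * words zero, start_entry = 1 and hdr_entries = 2, so the header region is
 * 2 * sizeof(struct ipahal_stats_flt_rt_hdr_hw) = 16 bytes. Asking for
 * rule_id = 68 (word 2, bit 4) skips the two rules enabled in word 1, giving
 * out->offset = 16 + 2 * sizeof(struct ipahal_stats_flt_rt_hw) = 32 and
 * out->size = 8.
 */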
entries * sizeof(struct ipahal_stats_drop_hw); + + return 0; +} + +static int ipahal_parse_stats_drop(void *init_params, void *raw_stats, + void *parsed_stats) +{ + struct ipahal_stats_init_drop *init = + (struct ipahal_stats_init_drop *)init_params; + struct ipahal_stats_drop_hw *raw_hw = + (struct ipahal_stats_drop_hw *)raw_stats; + struct ipahal_stats_drop_all *out = + (struct ipahal_stats_drop_all *)parsed_stats; + int stat_idx = 0; + int i; + + memset(out, 0, sizeof(*out)); + IPAHAL_DBG_LOW("\n"); + for (i = 0; i < IPAHAL_MAX_PIPES; i++) { + if (init->enabled_bitmask & (1 << i)) { + out->stats[i].drop_byte_cnt = + raw_hw[stat_idx].drop_byte_cnt; + out->stats[i].drop_packet_cnt = + raw_hw[stat_idx].drop_packet_cnt; + stat_idx++; + } + } + + return 0; +} + +static struct ipahal_hw_stats_obj + ipahal_hw_stats_objs[IPA_HW_MAX][IPAHAL_HW_STATS_MAX] = { + /* IPAv4 */ + [IPA_HW_v4_0][IPAHAL_HW_STATS_QUOTA] = { + ipahal_generate_init_pyld_quota, + ipahal_get_offset_quota, + ipahal_parse_stats_quota + }, + [IPA_HW_v4_0][IPAHAL_HW_STATS_TETHERING] = { + ipahal_generate_init_pyld_tethering, + ipahal_get_offset_tethering, + ipahal_parse_stats_tethering + }, + [IPA_HW_v4_0][IPAHAL_HW_STATS_FNR] = { + ipahal_generate_init_pyld_flt_rt, + ipahal_get_offset_flt_rt, + ipahal_parse_stats_flt_rt + }, + [IPA_HW_v4_0][IPAHAL_HW_STATS_DROP] = { + ipahal_generate_init_pyld_drop, + ipahal_get_offset_drop, + ipahal_parse_stats_drop + }, +}; + +int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type) +{ + int i; + int j; + struct ipahal_hw_stats_obj zero_obj; + struct ipahal_hw_stats_obj *hw_stat_ptr; + + IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type); + + if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) { + IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type); + return -EINVAL; + } + + memset(&zero_obj, 0, sizeof(zero_obj)); + for (i = IPA_HW_v4_0 ; i < ipa_hw_type ; i++) { + for (j = 0; j < IPAHAL_HW_STATS_MAX; j++) { + if (!memcmp(&ipahal_hw_stats_objs[i + 1][j], &zero_obj, + sizeof(struct ipahal_hw_stats_obj))) { + memcpy(&ipahal_hw_stats_objs[i + 1][j], + &ipahal_hw_stats_objs[i][j], + sizeof(struct ipahal_hw_stats_obj)); + } else { + /* + * explicitly overridden stat. 
+ * Check validity + */ + hw_stat_ptr = &ipahal_hw_stats_objs[i + 1][j]; + if (!hw_stat_ptr->get_offset) { + IPAHAL_ERR( + "stat=%d get_offset null ver=%d\n", + j, i+1); + WARN_ON(1); + } + if (!hw_stat_ptr->parse_stats) { + IPAHAL_ERR( + "stat=%d parse_stats null ver=%d\n", + j, i + 1); + WARN_ON(1); + } + } + } + } + + return 0; +} + +int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params, + struct ipahal_stats_offset *out) +{ + if (type < 0 || type >= IPAHAL_HW_STATS_MAX) { + IPAHAL_ERR("Invalid type stat=%d\n", type); + WARN_ON(1); + return -EFAULT; + } + + if (!params || !out) { + IPAHAL_ERR("Null arg\n"); + WARN_ON(1); + return -EFAULT; + } + + return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].get_offset( + params, out); +} + +struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld( + enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx) +{ + struct ipahal_hw_stats_obj *hw_obj_ptr; + + if (type < 0 || type >= IPAHAL_HW_STATS_MAX) { + IPAHAL_ERR("Invalid type stat=%d\n", type); + WARN_ON(1); + return NULL; + } + + if (WARN(!params, "Null arg\n")) + return NULL; + + hw_obj_ptr = &ipahal_hw_stats_objs[ipahal_ctx->hw_type][type]; + return hw_obj_ptr->generate_init_pyld(params, is_atomic_ctx); +} + +int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params, + void *raw_stats, void *parsed_stats) +{ + if (WARN((type < 0 || type >= IPAHAL_HW_STATS_MAX), + "Invalid type stat = %d\n", type)) + return -EFAULT; + + if (WARN((!raw_stats || !parsed_stats), "Null arg\n")) + return -EFAULT; + + return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].parse_stats( + init_params, raw_stats, parsed_stats); +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h new file mode 100644 index 000000000000..094f21bf936f --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
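/*
 * Usage sketch for the three entry points above, using the quota counters
 * as the example. Everything here is illustrative: example_read_quota_stats(),
 * ipa_sram_write() and ipa_sram_read() are hypothetical stand-ins for
 * whatever mechanism the caller uses to load the init image into IPA SRAM
 * and to read the counter window back (typically DMA_SHARED_MEM immediate
 * commands).
 */
static int example_read_quota_stats(u32 pipe_bitmask,
	struct ipahal_stats_quota_all *res)
{
	struct ipahal_stats_init_quota init = {
		.enabled_bitmask = pipe_bitmask,
	};
	struct ipahal_stats_get_offset_quota get_offset = { .init = init };
	struct ipahal_stats_offset offset;
	struct ipahal_stats_init_pyld *pyld;
	void *raw;
	int ret;

	/* 1) build the init image and load it into SRAM once */
	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_QUOTA, &init,
		false);
	if (!pyld)
		return -ENOMEM;
	ipa_sram_write(pyld->data, pyld->len);		/* hypothetical */
	ipahal_destroy_stats_init_pyld(pyld);

	/* 2) ask where the counters live and how large the window is */
	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_QUOTA, &get_offset,
		&offset);
	if (ret)
		return ret;

	/* 3) read that window back and let ipahal decode it */
	raw = kzalloc(offset.size, GFP_KERNEL);
	if (!raw)
		return -ENOMEM;
	ipa_sram_read(raw, offset.offset, offset.size);	/* hypothetical */
	ret = ipahal_parse_stats(IPAHAL_HW_STATS_QUOTA, &init, raw, res);
	kfree(raw);

	return ret;
}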
+ */ + +#ifndef _IPAHAL_HW_STATS_H_ +#define _IPAHAL_HW_STATS_H_ + +#include + +#define IPAHAL_MAX_PIPES 32 +#define IPAHAL_MAX_RULE_ID_32 (1024 / 32) /* 10 bits of rule id */ + +enum ipahal_hw_stats_type { + IPAHAL_HW_STATS_QUOTA, + IPAHAL_HW_STATS_TETHERING, + IPAHAL_HW_STATS_FNR, + IPAHAL_HW_STATS_DROP, + IPAHAL_HW_STATS_MAX +}; + +/* + * struct ipahal_stats_init_pyld - Statistics initialization payload + * @len: length of payload + * @data: actual payload data + */ +struct ipahal_stats_init_pyld { + u16 len; + u16 reserved; + u8 data[0]; +}; + +/* + * struct ipahal_stats_offset - Statistics offset parameters + * @offset: offset of the statistic from beginning of stats table + * @size: size of the statistics + */ +struct ipahal_stats_offset { + u32 offset; + u16 size; +}; + +/* + * struct ipahal_stats_init_quota - Initializations parameters for quota + * @enabled_bitmask: bit mask of pipes to be monitored + */ +struct ipahal_stats_init_quota { + u32 enabled_bitmask; +}; + +/* + * struct ipahal_stats_get_offset_quota - Get offset parameters for quota + * @init: initialization parameters used in initialization of stats + */ +struct ipahal_stats_get_offset_quota { + struct ipahal_stats_init_quota init; +}; + +/* + * struct ipahal_stats_quota - Quota statistics + * @num_ipv4_bytes: IPv4 bytes + * @num_ipv6_bytes: IPv6 bytes + * @num_ipv4_pkts: IPv4 packets + * @num_ipv6_pkts: IPv6 packets + */ +struct ipahal_stats_quota { + u64 num_ipv4_bytes; + u64 num_ipv6_bytes; + u64 num_ipv4_pkts; + u64 num_ipv6_pkts; +}; + +/* + * struct ipahal_stats_quota_all - Quota statistics for all pipes + * @stats: array of statistics per pipe + */ +struct ipahal_stats_quota_all { + struct ipahal_stats_quota stats[IPAHAL_MAX_PIPES]; +}; + +/* + * struct ipahal_stats_init_tethering - Initializations parameters for tethering + * @prod_bitmask: bit mask of producer pipes to be monitored + * @cons_bitmask: bit mask of consumer pipes to be monitored per producer + */ +struct ipahal_stats_init_tethering { + u32 prod_bitmask; + u32 cons_bitmask[IPAHAL_MAX_PIPES]; +}; + +/* + * struct ipahal_stats_get_offset_tethering - Get offset parameters for + * tethering + * @init: initialization parameters used in initialization of stats + */ +struct ipahal_stats_get_offset_tethering { + struct ipahal_stats_init_tethering init; +}; + +/* + * struct ipahal_stats_tethering - Tethering statistics + * @num_ipv4_bytes: IPv4 bytes + * @num_ipv6_bytes: IPv6 bytes + * @num_ipv4_pkts: IPv4 packets + * @num_ipv6_pkts: IPv6 packets + */ +struct ipahal_stats_tethering { + u64 num_ipv4_bytes; + u64 num_ipv6_bytes; + u64 num_ipv4_pkts; + u64 num_ipv6_pkts; +}; + +/* + * struct ipahal_stats_tethering_all - Tethering statistics for all pipes + * @stats: matrix of statistics per pair of pipes + */ +struct ipahal_stats_tethering_all { + struct ipahal_stats_tethering + stats[IPAHAL_MAX_PIPES][IPAHAL_MAX_PIPES]; +}; + +/* + * struct ipahal_stats_init_flt_rt - Initializations parameters for flt_rt + * @rule_id_bitmask: array describes which rule ids to monitor. 
+ * rule_id bit is determined by: + * index to the array => rule_id / 32 + * bit to enable => rule_id % 32 + */ +struct ipahal_stats_init_flt_rt { + u32 rule_id_bitmask[IPAHAL_MAX_RULE_ID_32]; +}; + +/* + * struct ipahal_stats_get_offset_flt_rt - Get offset parameters for flt_rt + * @init: initialization parameters used in initialization of stats + * @rule_id: rule_id to get the offset for + */ +struct ipahal_stats_get_offset_flt_rt { + struct ipahal_stats_init_flt_rt init; + u32 rule_id; +}; + +/* + * struct ipahal_stats_flt_rt - flt_rt statistics + * @num_packets: Total number of packets hit this rule + * @num_packets_hash: Total number of packets hit this rule in hash table + */ +struct ipahal_stats_flt_rt { + u32 num_packets; + u32 num_packets_hash; +}; + +/* + * struct ipahal_stats_init_drop - Initializations parameters for Drop + * @enabled_bitmask: bit mask of pipes to be monitored + */ +struct ipahal_stats_init_drop { + u32 enabled_bitmask; +}; + +/* + * struct ipahal_stats_get_offset_drop - Get offset parameters for Drop + * @init: initialization parameters used in initialization of stats + */ +struct ipahal_stats_get_offset_drop { + struct ipahal_stats_init_drop init; +}; + +/* + * struct ipahal_stats_drop - Packet Drop statistics + * @drop_packet_cnt: number of packets dropped + * @drop_byte_cnt: number of bytes dropped + */ +struct ipahal_stats_drop { + u32 drop_packet_cnt; + u32 drop_byte_cnt; +}; + +/* + * struct ipahal_stats_drop_all - Drop statistics for all pipes + * @stats: array of statistics per pipes + */ +struct ipahal_stats_drop_all { + struct ipahal_stats_drop stats[IPAHAL_MAX_PIPES]; +}; + +/* + * ipahal_stats_generate_init_pyld - Generate the init payload for stats + * @type: type of stats + * @params: init_pyld parameters based of stats type + * @is_atomic_ctx: is calling context atomic ? + * + * This function will generate the initialization payload for a particular + * statistic in hardware. IPA driver is expected to use this payload to + * initialize the SRAM. + * + * Return: pointer to ipahal_stats_init_pyld on success or NULL on failure. + */ +struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld( + enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx); + +/* + * ipahal_destroy_stats_init_pyld() - Destroy/Release bulk that was built + * by the ipahal_stats_generate_init_pyld function. + */ +static inline void ipahal_destroy_stats_init_pyld( + struct ipahal_stats_init_pyld *pyld) +{ + kfree(pyld); +} + +/* + * ipahal_stats_get_offset - Get the offset / size of payload for stats + * @type: type of stats + * @params: get_offset parameters based of stats type + * @out: out parameter for the offset and size. + * + * This function will return the offset of the counter from beginning of + * the table.IPA driver is expected to read this portion in SRAM and pass + * it to ipahal_parse_stats() to interprete the stats. 
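/*
 * Illustrative helper (hypothetical, shown only as an example of filling the
 * bitmask above): to monitor a given rule id, set bit (rule_id % 32) in word
 * (rule_id / 32). For instance, rule ids 5 and 40 set bit 5 of word 0 and
 * bit 8 of word 1 respectively.
 */
static inline void example_flt_rt_enable_rule(
	struct ipahal_stats_init_flt_rt *init, u32 rule_id)
{
	if (rule_id < IPAHAL_MAX_RULE_ID_32 * 32)
		init->rule_id_bitmask[rule_id / 32] |= 1U << (rule_id % 32);
}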
+ * + * Return: 0 on success and negative on failure + */ +int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params, + struct ipahal_stats_offset *out); + +/* + * ipahal_parse_stats - parse statistics + * @type: type of stats + * @init_params: init_pyld parameters used on init + * @raw_stats: stats read from IPA SRAM + * @parsed_stats: pointer to parsed stats based on type + * + * Return: 0 on success and negative on failure + */ +int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params, + void *raw_stats, void *parsed_stats); + + +#endif /* _IPAHAL_HW_STATS_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h new file mode 100644 index 000000000000..530cb51f6199 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPAHAL_HW_STATS_I_H_ +#define _IPAHAL_HW_STATS_I_H_ + +#include "ipahal_hw_stats.h" + +int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type); + +struct ipahal_stats_quota_hw { + u64 num_ipv4_bytes; + u64 num_ipv4_pkts:32; + u64 num_ipv6_pkts:32; + u64 num_ipv6_bytes; +}; + +struct ipahal_stats_tethering_hdr_hw { + u64 dst_mask:32; + u64 offset:32; +}; + +struct ipahal_stats_tethering_hw { + u64 num_ipv4_bytes; + u64 num_ipv4_pkts:32; + u64 num_ipv6_pkts:32; + u64 num_ipv6_bytes; +}; + +struct ipahal_stats_flt_rt_hdr_hw { + u64 en_mask:32; + u64 reserved:16; + u64 cnt_offset:16; +}; + +struct ipahal_stats_flt_rt_hw { + u64 num_packets_hash:32; + u64 num_packets:32; +}; + +struct ipahal_stats_drop_hw { + u64 drop_byte_cnt:40; + u64 drop_packet_cnt:24; +}; + +#endif /* _IPAHAL_HW_STATS_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h new file mode 100644 index 000000000000..94aedde36666 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h @@ -0,0 +1,724 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPAHAL_I_H_ +#define _IPAHAL_I_H_ + +#include +#include "../../ipa_common_i.h" + +#define IPAHAL_DRV_NAME "ipahal" + +#define IPAHAL_DBG(fmt, args...) \ + do { \ + pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAHAL_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAHAL_ERR(fmt, args...) \ + do { \ + pr_err(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAHAL_ERR_RL(fmt, args...) 
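/*
 * Layout note (derived from the bit-widths in the structures above, assuming
 * the usual packing of u64 bit-fields): each quota and tethering stats row is
 * 24 bytes, while the tethering/flt_rt header entries and the flt_rt/drop
 * rows are 8 bytes each. That is consistent with the "/ 8" arithmetic in
 * ipahal_hw_stats.c, which suggests the offsets stored in the header entries
 * are expressed in 8-byte units. A C11-capable toolchain could check this
 * explicitly, e.g.:
 *
 *	_Static_assert(sizeof(struct ipahal_stats_quota_hw) == 24,
 *		       "quota stats row expected to be 24 bytes");
 *	_Static_assert(sizeof(struct ipahal_stats_flt_rt_hw) == 8,
 *		       "flt_rt stats row expected to be 8 bytes");
 */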
\ + do { \ + pr_err_ratelimited_ipa(IPAHAL_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAHAL_DBG_REG(fmt, args...) \ + do { \ + pr_err(fmt, ## args); \ + IPA_IPC_LOGGING(ipahal_ctx->regdumpbuf, \ + " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAHAL_DBG_REG_IPC_ONLY(fmt, args...) \ + IPA_IPC_LOGGING(ipahal_ctx->regdumpbuf, " %s:%d " fmt, ## args) + +#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \ + (kzalloc((__size), ((__is_atomic_ctx) ? GFP_ATOMIC : GFP_KERNEL))) + +#define IPAHAL_IPC_LOG_PAGES 50 + +/* + * struct ipahal_context - HAL global context data + * @hw_type: IPA H/W type/version. + * @base: Base address to be used for accessing IPA memory. This is + * I/O memory mapped address. + * Controlled by debugfs. default is off + * @dent: Debugfs folder dir entry + * @ipa_pdev: IPA Platform Device. Will be used for DMA memory + * @empty_fltrt_tbl: Empty table to be used at tables init. + */ +struct ipahal_context { + enum ipa_hw_type hw_type; + void __iomem *base; + struct dentry *dent; + struct device *ipa_pdev; + struct ipa_mem_buffer empty_fltrt_tbl; + void *regdumpbuf; +}; + +extern struct ipahal_context *ipahal_ctx; + + + +/* Immediate commands H/W structures */ + +/* + * struct ipa_imm_cmd_hw_ip_v4_filter_init - IP_V4_FILTER_INIT command payload + * in H/W format. + * Inits IPv4 filter block. + * @hash_rules_addr: Addr in system mem where ipv4 hashable flt rules starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should + * be copied to + * @rsvd: reserved + * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts + */ +struct ipa_imm_cmd_hw_ip_v4_filter_init { + u64 hash_rules_addr:64; + u64 hash_rules_size:12; + u64 hash_local_addr:16; + u64 nhash_rules_size:12; + u64 nhash_local_addr:16; + u64 rsvd:8; + u64 nhash_rules_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_ip_v6_filter_init - IP_V6_FILTER_INIT command payload + * in H/W format. + * Inits IPv6 filter block. + * @hash_rules_addr: Addr in system mem where ipv6 hashable flt rules starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should + * be copied to + * @rsvd: reserved + * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts + */ +struct ipa_imm_cmd_hw_ip_v6_filter_init { + u64 hash_rules_addr:64; + u64 hash_rules_size:12; + u64 hash_local_addr:16; + u64 nhash_rules_size:12; + u64 nhash_local_addr:16; + u64 rsvd:8; + u64 nhash_rules_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_ip_v4_nat_init - IP_V4_NAT_INIT command payload + * in H/W format. + * Inits IPv4 NAT block. Initiate NAT table with it dimensions, location + * cache address and other related parameters. 
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start + * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT + * table starts. IPv4 NAT rules that result in hash collision are located + * in this table. + * @index_table_addr: Addr in sys/shared mem where index table, which points + * to NAT table starts + * @index_table_expansion_addr: Addr in sys/shared mem where expansion index + * table starts + * @table_index: For future support of multiple NAT tables + * @rsvd1: reserved + * @ipv4_rules_addr_type: ipv4_rules_addr in sys or shared mem + * @ipv4_expansion_rules_addr_type: ipv4_expansion_rules_addr in + * sys or shared mem + * @index_table_addr_type: index_table_addr in sys or shared mem + * @index_table_expansion_addr_type: index_table_expansion_addr in + * sys or shared mem + * @size_base_tables: Num of entries in NAT tbl and idx tbl (each) + * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion + * idx tbl (each) + * @rsvd2: reserved + * @public_addr_info: Public IP addresses info suitable to the IPA H/W version + * IPA H/W >= 4.0 - PDN config table offset in SMEM + * IPA H/W < 4.0 - The public IP address + */ +struct ipa_imm_cmd_hw_ip_v4_nat_init { + u64 ipv4_rules_addr:64; + u64 ipv4_expansion_rules_addr:64; + u64 index_table_addr:64; + u64 index_table_expansion_addr:64; + u64 table_index:3; + u64 rsvd1:1; + u64 ipv4_rules_addr_type:1; + u64 ipv4_expansion_rules_addr_type:1; + u64 index_table_addr_type:1; + u64 index_table_expansion_addr_type:1; + u64 size_base_tables:12; + u64 size_expansion_tables:10; + u64 rsvd2:2; + u64 public_addr_info:32; +}; + +/* + * struct ipa_imm_cmd_hw_ip_v6_ct_init - IP_V6_CONN_TRACK_INIT command payload + * in H/W format. + * Inits IPv6CT block. Initiate IPv6CT table with it dimensions, location + * cache address and other related parameters. + * @table_addr: Address in sys/shared mem where IPv6CT rules start + * @expansion_table_addr: Address in sys/shared mem where IPv6CT expansion + * table starts. IPv6CT rules that result in hash collision are located + * in this table. + * @table_index: For future support of multiple IPv6CT tables + * @rsvd1: reserved + * @table_addr_type: table_addr in sys or shared mem + * @expansion_table_addr_type: expansion_table_addr in sys or shared mem + * @rsvd2: reserved + * @size_base_tables: Number of entries in IPv6CT table + * @size_expansion_tables: Number of entries in IPv6CT expansion table + * @rsvd3: reserved + */ +struct ipa_imm_cmd_hw_ip_v6_ct_init { + u64 table_addr:64; + u64 expansion_table_addr:64; + u64 table_index:3; + u64 rsvd1:1; + u64 table_addr_type:1; + u64 expansion_table_addr_type:1; + u64 rsvd2:2; + u64 size_base_table:12; + u64 size_expansion_table:10; + u64 rsvd3:34; +}; + +/* + * struct ipa_imm_cmd_hw_ip_v4_routing_init - IP_V4_ROUTING_INIT command payload + * in H/W format. 
+ * Inits IPv4 routing table/structure - with the rules and other related params + * @hash_rules_addr: Addr in system mem where ipv4 hashable rt rules starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should + * be copied to + * @rsvd: reserved + * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts + */ +struct ipa_imm_cmd_hw_ip_v4_routing_init { + u64 hash_rules_addr:64; + u64 hash_rules_size:12; + u64 hash_local_addr:16; + u64 nhash_rules_size:12; + u64 nhash_local_addr:16; + u64 rsvd:8; + u64 nhash_rules_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_ip_v6_routing_init - IP_V6_ROUTING_INIT command payload + * in H/W format. + * Inits IPv6 routing table/structure - with the rules and other related params + * @hash_rules_addr: Addr in system mem where ipv6 hashable rt rules starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should + * be copied to + * @rsvd: reserved + * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts + */ +struct ipa_imm_cmd_hw_ip_v6_routing_init { + u64 hash_rules_addr:64; + u64 hash_rules_size:12; + u64 hash_local_addr:16; + u64 nhash_rules_size:12; + u64 nhash_local_addr:16; + u64 rsvd:8; + u64 nhash_rules_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_hdr_init_local - HDR_INIT_LOCAL command payload + * in H/W format. + * Inits hdr table within local mem with the hdrs and their length. + * @hdr_table_addr: Word address in sys mem where the table starts (SRC) + * @size_hdr_table: Size of the above (in bytes) + * @hdr_addr: header address in IPA sram (used as DST for memory copy) + * @rsvd: reserved + */ +struct ipa_imm_cmd_hw_hdr_init_local { + u64 hdr_table_addr:64; + u64 size_hdr_table:12; + u64 hdr_addr:16; + u64 rsvd:4; +}; + +/* + * struct ipa_imm_cmd_hw_nat_dma - NAT_DMA command payload + * in H/W format + * Perform DMA operation on NAT related mem addressess. Copy data into + * different locations within NAT associated tbls. (For add/remove NAT rules) + * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op. + * @rsvd1: reserved + * @base_addr: Base addr to which the DMA operation should be performed. + * @rsvd2: reserved + * @offset: offset in bytes from base addr to write 'data' to + * @data: data to be written + * @rsvd3: reserved + */ +struct ipa_imm_cmd_hw_nat_dma { + u64 table_index:3; + u64 rsvd1:1; + u64 base_addr:2; + u64 rsvd2:2; + u64 offset:32; + u64 data:16; + u64 rsvd3:8; +}; + +/* + * struct ipa_imm_cmd_hw_table_dma_ipav4 - TABLE_DMA command payload + * in H/W format + * Perform DMA operation on NAT and ipv6 connection tracking related mem + * addresses. Copy data into different locations within NAT associated tbls + * (For add/remove NAT rules) + * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op. + * @rsvd1: reserved + * @base_addr: Base addr to which the DMA operation should be performed. 
+ * @rsvd2: reserved + * @offset: offset in bytes from base addr to write 'data' to + * @data: data to be written + * @rsvd3: reserved + */ +struct ipa_imm_cmd_hw_table_dma_ipav4 { + u64 table_index : 3; + u64 rsvd1 : 1; + u64 base_addr : 3; + u64 rsvd2 : 1; + u64 offset : 32; + u64 data : 16; + u64 rsvd3 : 8; +}; + +/* + * struct ipa_imm_cmd_hw_hdr_init_system - HDR_INIT_SYSTEM command payload + * in H/W format. + * Inits hdr table within sys mem with the hdrs and their length. + * @hdr_table_addr: Word address in system memory where the hdrs tbl starts. + */ +struct ipa_imm_cmd_hw_hdr_init_system { + u64 hdr_table_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_ip_packet_init - IP_PACKET_INIT command payload + * in H/W format. + * Configuration for specific IP pkt. Shall be called prior to an IP pkt + * data. Pkt will not go through IP pkt processing. + * @destination_pipe_index: Destination pipe index (in case routing + * is enabled, this field will overwrite the rt rule) + * @rsvd: reserved + */ +struct ipa_imm_cmd_hw_ip_packet_init { + u64 destination_pipe_index:5; + u64 rsv1:59; +}; + +/* + * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload + * in H/W format. + * Write value to register. Allows reg changes to be synced with data packet + * and other immediate command. Can be used to access the sram + * @sw_rsvd: Ignored by H/W. May be used by S/W + * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait + * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr + * @value: value to write to register + * @value_mask: mask specifying which value bits to write to the register + * @pipeline_clear_options: options for pipeline to clear + * 0: HPS - no pkt inside HPS (not grp specific) + * 1: source group - The immediate cmd src grp does not use any pkt ctxs + * 2: Wait until no pkt reside inside IPA pipeline + * 3: reserved + * @rsvd: reserved - should be set to zero + */ +struct ipa_imm_cmd_hw_register_write { + u64 sw_rsvd:15; + u64 skip_pipeline_clear:1; + u64 offset:16; + u64 value:32; + u64 value_mask:32; + u64 pipeline_clear_options:2; + u64 rsvd:30; +}; + +/* + * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload + * in H/W format. + * Write value to register. Allows reg changes to be synced with data packet + * and other immediate command. Can be used to access the sram + * @sw_rsvd: Ignored by H/W. May be used by S/W + * @offset_high: high bits of the Offset field - bits 17-20 + * @rsvd: reserved - should be set to zero + * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr + * @value: value to write to register + * @value_mask: mask specifying which value bits to write to the register + * @rsvd2: reserved - should be set to zero + */ +struct ipa_imm_cmd_hw_register_write_v_4_0 { + u64 sw_rsvd:11; + u64 offset_high:4; + u64 rsvd:1; + u64 offset:16; + u64 value:32; + u64 value_mask:32; + u64 rsvd2:32; +}; + +/* + * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload + * in H/W format. + * Perform mem copy into or out of the SW area of IPA local mem + * @sw_rsvd: Ignored by H/W. My be used by S/W + * @size: Size in bytes of data to copy. Expected size is up to 2K bytes + * @local_addr: Address in IPA local memory + * @direction: Read or write? + * 0: IPA write, Write to local address from system address + * 1: IPA read, Read from local address to system address + * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 
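/*
 * Illustrative helper (hypothetical): based on the field descriptions above,
 * a register offset wider than 16 bits is split between 'offset' (lower 16
 * bits) and 'offset_high' (the next 4 bits) in the v4.0 REGISTER_WRITE
 * payload.
 */
static inline void example_reg_write_set_offset(
	struct ipa_imm_cmd_hw_register_write_v_4_0 *cmd, u32 reg_offset)
{
	cmd->offset = reg_offset & 0xFFFF;		/* lower 16 bits */
	cmd->offset_high = (reg_offset >> 16) & 0xF;	/* upper 4 bits */
}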
1 don't wait + * @pipeline_clear_options: options for pipeline to clear + * 0: HPS - no pkt inside HPS (not grp specific) + * 1: source group - The immediate cmd src grp does npt use any pkt ctxs + * 2: Wait until no pkt reside inside IPA pipeline + * 3: reserved + * @rsvd: reserved - should be set to zero + * @system_addr: Address in system memory + */ +struct ipa_imm_cmd_hw_dma_shared_mem { + u64 sw_rsvd:16; + u64 size:16; + u64 local_addr:16; + u64 direction:1; + u64 skip_pipeline_clear:1; + u64 pipeline_clear_options:2; + u64 rsvd:12; + u64 system_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload + * in H/W format. + * Perform mem copy into or out of the SW area of IPA local mem + * @sw_rsvd: Ignored by H/W. My be used by S/W + * @size: Size in bytes of data to copy. Expected size is up to 2K bytes + * @clear_after_read: Clear local memory at the end of a read operation allows + * atomic read and clear if HPS is clear. Ignore for writes. + * @local_addr: Address in IPA local memory + * @direction: Read or write? + * 0: IPA write, Write to local address from system address + * 1: IPA read, Read from local address to system address + * @rsvd: reserved - should be set to zero + * @system_addr: Address in system memory + */ +struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 { + u64 sw_rsvd:15; + u64 clear_after_read:1; + u64 size:16; + u64 local_addr:16; + u64 direction:1; + u64 rsvd:15; + u64 system_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_ip_packet_tag_status - + * IP_PACKET_TAG_STATUS command payload in H/W format. + * This cmd is used for to allow SW to track HW processing by setting a TAG + * value that is passed back to SW inside Packet Status information. + * TAG info will be provided as part of Packet Status info generated for + * the next pkt transferred over the pipe. + * This immediate command must be followed by a packet in the same transfer. + * @sw_rsvd: Ignored by H/W. My be used by S/W + * @tag: Tag that is provided back to SW + */ +struct ipa_imm_cmd_hw_ip_packet_tag_status { + u64 sw_rsvd:16; + u64 tag:48; +}; + +/* + * struct ipa_imm_cmd_hw_dma_task_32b_addr - + * IPA_DMA_TASK_32B_ADDR command payload in H/W format. + * Used by clients using 32bit addresses. Used to perform DMA operation on + * multiple descriptors. + * The Opcode is dynamic, where it holds the number of buffer to process + * @sw_rsvd: Ignored by H/W. My be used by S/W + * @cmplt: Complete flag: When asserted IPA will interrupt SW when the entire + * DMA related data was completely xfered to its destination. + * @eof: Enf Of Frame flag: When asserted IPA will assert the EOT to the + * dest client. This is used used for aggr sequence + * @flsh: Flush flag: When asserted, pkt will go through the IPA blocks but + * will not be xfered to dest client but rather will be discarded + * @lock: Lock pipe flag: When asserted, IPA will stop processing descriptors + * from other EPs in the same src grp (RX queue) + * @unlock: Unlock pipe flag: When asserted, IPA will stop exclusively + * servicing current EP out of the src EPs of the grp (RX queue) + * @size1: Size of buffer1 data + * @addr1: Pointer to buffer1 data + * @packet_size: Total packet size. If a pkt send using multiple DMA_TASKs, + * only the first one needs to have this field set. It will be ignored + * in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK + * must contain this field (2 or more buffers) or EOT. 
+ */ +struct ipa_imm_cmd_hw_dma_task_32b_addr { + u64 sw_rsvd:11; + u64 cmplt:1; + u64 eof:1; + u64 flsh:1; + u64 lock:1; + u64 unlock:1; + u64 size1:16; + u64 addr1:32; + u64 packet_size:16; +}; + + + +/* IPA Status packet H/W structures and info */ + +/* + * struct ipa_status_pkt_hw - IPA status packet payload in H/W format. + * This structure describes the status packet H/W structure for the + * following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET, + * IPA_STATUS_SUSPENDED_PACKET. + * Other statuses types has different status packet structure. + * @status_opcode: The Type of the status (Opcode). + * @exception: (not bitmask) - the first exception that took place. + * In case of exception, src endp and pkt len are always valid. + * @status_mask: Bit mask specifying on which H/W blocks the pkt was processed. + * @pkt_len: Pkt pyld len including hdr, include retained hdr if used. Does + * not include padding or checksum trailer len. + * @endp_src_idx: Source end point index. + * @rsvd1: reserved + * @endp_dest_idx: Destination end point index. + * Not valid in case of exception + * @rsvd2: reserved + * @metadata: meta data value used by packet + * @flt_local: Filter table location flag: Does matching flt rule belongs to + * flt tbl that resides in lcl memory? (if not, then system mem) + * @flt_hash: Filter hash hit flag: Does matching flt rule was in hash tbl? + * @flt_global: Global filter rule flag: Does matching flt rule belongs to + * the global flt tbl? (if not, then the per endp tables) + * @flt_ret_hdr: Retain header in filter rule flag: Does matching flt rule + * specifies to retain header? + * Starting IPA4.5, this will be true only if packet has L2 header. + * @flt_rule_id: The ID of the matching filter rule. This info can be combined + * with endp_src_idx to locate the exact rule. ID=0x3FF reserved to specify + * flt miss. In case of miss, all flt info to be ignored + * @rt_local: Route table location flag: Does matching rt rule belongs to + * rt tbl that resides in lcl memory? (if not, then system mem) + * @rt_hash: Route hash hit flag: Does matching rt rule was in hash tbl? + * @ucp: UC Processing flag. + * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match + * @rt_rule_id: The ID of the matching rt rule. This info can be combined + * with rt_tbl_idx to locate the exact rule. ID=0x3FF reserved to specify + * rt miss. In case of miss, all rt info to be ignored + * @nat_hit: NAT hit flag: Was their NAT hit? + * @nat_entry_idx: Index of the NAT entry used of NAT processing + * @nat_type: Defines the type of the NAT operation: + * 00: No NAT + * 01: Source NAT + * 10: Destination NAT + * 11: Reserved + * @tag_info: S/W defined value provided via immediate command + * @seq_num: Per source endp unique packet sequence number + * @time_of_day_ctr: running counter from IPA clock + * @hdr_local: Header table location flag: In header insertion, was the header + * taken from the table resides in local memory? (If no, then system mem) + * @hdr_offset: Offset of used header in the header table + * @frag_hit: Frag hit flag: Was their frag rule hit in H/W frag table? 
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit + * @hw_specific: H/W specific reserved value + */ +struct ipa_pkt_status_hw { + u64 status_opcode:8; + u64 exception:8; + u64 status_mask:16; + u64 pkt_len:16; + u64 endp_src_idx:5; + u64 rsvd1:3; + u64 endp_dest_idx:5; + u64 rsvd2:3; + u64 metadata:32; + u64 flt_local:1; + u64 flt_hash:1; + u64 flt_global:1; + u64 flt_ret_hdr:1; + u64 flt_rule_id:10; + u64 rt_local:1; + u64 rt_hash:1; + u64 ucp:1; + u64 rt_tbl_idx:5; + u64 rt_rule_id:10; + u64 nat_hit:1; + u64 nat_entry_idx:13; + u64 nat_type:2; + u64 tag_info:48; + u64 seq_num:8; + u64 time_of_day_ctr:24; + u64 hdr_local:1; + u64 hdr_offset:10; + u64 frag_hit:1; + u64 frag_rule:4; + u64 hw_specific:16; +}; + +/* Size of H/W Packet Status */ +#define IPA3_0_PKT_STATUS_SIZE 32 + +/* Headers and processing context H/W structures and definitions */ + +/* uCP command numbers */ +#define IPA_HDR_UCP_802_3_TO_802_3 6 +#define IPA_HDR_UCP_802_3_TO_ETHII 7 +#define IPA_HDR_UCP_ETHII_TO_802_3 8 +#define IPA_HDR_UCP_ETHII_TO_ETHII 9 +#define IPA_HDR_UCP_L2TP_HEADER_ADD 10 +#define IPA_HDR_UCP_L2TP_HEADER_REMOVE 11 + +/* Processing context TLV type */ +#define IPA_PROC_CTX_TLV_TYPE_END 0 +#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1 +#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3 + +/** + * struct ipa_hw_hdr_proc_ctx_tlv - + * HW structure of IPA processing context header - TLV part + * @type: 0 - end type + * 1 - header addition type + * 3 - processing command type + * @length: number of bytes after tlv + * for type: + * 0 - needs to be 0 + * 1 - header addition length + * 3 - number of 32B including type and length. + * @value: specific value for type + * for type: + * 0 - needs to be 0 + * 1 - header length + * 3 - command ID (see IPA_HDR_UCP_* definitions) + */ +struct ipa_hw_hdr_proc_ctx_tlv { + u32 type:8; + u32 length:8; + u32 value:16; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_hdr_add - + * HW structure of IPA processing context - add header tlv + * @tlv: IPA processing context TLV + * @hdr_addr: processing context header address + */ +struct ipa_hw_hdr_proc_ctx_hdr_add { + struct ipa_hw_hdr_proc_ctx_tlv tlv; + u32 hdr_addr; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr - + * HW structure of IPA processing context - add l2tp header tlv + * @tlv: IPA processing context TLV + * @l2tp_params: l2tp parameters + */ +struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr { + struct ipa_hw_hdr_proc_ctx_tlv tlv; + struct ipa_l2tp_header_add_procparams l2tp_params; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr - + * HW structure of IPA processing context - remove l2tp header tlv + * @tlv: IPA processing context TLV + * @l2tp_params: l2tp parameters + */ +struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr { + struct ipa_hw_hdr_proc_ctx_tlv tlv; + struct ipa_l2tp_header_remove_procparams l2tp_params; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_add_hdr_seq - + * IPA processing context header - add header sequence + * @hdr_add: add header command + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hw_hdr_proc_ctx_add_hdr_seq { + struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hw_hdr_proc_ctx_tlv end; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq - + * IPA processing context header - process command sequence + * @hdr_add: add header command + * @cmd: tlv processing command (cmd.type must be 3) + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq { + struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hw_hdr_proc_ctx_tlv 
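/*
 * The bit-widths above add up to exactly four 64-bit words (4 * 64 bits),
 * i.e. 32 bytes, which matches IPA3_0_PKT_STATUS_SIZE above. Purely as an
 * illustration, a C11-capable toolchain could assert this:
 *
 *	_Static_assert(sizeof(struct ipa_pkt_status_hw) ==
 *		       IPA3_0_PKT_STATUS_SIZE,
 *		       "status packet expected to be 32 bytes");
 */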
cmd; + struct ipa_hw_hdr_proc_ctx_tlv end; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq - + * IPA processing context header - process command sequence + * @hdr_add: add header command + * @l2tp_params: l2tp params for header addition + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq { + struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr l2tp_params; + struct ipa_hw_hdr_proc_ctx_tlv end; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq - + * IPA processing context header - process command sequence + * @hdr_add: add header command + * @l2tp_params: l2tp params for header removal + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq { + struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr l2tp_params; + struct ipa_hw_hdr_proc_ctx_tlv end; +}; + +#endif /* _IPAHAL_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c new file mode 100644 index 000000000000..616d1bd90439 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include +#include "ipahal_nat.h" +#include "ipahal_nat_i.h" +#include "ipahal_i.h" + +#define IPA_64_LOW_32_MASK (0xFFFFFFFF) +#define IPA_64_HIGH_32_MASK (0xFFFFFFFF00000000ULL) +#define IPAHAL_NAT_INVALID_PROTOCOL (0xFF) + +static const char *ipahal_nat_type_to_str[IPA_NAT_MAX] = { + __stringify(IPAHAL_NAT_IPV4), + __stringify(IPAHAL_NAT_IPV4_INDEX), + __stringify(IPAHAL_NAT_IPV4_PDN), + __stringify(IPAHAL_NAT_IPV6CT) +}; + +static size_t ipa_nat_ipv4_entry_size_v_3_0(void) +{ + return sizeof(struct ipa_nat_hw_ipv4_entry); +} + +static size_t ipa_nat_ipv4_index_entry_size_v_3_0(void) +{ + return sizeof(struct ipa_nat_hw_indx_entry); +} + +static size_t ipa_nat_ipv4_pdn_entry_size_v_4_0(void) +{ + return sizeof(struct ipa_nat_hw_pdn_entry); +} + +static size_t ipa_nat_ipv6ct_entry_size_v_4_0(void) +{ + return sizeof(struct ipa_nat_hw_ipv6ct_entry); +} + +static bool ipa_nat_ipv4_is_entry_zeroed_v_3_0(const void *entry) +{ + struct ipa_nat_hw_ipv4_entry zero_entry = { 0 }; + + return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? false : true; +} + +static bool ipa_nat_ipv4_is_index_entry_zeroed_v_3_0(const void *entry) +{ + struct ipa_nat_hw_indx_entry zero_entry = { 0 }; + + return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? false : true; +} + +static bool ipa_nat_ipv4_is_pdn_entry_zeroed_v_4_0(const void *entry) +{ + struct ipa_nat_hw_pdn_entry zero_entry = { 0 }; + + return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? false : true; +} + +static bool ipa_nat_ipv6ct_is_entry_zeroed_v_4_0(const void *entry) +{ + struct ipa_nat_hw_ipv6ct_entry zero_entry = { 0 }; + + return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? 
false : true; +} + +static bool ipa_nat_ipv4_is_entry_valid_v_3_0(const void *entry) +{ + struct ipa_nat_hw_ipv4_entry *hw_entry = + (struct ipa_nat_hw_ipv4_entry *)entry; + + return hw_entry->enable && + hw_entry->protocol != IPAHAL_NAT_INVALID_PROTOCOL; +} + +static bool ipa_nat_ipv4_is_index_entry_valid_v_3_0(const void *entry) +{ + struct ipa_nat_hw_indx_entry *hw_entry = + (struct ipa_nat_hw_indx_entry *)entry; + + return hw_entry->tbl_entry != 0; +} + +static bool ipa_nat_ipv4_is_pdn_entry_valid_v_4_0(const void *entry) +{ + struct ipa_nat_hw_pdn_entry *hw_entry = + (struct ipa_nat_hw_pdn_entry *)entry; + + return hw_entry->public_ip != 0; +} + +static bool ipa_nat_ipv6ct_is_entry_valid_v_4_0(const void *entry) +{ + struct ipa_nat_hw_ipv6ct_entry *hw_entry = + (struct ipa_nat_hw_ipv6ct_entry *)entry; + + return hw_entry->enable && + hw_entry->protocol != IPAHAL_NAT_INVALID_PROTOCOL; +} + +static int ipa_nat_ipv4_stringify_entry_v_3_0(const void *entry, + char *buff, size_t buff_size) +{ + const struct ipa_nat_hw_ipv4_entry *nat_entry = + (const struct ipa_nat_hw_ipv4_entry *)entry; + + return scnprintf(buff, buff_size, + "\t\tPrivate_IP=%pI4h Target_IP=%pI4h\n" + "\t\tNext_Index=%d Public_Port=%d\n" + "\t\tPrivate_Port=%d Target_Port=%d\n" + "\t\tIP_CKSM_delta=0x%x Enable=%s Redirect=%s\n" + "\t\tTime_stamp=0x%x Proto=%d\n" + "\t\tPrev_Index=%d Indx_tbl_entry=%d\n" + "\t\tTCP_UDP_cksum_delta=0x%x\n", + &nat_entry->private_ip, &nat_entry->target_ip, + nat_entry->next_index, nat_entry->public_port, + nat_entry->private_port, nat_entry->target_port, + nat_entry->ip_chksum, + (nat_entry->enable) ? "true" : "false", + (nat_entry->redirect) ? "Direct_To_APPS" : "Fwd_to_route", + nat_entry->time_stamp, nat_entry->protocol, + nat_entry->prev_index, nat_entry->indx_tbl_entry, + nat_entry->tcp_udp_chksum); +} + +static int ipa_nat_ipv4_stringify_entry_v_4_0(const void *entry, + char *buff, size_t buff_size) +{ + int length; + const struct ipa_nat_hw_ipv4_entry *nat_entry = + (const struct ipa_nat_hw_ipv4_entry *)entry; + + length = ipa_nat_ipv4_stringify_entry_v_3_0(entry, buff, buff_size); + + length += scnprintf(buff + length, buff_size - length, + "\t\tPDN_Index=%d\n", nat_entry->pdn_index); + + return length; +} + +static int ipa_nat_ipv4_index_stringify_entry_v_3_0(const void *entry, + char *buff, size_t buff_size) +{ + const struct ipa_nat_hw_indx_entry *index_entry = + (const struct ipa_nat_hw_indx_entry *)entry; + + return scnprintf(buff, buff_size, + "\t\tTable_Entry=%d Next_Index=%d\n", + index_entry->tbl_entry, index_entry->next_index); +} + +static int ipa_nat_ipv4_pdn_stringify_entry_v_4_0(const void *entry, + char *buff, size_t buff_size) +{ + const struct ipa_nat_hw_pdn_entry *pdn_entry = + (const struct ipa_nat_hw_pdn_entry *)entry; + + return scnprintf(buff, buff_size, + "ip=%pI4h src_metadata=0x%X, dst_metadata=0x%X\n", + &pdn_entry->public_ip, + pdn_entry->src_metadata, pdn_entry->dst_metadata); +} + +static inline int ipa_nat_ipv6_stringify_addr(char *buff, size_t buff_size, + const char *msg, u64 lsb, u64 msb) +{ + struct in6_addr addr; + + addr.s6_addr32[0] = cpu_to_be32((msb & IPA_64_HIGH_32_MASK) >> 32); + addr.s6_addr32[1] = cpu_to_be32(msb & IPA_64_LOW_32_MASK); + addr.s6_addr32[2] = cpu_to_be32((lsb & IPA_64_HIGH_32_MASK) >> 32); + addr.s6_addr32[3] = cpu_to_be32(lsb & IPA_64_LOW_32_MASK); + + return scnprintf(buff, buff_size, + "\t\t%s_IPv6_Addr=%pI6c\n", msg, &addr); +} + +static int ipa_nat_ipv6ct_stringify_entry_v_4_0(const void *entry, + char *buff, size_t 
buff_size) +{ + int length = 0; + const struct ipa_nat_hw_ipv6ct_entry *ipv6ct_entry = + (const struct ipa_nat_hw_ipv6ct_entry *)entry; + + length += ipa_nat_ipv6_stringify_addr( + buff + length, + buff_size - length, + "Src", + ipv6ct_entry->src_ipv6_lsb, + ipv6ct_entry->src_ipv6_msb); + + length += ipa_nat_ipv6_stringify_addr( + buff + length, + buff_size - length, + "Dest", + ipv6ct_entry->dest_ipv6_lsb, + ipv6ct_entry->dest_ipv6_msb); + + length += scnprintf(buff + length, buff_size - length, + "\t\tEnable=%s Redirect=%s Time_Stamp=0x%x Proto=%d\n" + "\t\tNext_Index=%d Dest_Port=%d Src_Port=%d\n" + "\t\tDirection Settings: Out=%s In=%s\n" + "\t\tPrev_Index=%d\n", + (ipv6ct_entry->enable) ? "true" : "false", + (ipv6ct_entry->redirect) ? "Direct_To_APPS" : "Fwd_to_route", + ipv6ct_entry->time_stamp, + ipv6ct_entry->protocol, + ipv6ct_entry->next_index, + ipv6ct_entry->dest_port, + ipv6ct_entry->src_port, + (ipv6ct_entry->out_allowed) ? "Allow" : "Deny", + (ipv6ct_entry->in_allowed) ? "Allow" : "Deny", + ipv6ct_entry->prev_index); + + return length; +} + +/* + * struct ipahal_nat_obj - H/W information for specific IPA version + * @entry_size - CB to get the size of the entry + * @is_entry_zeroed - CB to determine whether an entry is definitely zero + * @is_entry_valid - CB to determine whether an entry is valid + * Validity criterium depends on entry type. E.g. for NAT base table + * Entry need to be with valid protocol and enabled. + * @stringify_entry - CB to create string that represents an entry + */ +struct ipahal_nat_obj { + size_t (*entry_size)(void); + bool (*is_entry_zeroed)(const void *entry); + bool (*is_entry_valid)(const void *entry); + int (*stringify_entry)(const void *entry, char *buff, size_t buff_size); +}; + +/* + * This table contains the info regard each NAT type for IPAv3 and later. + * Information like: get entry size and stringify entry functions. + * All the information on all the NAT types on IPAv3 are statically + * defined below. If information is missing regard some NAT type on some + * IPA version, the init function will fill it with the information from the + * previous IPA version. 
+ * Information is considered missing if all of the fields are 0 + */ +static struct ipahal_nat_obj ipahal_nat_objs[IPA_HW_MAX][IPA_NAT_MAX] = { + /* IPAv3 */ + [IPA_HW_v3_0][IPAHAL_NAT_IPV4] = { + ipa_nat_ipv4_entry_size_v_3_0, + ipa_nat_ipv4_is_entry_zeroed_v_3_0, + ipa_nat_ipv4_is_entry_valid_v_3_0, + ipa_nat_ipv4_stringify_entry_v_3_0 + }, + [IPA_HW_v3_0][IPAHAL_NAT_IPV4_INDEX] = { + ipa_nat_ipv4_index_entry_size_v_3_0, + ipa_nat_ipv4_is_index_entry_zeroed_v_3_0, + ipa_nat_ipv4_is_index_entry_valid_v_3_0, + ipa_nat_ipv4_index_stringify_entry_v_3_0 + }, + + /* IPAv4 */ + [IPA_HW_v4_0][IPAHAL_NAT_IPV4] = { + ipa_nat_ipv4_entry_size_v_3_0, + ipa_nat_ipv4_is_entry_zeroed_v_3_0, + ipa_nat_ipv4_is_entry_valid_v_3_0, + ipa_nat_ipv4_stringify_entry_v_4_0 + }, + [IPA_HW_v4_0][IPAHAL_NAT_IPV4_PDN] = { + ipa_nat_ipv4_pdn_entry_size_v_4_0, + ipa_nat_ipv4_is_pdn_entry_zeroed_v_4_0, + ipa_nat_ipv4_is_pdn_entry_valid_v_4_0, + ipa_nat_ipv4_pdn_stringify_entry_v_4_0 + }, + [IPA_HW_v4_0][IPAHAL_NAT_IPV6CT] = { + ipa_nat_ipv6ct_entry_size_v_4_0, + ipa_nat_ipv6ct_is_entry_zeroed_v_4_0, + ipa_nat_ipv6ct_is_entry_valid_v_4_0, + ipa_nat_ipv6ct_stringify_entry_v_4_0 + } +}; + +static void ipahal_nat_check_obj(struct ipahal_nat_obj *obj, + int nat_type, int ver) +{ + WARN(obj->entry_size == NULL, "%s missing entry_size for version %d\n", + ipahal_nat_type_str(nat_type), ver); + WARN(obj->is_entry_zeroed == NULL, + "%s missing is_entry_zeroed for version %d\n", + ipahal_nat_type_str(nat_type), ver); + WARN(obj->stringify_entry == NULL, + "%s missing stringify_entry for version %d\n", + ipahal_nat_type_str(nat_type), ver); +} + +/* + * ipahal_nat_init() - Build the NAT information table + * See ipahal_nat_objs[][] comments + */ +int ipahal_nat_init(enum ipa_hw_type ipa_hw_type) +{ + int i; + int j; + struct ipahal_nat_obj zero_obj, *next_obj; + + IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type); + + memset(&zero_obj, 0, sizeof(zero_obj)); + + if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) { + IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type); + return -EINVAL; + } + + for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; ++i) { + for (j = 0; j < IPA_NAT_MAX; ++j) { + next_obj = &ipahal_nat_objs[i + 1][j]; + if (!memcmp(next_obj, &zero_obj, sizeof(*next_obj))) { + memcpy(next_obj, &ipahal_nat_objs[i][j], + sizeof(*next_obj)); + } else { + ipahal_nat_check_obj(next_obj, j, i + 1); + } + } + } + + return 0; +} + +const char *ipahal_nat_type_str(enum ipahal_nat_type nat_type) +{ + if (nat_type < 0 || nat_type >= IPA_NAT_MAX) { + IPAHAL_ERR("requested NAT type %d is invalid\n", nat_type); + return "Invalid NAT type"; + } + + return ipahal_nat_type_to_str[nat_type]; +} + +int ipahal_nat_entry_size(enum ipahal_nat_type nat_type, size_t *entry_size) +{ + if (WARN(entry_size == NULL, "entry_size is NULL\n")) + return -EINVAL; + if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX, + "requested NAT type %d is invalid\n", nat_type)) + return -EINVAL; + + IPAHAL_DBG("Get the entry size for NAT type=%s\n", + ipahal_nat_type_str(nat_type)); + + *entry_size = + ipahal_nat_objs[ipahal_ctx->hw_type][nat_type].entry_size(); + + IPAHAL_DBG("The entry size is %zu\n", *entry_size); + + return 0; +} + +int ipahal_nat_is_entry_zeroed(enum ipahal_nat_type nat_type, void *entry, + bool *entry_zeroed) +{ + struct ipahal_nat_obj *nat_ptr; + + if (WARN(entry == NULL || entry_zeroed == NULL, + "NULL pointer received\n")) + return -EINVAL; + if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX, + "requested NAT type %d is invalid\n", nat_type)) + 
return -EINVAL; + + IPAHAL_DBG("Determine whether the entry is zeroed for NAT type=%s\n", + ipahal_nat_type_str(nat_type)); + + nat_ptr = + &ipahal_nat_objs[ipahal_ctx->hw_type][nat_type]; + + *entry_zeroed = nat_ptr->is_entry_zeroed(entry); + + IPAHAL_DBG("The entry is %szeroed\n", (*entry_zeroed) ? "" : "not "); + + return 0; +} + +int ipahal_nat_is_entry_valid(enum ipahal_nat_type nat_type, void *entry, + bool *entry_valid) +{ + struct ipahal_nat_obj *nat_obj; + + if (WARN(entry == NULL || entry_valid == NULL, + "NULL pointer received\n")) + return -EINVAL; + if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX, + "requested NAT type %d is invalid\n", nat_type)) + return -EINVAL; + + IPAHAL_DBG("Determine whether the entry is valid for NAT type=%s\n", + ipahal_nat_type_str(nat_type)); + nat_obj = &ipahal_nat_objs[ipahal_ctx->hw_type][nat_type]; + *entry_valid = nat_obj->is_entry_valid(entry); + IPAHAL_DBG("The entry is %svalid\n", (*entry_valid) ? "" : "not "); + + return 0; +} + +int ipahal_nat_stringify_entry(enum ipahal_nat_type nat_type, void *entry, + char *buff, size_t buff_size) +{ + int result; + struct ipahal_nat_obj *nat_obj_ptr; + + if (WARN(entry == NULL || buff == NULL, "NULL pointer received\n")) + return -EINVAL; + if (WARN(!buff_size, "The output buff size is zero\n")) + return -EINVAL; + if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX, + "requested NAT type %d is invalid\n", nat_type)) + return -EINVAL; + + nat_obj_ptr = + &ipahal_nat_objs[ipahal_ctx->hw_type][nat_type]; + + IPAHAL_DBG("Create the string for the entry of NAT type=%s\n", + ipahal_nat_type_str(nat_type)); + + result = nat_obj_ptr->stringify_entry(entry, buff, buff_size); + + IPAHAL_DBG("The string successfully created with length %d\n", + result); + + return result; +} + diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.h new file mode 100644 index 000000000000..bdba969f266a --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPAHAL_NAT_H_ +#define _IPAHAL_NAT_H_ + +/* + * NAT types + * + * NOTE:: Any change to this enum, need to change to ipahal_nat_to_str + * array as well. + */ +enum ipahal_nat_type { + IPAHAL_NAT_IPV4, + IPAHAL_NAT_IPV4_INDEX, + IPAHAL_NAT_IPV4_PDN, + IPAHAL_NAT_IPV6CT, + IPA_NAT_MAX +}; + +/* NAT Function APIs */ + +/* + * ipahal_nat_type_str() - returns string that represent the NAT type + * @nat_type: [in] NAT type + */ +const char *ipahal_nat_type_str(enum ipahal_nat_type nat_type); + +/* + * ipahal_nat_entry_size() - Gets the size of HW NAT entry + * @nat_type: [in] The type of the NAT entry + * @entry_size: [out] The size of the HW NAT entry + */ +int ipahal_nat_entry_size(enum ipahal_nat_type nat_type, size_t *entry_size); + +/* + * ipahal_nat_is_entry_zeroed() - Determines whether HW NAT entry is + * definitely zero + * @nat_type: [in] The type of the NAT entry + * @entry: [in] The NAT entry + * @entry_zeroed: [out] True if the received entry is definitely zero + */ +int ipahal_nat_is_entry_zeroed(enum ipahal_nat_type nat_type, void *entry, + bool *entry_zeroed); + +/* + * ipahal_nat_is_entry_valid() - Determines whether HW NAT entry is + * valid. + * Validity criterium depends on entry type. E.g. for NAT base table + * Entry need to be with valid protocol and enabled. 
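/*
 * Usage sketch for the NAT helpers implemented above (illustrative only;
 * example_dump_nat_table(), 'table_base' and 'num_entries' are hypothetical
 * caller-side names for an IPv4 NAT table already mapped into the kernel):
 */
static int example_dump_nat_table(void *table_base, u32 num_entries,
	char *buff, size_t buff_size)
{
	size_t entry_size, pos = 0;
	bool zeroed, valid;
	u32 i;
	int ret;

	ret = ipahal_nat_entry_size(IPAHAL_NAT_IPV4, &entry_size);
	if (ret)
		return ret;

	for (i = 0; i < num_entries && pos < buff_size; i++) {
		void *entry = (u8 *)table_base + i * entry_size;

		ret = ipahal_nat_is_entry_zeroed(IPAHAL_NAT_IPV4, entry,
			&zeroed);
		if (ret)
			return ret;
		if (zeroed)
			continue;	/* skip unused slots */

		ret = ipahal_nat_is_entry_valid(IPAHAL_NAT_IPV4, entry,
			&valid);
		if (ret)
			return ret;

		pos += scnprintf(buff + pos, buff_size - pos,
			"entry %u (%s):\n", i, valid ? "valid" : "invalid");
		pos += ipahal_nat_stringify_entry(IPAHAL_NAT_IPV4, entry,
			buff + pos, buff_size - pos);
	}

	return 0;
}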
+ * @nat_type: [in] The type of the NAT entry + * @entry: [in] The NAT entry + * @entry_valid: [out] True if the received entry is valid + */ +int ipahal_nat_is_entry_valid(enum ipahal_nat_type nat_type, void *entry, + bool *entry_valid); + +/* + * ipahal_nat_stringify_entry() - Creates a string for HW NAT entry + * @nat_type: [in] The type of the NAT entry + * @entry: [in] The NAT entry + * @buff: [out] Output buffer for the result string + * @buff_size: [in] The size of the output buffer + * @return the number of characters written into buff not including + * the trailing '\0' + */ +int ipahal_nat_stringify_entry(enum ipahal_nat_type nat_type, void *entry, + char *buff, size_t buff_size); + +#endif /* _IPAHAL_NAT_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat_i.h new file mode 100644 index 000000000000..5ef528e5ac25 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat_i.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPAHAL_NAT_I_H_ +#define _IPAHAL_NAT_I_H_ + +#include + +/* ----------------------- IPv4 NAT Table Entry ------------------------- + * + * ----------------------------------------------------------------------- + * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * ----------------------------------------------------------------------- + * | Target IP(4B) | Private IP(4B) | + * ----------------------------------------------------------------------- + * |Target Port(2B) |Private Port(2B)| Public Port(2B) | Next Index(2B) | + * ----------------------------------------------------------------------- + * |Proto| TimeStamp(3B) | Flags(2B) |IP check sum Diff| + * |(1B) | |EN|Redirect|Resv | (2B) | + * ----------------------------------------------------------------------- + * |TCP/UDP checksum| PDN info(2B) | SW Specific Parameters(4B) | + * | diff (2B) |Info|Resv |index table entry| prev index | + * ----------------------------------------------------------------------- + */ +struct ipa_nat_hw_ipv4_entry { + /* An IP address can't be bit-field, because its address is used */ + u32 private_ip; + u32 target_ip; + + u32 next_index : 16; + u32 public_port : 16; + u32 private_port : 16; + u32 target_port : 16; + u32 ip_chksum : 16; + + u32 rsvd1 : 14; + u32 redirect : 1; + u32 enable : 1; + + u32 time_stamp : 24; + u32 protocol : 8; + + u32 prev_index : 16; + u32 indx_tbl_entry : 16; + + u32 rsvd2 : 12; + u32 pdn_index : 4; /* IPA 4.0 and greater */ + + u32 tcp_udp_chksum : 16; +}; + +/*--- IPV4 NAT Index Table Entry -- + *--------------------------------- + *| 3 | 2 | 1 | 0 | + *--------------------------------- + *|next index(2B) |table entry(2B)| + *--------------------------------- + */ +struct ipa_nat_hw_indx_entry { + u16 tbl_entry; + u16 next_index; +}; + +/** + * struct ipa_nat_hw_pdn_entry - IPA PDN config table entry + * @public_ip: the PDN's public ip + * @src_metadata: the PDN's metadata to be replaced for source NAT + * @dst_metadata: the PDN's metadata to be replaced for destination NAT + * @resrvd: reserved field + * --------------------------------- + * | 3 | 2 | 1 | 0 | + * --------------------------------- + * | public_ip (4B) | + * --------------------------------- + * | src_metadata (4B) | + * --------------------------------- + * | dst_metadata (4B) | + * --------------------------------- + * | resrvd (4B) | + * --------------------------------- + */ +struct ipa_nat_hw_pdn_entry { + 
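/*
 * Size note (derived from the diagrams above, assuming the usual packing of
 * the bit-fields): the IPv4 NAT entry is four 8-byte rows, i.e. 32 bytes,
 * the index-table entry is 4 bytes and the PDN config entry is 16 bytes.
 * These match what ipahal_nat_entry_size() reports, so the n-th entry of a
 * table starts at n * entry_size from the table base.
 */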
u32 public_ip; + u32 src_metadata; + u32 dst_metadata; + u32 resrvd; +}; + +/*------------------------- IPV6CT Table Entry ------------------------------ + *----------------------------------------------------------------------------- + *| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + *----------------------------------------------------------------------------- + *| Outbound Src IPv6 Address (8 LSB Bytes) | + *----------------------------------------------------------------------------- + *| Outbound Src IPv6 Address (8 MSB Bytes) | + *----------------------------------------------------------------------------- + *| Outbound Dest IPv6 Address (8 LSB Bytes) | + *----------------------------------------------------------------------------- + *| Outbound Dest IPv6 Address (8 MSB Bytes) | + *----------------------------------------------------------------------------- + *|Protocol| TimeStamp (3B) | Flags (2B) |Reserved (2B) | + *| (1B) | |Enable|Redirect|Resv | | + *----------------------------------------------------------------------------- + *|Reserved|Direction(1B)|Src Port(2B)| Dest Port (2B) |Next Index(2B)| + *| (1B) |IN|OUT|Resv | | | | + *----------------------------------------------------------------------------- + *| SW Specific Parameters(4B) | Reserved (4B) | + *| Prev Index (2B) |Reserved(2B)| | + *----------------------------------------------------------------------------- + *| Reserved (8B) | + *----------------------------------------------------------------------------- + */ +struct ipa_nat_hw_ipv6ct_entry { + /* An IP address can't be bit-field, because its address is used */ + u64 src_ipv6_lsb; + u64 src_ipv6_msb; + u64 dest_ipv6_lsb; + u64 dest_ipv6_msb; + + u64 rsvd1 : 30; + u64 redirect : 1; + u64 enable : 1; + + u64 time_stamp : 24; + u64 protocol : 8; + + u64 next_index : 16; + u64 dest_port : 16; + u64 src_port : 16; + u64 rsvd2 : 6; + u64 out_allowed : 1; + u64 in_allowed : 1; + u64 rsvd3 : 8; + + u64 rsvd4 : 48; + u64 prev_index : 16; + + u64 rsvd5 : 64; +}; + +int ipahal_nat_init(enum ipa_hw_type ipa_hw_type); + +#endif /* _IPAHAL_NAT_I_H_ */ + diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c new file mode 100644 index 000000000000..81481b05db25 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c @@ -0,0 +1,3464 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "ipahal_i.h" +#include "ipahal_reg.h" +#include "ipahal_reg_i.h" + +#define IPA_MAX_MSG_LEN 4096 + +static const char *ipareg_name_to_str[IPA_REG_MAX] = { + __stringify(IPA_ROUTE), + __stringify(IPA_IRQ_STTS_EE_n), + __stringify(IPA_IRQ_EN_EE_n), + __stringify(IPA_IRQ_CLR_EE_n), + __stringify(IPA_SUSPEND_IRQ_INFO_EE_n), + __stringify(IPA_SUSPEND_IRQ_EN_EE_n), + __stringify(IPA_SUSPEND_IRQ_CLR_EE_n), + __stringify(IPA_HOLB_DROP_IRQ_INFO_EE_n), + __stringify(IPA_HOLB_DROP_IRQ_EN_EE_n), + __stringify(IPA_HOLB_DROP_IRQ_CLR_EE_n), + __stringify(IPA_BCR), + __stringify(IPA_ENABLED_PIPES), + __stringify(IPA_VERSION), + __stringify(IPA_TAG_TIMER), + __stringify(IPA_NAT_TIMER), + __stringify(IPA_COMP_HW_VERSION), + __stringify(IPA_COMP_CFG), + __stringify(IPA_STATE_TX_WRAPPER), + __stringify(IPA_STATE_TX1), + __stringify(IPA_STATE_FETCHER), + __stringify(IPA_STATE_FETCHER_MASK), + __stringify(IPA_STATE_FETCHER_MASK_0), + __stringify(IPA_STATE_FETCHER_MASK_1), + __stringify(IPA_STATE_DFETCHER), + __stringify(IPA_STATE_ACL), + __stringify(IPA_STATE), + __stringify(IPA_STATE_RX_ACTIVE), + __stringify(IPA_STATE_TX0), + __stringify(IPA_STATE_AGGR_ACTIVE), + __stringify(IPA_COUNTER_CFG), + __stringify(IPA_STATE_GSI_TLV), + __stringify(IPA_STATE_GSI_AOS), + __stringify(IPA_STATE_GSI_IF), + __stringify(IPA_STATE_GSI_SKIP), + __stringify(IPA_STATE_GSI_IF_CONS), + __stringify(IPA_STATE_DPL_FIFO), + __stringify(IPA_STATE_COAL_MASTER), + __stringify(IPA_GENERIC_RAM_ARBITER_PRIORITY), + __stringify(IPA_STATE_NLO_AGGR), + __stringify(IPA_STATE_COAL_MASTER_1), + __stringify(IPA_ENDP_INIT_HDR_n), + __stringify(IPA_ENDP_INIT_HDR_EXT_n), + __stringify(IPA_ENDP_INIT_AGGR_n), + __stringify(IPA_AGGR_FORCE_CLOSE), + __stringify(IPA_ENDP_INIT_ROUTE_n), + __stringify(IPA_ENDP_INIT_MODE_n), + __stringify(IPA_ENDP_INIT_NAT_n), + __stringify(IPA_ENDP_INIT_CONN_TRACK_n), + __stringify(IPA_ENDP_INIT_CTRL_n), + __stringify(IPA_ENDP_INIT_CTRL_SCND_n), + __stringify(IPA_ENDP_INIT_CTRL_STATUS_n), + __stringify(IPA_ENDP_INIT_HOL_BLOCK_EN_n), + __stringify(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n), + __stringify(IPA_ENDP_INIT_DEAGGR_n), + __stringify(IPA_ENDP_INIT_SEQ_n), + __stringify(IPA_DEBUG_CNT_REG_n), + __stringify(IPA_ENDP_INIT_CFG_n), + __stringify(IPA_IRQ_EE_UC_n), + __stringify(IPA_ENDP_INIT_HDR_METADATA_MASK_n), + __stringify(IPA_ENDP_INIT_HDR_METADATA_n), + __stringify(IPA_ENDP_INIT_PROD_CFG_n), + __stringify(IPA_ENDP_INIT_RSRC_GRP_n), + __stringify(IPA_SHARED_MEM_SIZE), + __stringify(IPA_SW_AREA_RAM_DIRECT_ACCESS_n), + __stringify(IPA_DEBUG_CNT_CTRL_n), + __stringify(IPA_UC_MAILBOX_m_n), + __stringify(IPA_FILT_ROUT_HASH_FLUSH), + __stringify(IPA_FILT_ROUT_HASH_EN), + __stringify(IPA_SINGLE_NDP_MODE), + __stringify(IPA_QCNCM), + __stringify(IPA_SYS_PKT_PROC_CNTXT_BASE), + __stringify(IPA_LOCAL_PKT_PROC_CNTXT_BASE), + __stringify(IPA_ENDP_STATUS_n), + __stringify(IPA_ENDP_YELLOW_RED_MARKER_CFG_n), + __stringify(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n), + __stringify(IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n), + __stringify(IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n), + __stringify(IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n), + __stringify(IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n), + __stringify(IPA_DST_RSRC_GRP_01_RSRC_TYPE_n), + __stringify(IPA_DST_RSRC_GRP_23_RSRC_TYPE_n), + __stringify(IPA_DST_RSRC_GRP_45_RSRC_TYPE_n), + __stringify(IPA_DST_RSRC_GRP_67_RSRC_TYPE_n), + __stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_0), + __stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_1), + __stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0), + 
__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_1), + __stringify(IPA_HPS_FTCH_ARB_QUEUE_WEIGHT), + __stringify(IPA_QSB_MAX_WRITES), + __stringify(IPA_QSB_MAX_READS), + __stringify(IPA_TX_CFG), + __stringify(IPA_IDLE_INDICATION_CFG), + __stringify(IPA_DPS_SEQUENCER_FIRST), + __stringify(IPA_DPS_SEQUENCER_LAST), + __stringify(IPA_HPS_SEQUENCER_FIRST), + __stringify(IPA_HPS_SEQUENCER_LAST), + __stringify(IPA_CLKON_CFG), + __stringify(IPA_QTIME_TIMESTAMP_CFG), + __stringify(IPA_TIMERS_PULSE_GRAN_CFG), + __stringify(IPA_TIMERS_XO_CLK_DIV_CFG), + __stringify(IPA_STAT_QUOTA_BASE_n), + __stringify(IPA_STAT_QUOTA_MASK_n), + __stringify(IPA_STAT_TETHERING_BASE_n), + __stringify(IPA_STAT_TETHERING_MASK_n), + __stringify(IPA_STAT_FILTER_IPV4_BASE), + __stringify(IPA_STAT_FILTER_IPV6_BASE), + __stringify(IPA_STAT_ROUTER_IPV4_BASE), + __stringify(IPA_STAT_ROUTER_IPV6_BASE), + __stringify(IPA_STAT_FILTER_IPV4_START_ID), + __stringify(IPA_STAT_FILTER_IPV6_START_ID), + __stringify(IPA_STAT_ROUTER_IPV4_START_ID), + __stringify(IPA_STAT_ROUTER_IPV6_START_ID), + __stringify(IPA_STAT_FILTER_IPV4_END_ID), + __stringify(IPA_STAT_FILTER_IPV6_END_ID), + __stringify(IPA_STAT_ROUTER_IPV4_END_ID), + __stringify(IPA_STAT_ROUTER_IPV6_END_ID), + __stringify(IPA_STAT_DROP_CNT_BASE_n), + __stringify(IPA_STAT_DROP_CNT_MASK_n), + __stringify(IPA_SNOC_FEC_EE_n), + __stringify(IPA_FEC_ADDR_EE_n), + __stringify(IPA_FEC_ADDR_MSB_EE_n), + __stringify(IPA_FEC_ATTR_EE_n), + __stringify(IPA_ENDP_GSI_CFG1_n), + __stringify(IPA_ENDP_GSI_CFG_AOS_n), + __stringify(IPA_ENDP_GSI_CFG_TLV_n), +}; + +static void ipareg_construct_dummy(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + IPAHAL_ERR("No construct function for %s\n", + ipahal_reg_name_str(reg)); + WARN(1, "invalid register operation"); +} + +static void ipareg_parse_dummy(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + IPAHAL_ERR("No parse function for %s\n", + ipahal_reg_name_str(reg)); + WARN(1, "invalid register operation"); +} + +static void ipareg_construct_rx_hps_clients_depth1( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rx_hps_clients *clients = + (struct ipahal_reg_rx_hps_clients *)fields; + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1)); +} + +static void ipareg_construct_rx_hps_clients_depth0( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rx_hps_clients *clients = + (struct ipahal_reg_rx_hps_clients *)fields; + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(2)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(3)); +} + +static void ipareg_construct_rx_hps_clients_depth0_v3_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct 
ipahal_reg_rx_hps_clients *clients = + (struct ipahal_reg_rx_hps_clients *)fields; + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(0)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(1)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(2)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(3)); +} + +static void ipareg_construct_rx_hps_clients_depth0_v4_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rx_hps_clients *clients = + (struct ipahal_reg_rx_hps_clients *)fields; + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_SHFT_v4_5, + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_SHFT_v4_5, + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_SHFT_v4_5, + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_SHFT_v4_5, + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[4], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_SHFT_v4_5, + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_BMSK_v4_5); +} + +static void ipareg_construct_rsrg_grp_xy( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rsrc_grp_cfg *grp = + (struct ipahal_reg_rsrc_grp_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, grp->x_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK); + IPA_SETFIELD_IN_REG(*val, grp->x_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK); + IPA_SETFIELD_IN_REG(*val, grp->y_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK); + IPA_SETFIELD_IN_REG(*val, grp->y_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK); +} + +static void ipareg_construct_rsrg_grp_xy_v3_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rsrc_grp_cfg *grp = + (struct ipahal_reg_rsrc_grp_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, grp->x_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5); + IPA_SETFIELD_IN_REG(*val, grp->x_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5); + + /* DST_23 register has only X fields at ipa V3_5 */ + if (reg == IPA_DST_RSRC_GRP_23_RSRC_TYPE_n) + return; + + IPA_SETFIELD_IN_REG(*val, grp->y_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5); + IPA_SETFIELD_IN_REG(*val, grp->y_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5); +} + +static void ipareg_construct_rsrg_grp_xy_v4_5( + enum ipahal_reg_name reg, const void *fields, u32 
*val) +{ + struct ipahal_reg_rsrc_grp_cfg *grp = + (struct ipahal_reg_rsrc_grp_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, grp->x_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5); + IPA_SETFIELD_IN_REG(*val, grp->x_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5); + + /* SRC_45 and DST_45 register has only X fields at ipa V4_5 */ + if (reg == IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n || + reg == IPA_DST_RSRC_GRP_45_RSRC_TYPE_n) + return; + + IPA_SETFIELD_IN_REG(*val, grp->y_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5); + IPA_SETFIELD_IN_REG(*val, grp->y_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5); +} + +static void ipareg_construct_hash_cfg_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_fltrt_hash_tuple *tuple = + (struct ipahal_reg_fltrt_hash_tuple *)fields; + + IPA_SETFIELD_IN_REG(*val, tuple->flt.src_id, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.src_ip_addr, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_ip_addr, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.src_port, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_port, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.protocol, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.meta_data, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->undefined1, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.src_id, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.src_ip_addr, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_ip_addr, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.src_port, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_port, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.protocol, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK); + 
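All of the construct/parse helpers in this file lean on the same two field-packing macros from ipahal_i.h (not part of this excerpt); the behaviour they rely on is essentially the following sketch:

	/* sketch only; the real definitions live in ipahal_i.h */
	#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
		((reg) |= ((val) << (shift)) & (mask))
	#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \
		(((reg) & (mask)) >> (shift))

Each construct routine ORs every field into its masked position within the 32-bit register image, and the matching parse routine extracts the fields back out with the same shift/mask pairs.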
IPA_SETFIELD_IN_REG(*val, tuple->rt.meta_data, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->undefined2, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK); +} + +static void ipareg_parse_hash_cfg_n( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_fltrt_hash_tuple *tuple = + (struct ipahal_reg_fltrt_hash_tuple *)fields; + + tuple->flt.src_id = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK); + tuple->flt.src_ip_addr = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK); + tuple->flt.dst_ip_addr = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK); + tuple->flt.src_port = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK); + tuple->flt.dst_port = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK); + tuple->flt.protocol = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK); + tuple->flt.meta_data = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK); + tuple->undefined1 = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK); + tuple->rt.src_id = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK); + tuple->rt.src_ip_addr = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK); + tuple->rt.dst_ip_addr = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK); + tuple->rt.src_port = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK); + tuple->rt.dst_port = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK); + tuple->rt.protocol = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK); + tuple->rt.meta_data = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK); + tuple->undefined2 = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK); +} + +static void ipareg_construct_endp_status_n_common( + const struct ipahal_reg_ep_cfg_status *ep_status, u32 *val) +{ + 
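	/*
	 * Pattern used here for ENDP_STATUS_n, and repeated below for
	 * CLKON_CFG and COMP_CFG: the *_common() helper packs the fields
	 * shared by every HW version, while the thin per-version wrappers
	 * (plain, _v4_0, _v4_5) add or omit the version-specific fields
	 * on top of it.
	 */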
IPA_SETFIELD_IN_REG(*val, ep_status->status_en, + IPA_ENDP_STATUS_n_STATUS_EN_SHFT, + IPA_ENDP_STATUS_n_STATUS_EN_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_status->status_ep, + IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT, + IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK); +} + +static void ipareg_construct_endp_status_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + const struct ipahal_reg_ep_cfg_status *ep_status = + (const struct ipahal_reg_ep_cfg_status *)fields; + + ipareg_construct_endp_status_n_common(ep_status, val); + + IPA_SETFIELD_IN_REG(*val, ep_status->status_location, + IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT, + IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK); +} + +static void ipareg_construct_endp_status_n_v4_0( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_ep_cfg_status *ep_status = + (struct ipahal_reg_ep_cfg_status *)fields; + + ipareg_construct_endp_status_n_common(ep_status, val); + + IPA_SETFIELD_IN_REG(*val, ep_status->status_location, + IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT, + IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_status->status_pkt_suppress, + IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT, + IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK); +} + +static void ipareg_construct_endp_status_n_v4_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_ep_cfg_status *ep_status = + (struct ipahal_reg_ep_cfg_status *)fields; + + ipareg_construct_endp_status_n_common(ep_status, val); + + IPA_SETFIELD_IN_REG(*val, ep_status->status_pkt_suppress, + IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT, + IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK); +} + +static void ipareg_construct_clkon_cfg_common( + const struct ipahal_reg_clkon_cfg *clkon_cfg, u32 *val) +{ + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_global_2x_clk, + IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT, + IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_global, + IPA_CLKON_CFG_OPEN_GLOBAL_SHFT, + IPA_CLKON_CFG_OPEN_GLOBAL_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_gsi_if, + IPA_CLKON_CFG_OPEN_GSI_IF_SHFT, + IPA_CLKON_CFG_OPEN_GSI_IF_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_weight_arb, + IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT, + IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_qmb, + IPA_CLKON_CFG_OPEN_QMB_SHFT, + IPA_CLKON_CFG_OPEN_QMB_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ram_slaveway, + IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT, + IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_aggr_wrapper, + IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT, + IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_qsb2axi_cmdq_l, + IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT, + IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_fnr, + IPA_CLKON_CFG_OPEN_FNR_SHFT, + IPA_CLKON_CFG_OPEN_FNR_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_tx_1, + IPA_CLKON_CFG_OPEN_TX_1_SHFT, + IPA_CLKON_CFG_OPEN_TX_1_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_tx_0, + IPA_CLKON_CFG_OPEN_TX_0_SHFT, + IPA_CLKON_CFG_OPEN_TX_0_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ntf_tx_cmdqs, + IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT, + IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_h_dcph, + IPA_CLKON_CFG_OPEN_H_DCPH_SHFT, + IPA_CLKON_CFG_OPEN_H_DCPH_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_d_dcph, + 
IPA_CLKON_CFG_OPEN_D_DCPH_SHFT, + IPA_CLKON_CFG_OPEN_D_DCPH_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ack_mngr, + IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT, + IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ctx_handler, + IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT, + IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_rsrc_mngr, + IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT, + IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_dps_tx_cmdqs, + IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT, + IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_hps_dps_cmdqs, + IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT, + IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_rx_hps_cmdqs, + IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT, + IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_dps, + IPA_CLKON_CFG_OPEN_DPS_SHFT, + IPA_CLKON_CFG_OPEN_DPS_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_hps, + IPA_CLKON_CFG_OPEN_HPS_SHFT, + IPA_CLKON_CFG_OPEN_HPS_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ftch_dps, + IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT, + IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ftch_hps, + IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT, + IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ram_arb, + IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT, + IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_misc, + IPA_CLKON_CFG_OPEN_MISC_SHFT, + IPA_CLKON_CFG_OPEN_MISC_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_tx_wrapper, + IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT, + IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_proc, + IPA_CLKON_CFG_OPEN_PROC_SHFT, + IPA_CLKON_CFG_OPEN_PROC_BMSK); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_rx, + IPA_CLKON_CFG_OPEN_RX_SHFT, + IPA_CLKON_CFG_OPEN_RX_BMSK); +} + +static void ipareg_construct_clkon_cfg( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_clkon_cfg *clkon_cfg = + (struct ipahal_reg_clkon_cfg *)fields; + + ipareg_construct_clkon_cfg_common(clkon_cfg, val); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_dcmp, + IPA_CLKON_CFG_OPEN_DCMP_SHFT, + IPA_CLKON_CFG_OPEN_DCMP_BMSK); +} + +static void ipareg_construct_clkon_cfg_v4_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_clkon_cfg *clkon_cfg = + (struct ipahal_reg_clkon_cfg *)fields; + + ipareg_construct_clkon_cfg_common(clkon_cfg, val); + + IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_dpl_fifo, + IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_SHFT_V4_5, + IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_BMSK_V4_5); +} + +static void ipareg_parse_clkon_cfg_common( + struct ipahal_reg_clkon_cfg *clkon_cfg, u32 val) +{ + memset(clkon_cfg, 0, sizeof(struct ipahal_reg_clkon_cfg)); + + clkon_cfg->open_global_2x_clk = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT, + IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK); + + clkon_cfg->open_global = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_GLOBAL_SHFT, + IPA_CLKON_CFG_OPEN_GLOBAL_BMSK); + + clkon_cfg->open_gsi_if = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_GSI_IF_SHFT, + IPA_CLKON_CFG_OPEN_GSI_IF_BMSK); + + clkon_cfg->open_weight_arb = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT, + IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK); + + clkon_cfg->open_qmb = IPA_GETFIELD_FROM_REG(val, + 
IPA_CLKON_CFG_OPEN_QMB_SHFT, + IPA_CLKON_CFG_OPEN_QMB_BMSK); + + clkon_cfg->open_ram_slaveway = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT, + IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK); + + clkon_cfg->open_aggr_wrapper = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT, + IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK); + + clkon_cfg->open_qsb2axi_cmdq_l = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT, + IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK); + + clkon_cfg->open_fnr = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_FNR_SHFT, + IPA_CLKON_CFG_OPEN_FNR_BMSK); + + clkon_cfg->open_tx_1 = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_TX_1_SHFT, + IPA_CLKON_CFG_OPEN_TX_1_BMSK); + + clkon_cfg->open_tx_0 = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_TX_0_SHFT, + IPA_CLKON_CFG_OPEN_TX_0_BMSK); + + clkon_cfg->open_ntf_tx_cmdqs = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT, + IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK); + + clkon_cfg->open_h_dcph = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_H_DCPH_SHFT, + IPA_CLKON_CFG_OPEN_H_DCPH_BMSK); + + clkon_cfg->open_d_dcph = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_D_DCPH_SHFT, + IPA_CLKON_CFG_OPEN_D_DCPH_BMSK); + + clkon_cfg->open_ack_mngr = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT, + IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK); + + clkon_cfg->open_ctx_handler = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT, + IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK); + + clkon_cfg->open_rsrc_mngr = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT, + IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK); + + clkon_cfg->open_dps_tx_cmdqs = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT, + IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK); + + clkon_cfg->open_hps_dps_cmdqs = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT, + IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK); + + clkon_cfg->open_rx_hps_cmdqs = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT, + IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK); + + clkon_cfg->open_dps = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_DPS_SHFT, + IPA_CLKON_CFG_OPEN_DPS_BMSK); + + clkon_cfg->open_hps = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_HPS_SHFT, + IPA_CLKON_CFG_OPEN_HPS_BMSK); + + clkon_cfg->open_ftch_dps = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT, + IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK); + + clkon_cfg->open_ftch_hps = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT, + IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK); + + clkon_cfg->open_ram_arb = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT, + IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK); + + clkon_cfg->open_misc = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_MISC_SHFT, + IPA_CLKON_CFG_OPEN_MISC_BMSK); + + clkon_cfg->open_tx_wrapper = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT, + IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK); + + clkon_cfg->open_proc = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_PROC_SHFT, + IPA_CLKON_CFG_OPEN_PROC_BMSK); + + clkon_cfg->open_rx = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_RX_SHFT, + IPA_CLKON_CFG_OPEN_RX_BMSK); +} + +static void ipareg_parse_clkon_cfg( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_clkon_cfg *clkon_cfg = + (struct ipahal_reg_clkon_cfg *)fields; + + ipareg_parse_clkon_cfg_common(clkon_cfg, val); + + clkon_cfg->open_dcmp = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_OPEN_DCMP_SHFT, + IPA_CLKON_CFG_OPEN_DCMP_BMSK); +} + +static 
void ipareg_parse_clkon_cfg_v4_5( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_clkon_cfg *clkon_cfg = + (struct ipahal_reg_clkon_cfg *)fields; + + ipareg_parse_clkon_cfg_common(clkon_cfg, val); + + clkon_cfg->open_dpl_fifo = IPA_GETFIELD_FROM_REG(val, + IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_SHFT_V4_5, + IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_BMSK_V4_5); +} + +static void ipareg_construct_qtime_timestamp_cfg( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + const struct ipahal_reg_qtime_timestamp_cfg *ts_cfg = + (const struct ipahal_reg_qtime_timestamp_cfg *)fields; + + if (!ts_cfg->dpl_timestamp_sel && + ts_cfg->dpl_timestamp_lsb) { + IPAHAL_ERR("non zero DPL shift while legacy mode\n"); + WARN_ON(1); + } + + IPA_SETFIELD_IN_REG(*val, + ts_cfg->dpl_timestamp_lsb, + IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_LSB_SHFT, + IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_LSB_BMSK); + IPA_SETFIELD_IN_REG(*val, + ts_cfg->dpl_timestamp_sel ? 1 : 0, + IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_SEL_SHFT, + IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_SEL_BMSK); + IPA_SETFIELD_IN_REG(*val, + ts_cfg->tag_timestamp_lsb, + IPA_QTIME_TIMESTAMP_CFG_TAG_TIMESTAMP_LSB_SHFT, + IPA_QTIME_TIMESTAMP_CFG_TAG_TIMESTAMP_LSB_BMSK); + IPA_SETFIELD_IN_REG(*val, + ts_cfg->nat_timestamp_lsb, + IPA_QTIME_TIMESTAMP_CFG_NAT_TIMESTAMP_LSB_SHFT, + IPA_QTIME_TIMESTAMP_CFG_NAT_TIMESTAMP_LSB_BMSK); +} + +static u8 ipareg_timers_pulse_gran_code( + enum ipa_timers_time_gran_type gran) +{ + switch (gran) { + case IPA_TIMERS_TIME_GRAN_10_USEC: return 0; + case IPA_TIMERS_TIME_GRAN_20_USEC: return 1; + case IPA_TIMERS_TIME_GRAN_50_USEC: return 2; + case IPA_TIMERS_TIME_GRAN_100_USEC: return 3; + case IPA_TIMERS_TIME_GRAN_1_MSEC: return 4; + case IPA_TIMERS_TIME_GRAN_10_MSEC: return 5; + case IPA_TIMERS_TIME_GRAN_100_MSEC: return 6; + case IPA_TIMERS_TIME_GRAN_NEAR_HALF_SEC: return 7; + default: + IPAHAL_ERR("Invalid granularity %d\n", gran); + break; + } + + return 3; +} + +static enum ipa_timers_time_gran_type + ipareg_timers_pulse_gran_decode(u8 code) +{ + switch (code) { + case 0: return IPA_TIMERS_TIME_GRAN_10_USEC; + case 1: return IPA_TIMERS_TIME_GRAN_20_USEC; + case 2: return IPA_TIMERS_TIME_GRAN_50_USEC; + case 3: return IPA_TIMERS_TIME_GRAN_100_USEC; + case 4: return IPA_TIMERS_TIME_GRAN_1_MSEC; + case 5: return IPA_TIMERS_TIME_GRAN_10_MSEC; + case 6: return IPA_TIMERS_TIME_GRAN_100_MSEC; + case 7: return IPA_TIMERS_TIME_GRAN_NEAR_HALF_SEC; + default: + IPAHAL_ERR("Invalid coded granularity %d\n", code); + break; + } + + return IPA_TIMERS_TIME_GRAN_100_USEC; +} + +static void ipareg_construct_timers_pulse_gran_cfg( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + const struct ipahal_reg_timers_pulse_gran_cfg *gran_cfg = + (const struct ipahal_reg_timers_pulse_gran_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, + ipareg_timers_pulse_gran_code(gran_cfg->gran_0), + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(0), + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(0)); + + IPA_SETFIELD_IN_REG(*val, + ipareg_timers_pulse_gran_code(gran_cfg->gran_1), + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(1), + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(1)); + + IPA_SETFIELD_IN_REG(*val, + ipareg_timers_pulse_gran_code(gran_cfg->gran_2), + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(2), + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(2)); +} + +static void ipareg_parse_timers_pulse_gran_cfg( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + u8 code; + struct ipahal_reg_timers_pulse_gran_cfg *gran_cfg = + (struct ipahal_reg_timers_pulse_gran_cfg *)fields; + + 
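The encode/decode pair above maps the granularity enum onto the 3-bit field codes 0..7 (10 us, 20 us, 50 us, 100 us, 1 ms, 10 ms, 100 ms, ~0.5 s), and both directions fall back to 100 us when handed an unknown value. A round-trip through the two register helpers, with values picked purely for illustration:

	struct ipahal_reg_timers_pulse_gran_cfg cfg = {
		.gran_0 = IPA_TIMERS_TIME_GRAN_10_USEC,		/* code 0 */
		.gran_1 = IPA_TIMERS_TIME_GRAN_1_MSEC,		/* code 4 */
		.gran_2 = IPA_TIMERS_TIME_GRAN_100_MSEC,	/* code 6 */
	};
	u32 raw = 0;

	ipareg_construct_timers_pulse_gran_cfg(IPA_TIMERS_PULSE_GRAN_CFG,
					       &cfg, &raw);
	/* raw now carries codes 0, 4 and 6 in the three GRAN_X fields */
	ipareg_parse_timers_pulse_gran_cfg(IPA_TIMERS_PULSE_GRAN_CFG,
					   &cfg, raw);
	/* parsing decodes the same three granularities back into cfg */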
code = IPA_GETFIELD_FROM_REG(val, + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(0), + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(0)); + gran_cfg->gran_0 = ipareg_timers_pulse_gran_decode(code); + + code = IPA_GETFIELD_FROM_REG(val, + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(1), + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(1)); + gran_cfg->gran_1 = ipareg_timers_pulse_gran_decode(code); + + code = IPA_GETFIELD_FROM_REG(val, + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(2), + IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(2)); + gran_cfg->gran_2 = ipareg_timers_pulse_gran_decode(code); +} + +static void ipareg_construct_timers_xo_clk_div_cfg( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + const struct ipahal_reg_timers_xo_clk_div_cfg *div_cfg = + (const struct ipahal_reg_timers_xo_clk_div_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, + div_cfg->enable ? 1 : 0, + IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_SHFT, + IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_BMSK); + + IPA_SETFIELD_IN_REG(*val, + div_cfg->value, + IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_SHFT, + IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_BMSK); +} + +static void ipareg_parse_timers_xo_clk_div_cfg( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_timers_xo_clk_div_cfg *div_cfg = + (struct ipahal_reg_timers_xo_clk_div_cfg *)fields; + + div_cfg->enable = + IPA_GETFIELD_FROM_REG(val, + IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_SHFT, + IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_BMSK); + + div_cfg->value = + IPA_GETFIELD_FROM_REG(val, + IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_SHFT, + IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_BMSK); +} + +static void ipareg_construct_comp_cfg_comon( + const struct ipahal_reg_comp_cfg *comp_cfg, u32 *val) +{ + IPA_SETFIELD_IN_REG(*val, + comp_cfg->ipa_atomic_fetcher_arb_lock_dis, + IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT, + IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->ipa_qmb_select_by_address_global_en, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_SHFT, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->gsi_multi_axi_masters_dis, + IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_SHFT, + IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->gsi_snoc_cnoc_loop_protection_disable, + IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT, + IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->gen_qmb_0_snoc_cnoc_loop_protection_disable, + IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT, + IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->gen_qmb_1_multi_inorder_wr_dis, + IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_SHFT, + IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->gen_qmb_0_multi_inorder_wr_dis, + IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_SHFT, + IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->gen_qmb_1_multi_inorder_rd_dis, + IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_SHFT, + IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->gen_qmb_0_multi_inorder_rd_dis, + IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_SHFT, + IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->gsi_multi_inorder_wr_dis, + IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_SHFT, + IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, + 
!!comp_cfg->gsi_multi_inorder_rd_dis, + IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_SHFT, + IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->ipa_qmb_select_by_address_prod_en, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_SHFT, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->ipa_qmb_select_by_address_cons_en, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_SHFT, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->gen_qmb_1_snoc_bypass_dis, + IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_SHFT, + IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->gen_qmb_0_snoc_bypass_dis, + IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_SHFT, + IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->gsi_snoc_bypass_dis, + IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT, + IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK); +} + +static void ipareg_construct_comp_cfg( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_comp_cfg *comp_cfg = + (struct ipahal_reg_comp_cfg *)fields; + + ipareg_construct_comp_cfg_comon(comp_cfg, val); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->enable, + IPA_COMP_CFG_ENABLE_SHFT, + IPA_COMP_CFG_ENABLE_BMSK); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->ipa_dcmp_fast_clk_en, + IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_SHFT, + IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_BMSK); +} + +static void ipareg_construct_comp_cfg_v4_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_comp_cfg *comp_cfg = + (struct ipahal_reg_comp_cfg *)fields; + + ipareg_construct_comp_cfg_comon(comp_cfg, val); + + IPA_SETFIELD_IN_REG(*val, + !!comp_cfg->ipa_full_flush_wait_rsc_closure_en, + IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT_v4_5, + IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_5); +} + +static void ipareg_parse_comp_cfg_common( + struct ipahal_reg_comp_cfg *comp_cfg, u32 val) +{ + memset(comp_cfg, 0, sizeof(struct ipahal_reg_comp_cfg)); + + comp_cfg->ipa_atomic_fetcher_arb_lock_dis = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT, + IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK); + + comp_cfg->ipa_qmb_select_by_address_global_en = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_SHFT, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_BMSK); + + comp_cfg->gsi_multi_axi_masters_dis = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_SHFT, + IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_BMSK); + + comp_cfg->gsi_snoc_cnoc_loop_protection_disable = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT, + IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK); + + comp_cfg->gen_qmb_0_snoc_cnoc_loop_protection_disable = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT, + IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK); + + comp_cfg->gen_qmb_1_multi_inorder_wr_dis = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_SHFT, + IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_BMSK); + + comp_cfg->gen_qmb_0_multi_inorder_wr_dis = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_SHFT, + IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_BMSK); + + comp_cfg->gen_qmb_1_multi_inorder_rd_dis = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_SHFT, 
+ IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_BMSK); + + comp_cfg->gen_qmb_0_multi_inorder_rd_dis = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_SHFT, + IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_BMSK); + + comp_cfg->gsi_multi_inorder_wr_dis = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_SHFT, + IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_BMSK); + + comp_cfg->gsi_multi_inorder_rd_dis = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_SHFT, + IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_BMSK); + + comp_cfg->ipa_qmb_select_by_address_prod_en = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_SHFT, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_BMSK); + + comp_cfg->ipa_qmb_select_by_address_cons_en = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_SHFT, + IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_BMSK); + + comp_cfg->gen_qmb_1_snoc_bypass_dis = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_SHFT, + IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_BMSK); + + comp_cfg->gen_qmb_0_snoc_bypass_dis = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_SHFT, + IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_BMSK); + + comp_cfg->gsi_snoc_bypass_dis = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT, + IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK); +} + +static void ipareg_parse_comp_cfg( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_comp_cfg *comp_cfg = + (struct ipahal_reg_comp_cfg *)fields; + + ipareg_parse_comp_cfg_common(comp_cfg, val); + + comp_cfg->enable = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_ENABLE_SHFT, + IPA_COMP_CFG_ENABLE_BMSK); + + comp_cfg->ipa_dcmp_fast_clk_en = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_SHFT, + IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_BMSK); +} + +static void ipareg_parse_comp_cfg_v4_5( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_comp_cfg *comp_cfg = + (struct ipahal_reg_comp_cfg *)fields; + + ipareg_parse_comp_cfg_common(comp_cfg, val); + + comp_cfg->ipa_full_flush_wait_rsc_closure_en = + IPA_GETFIELD_FROM_REG(val, + IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT_v4_5, + IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_5); +} + +static void ipareg_construct_qcncm( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_qcncm *qcncm = + (struct ipahal_reg_qcncm *)fields; + + IPA_SETFIELD_IN_REG(*val, qcncm->mode_en ? 
1 : 0, + IPA_QCNCM_MODE_EN_SHFT, + IPA_QCNCM_MODE_EN_BMSK); + IPA_SETFIELD_IN_REG(*val, qcncm->mode_val, + IPA_QCNCM_MODE_VAL_SHFT, + IPA_QCNCM_MODE_VAL_BMSK); + IPA_SETFIELD_IN_REG(*val, qcncm->undefined, + 0, IPA_QCNCM_MODE_VAL_BMSK); +} + +static void ipareg_parse_qcncm( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_qcncm *qcncm = + (struct ipahal_reg_qcncm *)fields; + + memset(qcncm, 0, sizeof(struct ipahal_reg_qcncm)); + qcncm->mode_en = IPA_GETFIELD_FROM_REG(val, + IPA_QCNCM_MODE_EN_SHFT, + IPA_QCNCM_MODE_EN_BMSK); + qcncm->mode_val = IPA_GETFIELD_FROM_REG(val, + IPA_QCNCM_MODE_VAL_SHFT, + IPA_QCNCM_MODE_VAL_BMSK); + qcncm->undefined = IPA_GETFIELD_FROM_REG(val, + 0, IPA_QCNCM_UNDEFINED1_BMSK); + qcncm->undefined |= IPA_GETFIELD_FROM_REG(val, + 0, IPA_QCNCM_MODE_UNDEFINED2_BMSK); +} + +static void ipareg_construct_single_ndp_mode( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_single_ndp_mode *mode = + (struct ipahal_reg_single_ndp_mode *)fields; + + IPA_SETFIELD_IN_REG(*val, mode->single_ndp_en ? 1 : 0, + IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT, + IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK); + + IPA_SETFIELD_IN_REG(*val, mode->undefined, + IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT, + IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK); +} + +static void ipareg_parse_single_ndp_mode( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_single_ndp_mode *mode = + (struct ipahal_reg_single_ndp_mode *)fields; + + memset(mode, 0, sizeof(struct ipahal_reg_single_ndp_mode)); + mode->single_ndp_en = IPA_GETFIELD_FROM_REG(val, + IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT, + IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK); + mode->undefined = IPA_GETFIELD_FROM_REG(val, + IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT, + IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK); +} + +static void ipareg_construct_debug_cnt_ctrl_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_debug_cnt_ctrl *dbg_cnt_ctrl = + (struct ipahal_reg_debug_cnt_ctrl *)fields; + u8 type; + + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->en ? 1 : 0, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK); + + switch (dbg_cnt_ctrl->type) { + case DBG_CNT_TYPE_IPV4_FLTR: + type = 0x0; + if (!dbg_cnt_ctrl->rule_idx_pipe_rule) { + IPAHAL_ERR("No FLT global rules\n"); + WARN_ON(1); + } + break; + case DBG_CNT_TYPE_IPV4_ROUT: + type = 0x1; + break; + case DBG_CNT_TYPE_GENERAL: + type = 0x2; + break; + case DBG_CNT_TYPE_IPV6_FLTR: + type = 0x4; + if (!dbg_cnt_ctrl->rule_idx_pipe_rule) { + IPAHAL_ERR("No FLT global rules\n"); + WARN_ON(1); + } + break; + case DBG_CNT_TYPE_IPV6_ROUT: + type = 0x5; + break; + default: + IPAHAL_ERR("Invalid dbg_cnt_ctrl type (%d) for %s\n", + dbg_cnt_ctrl->type, ipahal_reg_name_str(reg)); + WARN_ON(1); + return; + + } + + IPA_SETFIELD_IN_REG(*val, type, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK); + + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->product ? 
1 : 0, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK); + + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->src_pipe, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK); + + if (ipahal_ctx->hw_type <= IPA_HW_v3_1) { + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK); + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx_pipe_rule, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK + ); + } else { + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5); + } +} + +static void ipareg_parse_shared_mem_size( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_shared_mem_size *smem_sz = + (struct ipahal_reg_shared_mem_size *)fields; + + memset(smem_sz, 0, sizeof(struct ipahal_reg_shared_mem_size)); + smem_sz->shared_mem_sz = IPA_GETFIELD_FROM_REG(val, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK); + + smem_sz->shared_mem_baddr = IPA_GETFIELD_FROM_REG(val, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK); +} + +static void ipareg_construct_endp_init_rsrc_grp_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp = + (struct ipahal_reg_endp_init_rsrc_grp *)fields; + + IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp, + IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT, + IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK); +} + +static void ipareg_construct_endp_init_rsrc_grp_n_v3_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp = + (struct ipahal_reg_endp_init_rsrc_grp *)fields; + + IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp, + IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5, + IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5); +} + +static void ipareg_construct_endp_init_rsrc_grp_n_v4_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp = + (struct ipahal_reg_endp_init_rsrc_grp *)fields; + + IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp, + IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v4_5, + IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v4_5); +} + +static void ipareg_construct_endp_init_hdr_metadata_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_metadata *metadata = + (struct ipa_ep_cfg_metadata *)fields; + + IPA_SETFIELD_IN_REG(*val, metadata->qmap_id, + IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT, + IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK); +} + +static void ipareg_construct_endp_init_hdr_metadata_mask_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_metadata_mask *metadata_mask = + (struct ipa_ep_cfg_metadata_mask *)fields; + + IPA_SETFIELD_IN_REG(*val, metadata_mask->metadata_mask, + IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT, + IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK); +} + +static void ipareg_construct_endp_init_cfg_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_cfg *cfg = + (struct ipa_ep_cfg_cfg *)fields; + u32 cs_offload_en; + + switch (cfg->cs_offload_en) { + case IPA_DISABLE_CS_OFFLOAD: + cs_offload_en = 0; + break; + case IPA_ENABLE_CS_OFFLOAD_UL: + cs_offload_en = 1; + break; + case 
IPA_ENABLE_CS_OFFLOAD_DL: + cs_offload_en = 2; + break; + default: + IPAHAL_ERR("Invalid cs_offload_en value for %s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return; + } + + IPA_SETFIELD_IN_REG(*val, cfg->frag_offload_en ? 1 : 0, + IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT, + IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK); + IPA_SETFIELD_IN_REG(*val, cs_offload_en, + IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT, + IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK); + IPA_SETFIELD_IN_REG(*val, cfg->cs_metadata_hdr_offset, + IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT, + IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK); + IPA_SETFIELD_IN_REG(*val, cfg->gen_qmb_master_sel, + IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT, + IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK); + +} + +static void ipareg_construct_endp_init_deaggr_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_deaggr *ep_deaggr = + (struct ipa_ep_cfg_deaggr *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_deaggr->deaggr_hdr_len, + IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT, + IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_valid, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_location, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_deaggr->max_packet_len, + IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT, + IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK); +} + +static void ipareg_construct_endp_init_hol_block_en_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_holb *ep_holb = + (struct ipa_ep_cfg_holb *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_holb->en, + IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT, + IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK); +} + +static void ipareg_construct_endp_init_hol_block_timer_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_holb *ep_holb = + (struct ipa_ep_cfg_holb *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_holb->tmr_val, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK); +} + + +static void ipareg_construct_endp_init_hol_block_timer_n_v4_2( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_holb *ep_holb = + (struct ipa_ep_cfg_holb *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_holb->scale, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_SHFT_V_4_2, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_BMSK_V_4_2); + IPA_SETFIELD_IN_REG(*val, ep_holb->base_val, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_SHFT_V_4_2, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_BMSK_V_4_2); +} + +static void ipareg_construct_endp_init_hol_block_timer_n_v4_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_holb *ep_holb = + (struct ipa_ep_cfg_holb *)fields; + + if (ep_holb->pulse_generator != !!ep_holb->pulse_generator) { + IPAHAL_ERR("Pulse generator is not 0 or 1 %d\n", + ep_holb->pulse_generator); + WARN_ON(1); + } + + IPA_SETFIELD_IN_REG(*val, ep_holb->scaled_time, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_SHFT_V4_5, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_BMSK_V4_5); + + IPA_SETFIELD_IN_REG(*val, ep_holb->pulse_generator, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_SHFT_V4_5, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_BMSK_V4_5); +} + +static void ipareg_construct_endp_init_ctrl_n(enum ipahal_reg_name reg, + 
const void *fields, u32 *val) +{ + struct ipa_ep_cfg_ctrl *ep_ctrl = + (struct ipa_ep_cfg_ctrl *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_suspend, + IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT, + IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay, + IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT, + IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK); +} + +static void ipareg_parse_endp_init_ctrl_n(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + struct ipa_ep_cfg_ctrl *ep_ctrl = + (struct ipa_ep_cfg_ctrl *)fields; + + ep_ctrl->ipa_ep_suspend = + ((val & IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK) >> + IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT); + + ep_ctrl->ipa_ep_delay = + ((val & IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK) >> + IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT); +} + +static void ipareg_construct_endp_init_ctrl_n_v4_0(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_ctrl *ep_ctrl = + (struct ipa_ep_cfg_ctrl *)fields; + + WARN_ON(ep_ctrl->ipa_ep_suspend); + + IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay, + IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT, + IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK); +} + +static void ipareg_construct_endp_init_ctrl_scnd_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_ep_cfg_ctrl_scnd *ep_ctrl_scnd = + (struct ipahal_ep_cfg_ctrl_scnd *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_ctrl_scnd->endp_delay, + IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_SHFT, + IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_BMSK); +} + +static void ipareg_construct_endp_init_nat_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_nat *ep_nat = + (struct ipa_ep_cfg_nat *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_nat->nat_en, + IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT, + IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK); +} + +static void ipareg_construct_endp_init_conn_track_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_conn_track *ep_ipv6ct = + (struct ipa_ep_cfg_conn_track *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_ipv6ct->conn_track_en, + IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_SHFT, + IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_BMSK); +} + +static void ipareg_construct_endp_init_mode_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_endp_init_mode *init_mode = + (struct ipahal_reg_endp_init_mode *)fields; + + IPA_SETFIELD_IN_REG(*val, init_mode->ep_mode.mode, + IPA_ENDP_INIT_MODE_n_MODE_SHFT, + IPA_ENDP_INIT_MODE_n_MODE_BMSK); + + IPA_SETFIELD_IN_REG(*val, init_mode->dst_pipe_number, + IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT, + IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK); +} + +static void ipareg_construct_endp_init_mode_n_v4_5(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_endp_init_mode *init_mode = + (struct ipahal_reg_endp_init_mode *)fields; + + IPA_SETFIELD_IN_REG(*val, init_mode->ep_mode.mode, + IPA_ENDP_INIT_MODE_n_MODE_SHFT_V4_5, + IPA_ENDP_INIT_MODE_n_MODE_BMSK_V4_5); + + IPA_SETFIELD_IN_REG(*val, init_mode->dst_pipe_number, + IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT_V4_5, + IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK_V4_5); +} + +static void ipareg_construct_endp_init_route_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_endp_init_route *ep_init_rt = + (struct ipahal_reg_endp_init_route *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_init_rt->route_table_index, + IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT, + IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK); + +} + +static void 
ipareg_parse_endp_init_aggr_n(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + struct ipa_ep_cfg_aggr *ep_aggr = + (struct ipa_ep_cfg_aggr *)fields; + + memset(ep_aggr, 0, sizeof(struct ipa_ep_cfg_aggr)); + + ep_aggr->aggr_en = + (((val & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT) + == IPA_ENABLE_AGGR); + ep_aggr->aggr = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT); + ep_aggr->aggr_byte_limit = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT); + ep_aggr->aggr_time_limit = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT); + ep_aggr->aggr_time_limit *= 1000; /* HW works in msec */ + ep_aggr->aggr_pkt_limit = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT); + ep_aggr->aggr_sw_eof_active = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT); + ep_aggr->aggr_hard_byte_limit_en = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK) + >> + IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT); +} + +static void ipareg_parse_endp_init_aggr_n_v4_5(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + struct ipa_ep_cfg_aggr *ep_aggr = + (struct ipa_ep_cfg_aggr *)fields; + + memset(ep_aggr, 0, sizeof(struct ipa_ep_cfg_aggr)); + + ep_aggr->aggr_en = + (((val & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK_V4_5) >> + IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT_V4_5) + == IPA_ENABLE_AGGR); + ep_aggr->aggr = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK_V4_5) >> + IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT_V4_5); + ep_aggr->aggr_byte_limit = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK_V4_5) >> + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT_V4_5); + ep_aggr->scaled_time = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK_V4_5) >> + IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT_V4_5); + ep_aggr->aggr_pkt_limit = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK_V4_5) >> + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT_V4_5); + ep_aggr->aggr_sw_eof_active = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK_V4_5) >> + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT_V4_5); + ep_aggr->aggr_hard_byte_limit_en = + ((val & + IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK_V4_5) + >> + IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT_V4_5); + ep_aggr->pulse_generator = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_BMSK_V4_5) >> + IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_SHFT_V4_5); +} + +static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_aggr *ep_aggr = + (struct ipa_ep_cfg_aggr *)fields; + u32 byte_limit; + u32 pkt_limit; + u32 max_byte_limit; + u32 max_pkt_limit; + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en, + IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr, + IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK); + + /* make sure aggregation byte limit does not cross HW boundaries */ + max_byte_limit = IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK >> + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT; + byte_limit = (ep_aggr->aggr_byte_limit > max_byte_limit) ? 
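+ /* Illustrative note: max_byte_limit is simply the field mask shifted down, i.e. the largest value the hardware field can hold, so an oversized request is clamped here rather than spilling into neighbouring bits (e.g. a 5-bit field would cap at 31; that width is only an assumed example). Likewise, aggr_time_limit is divided by 1000 on construct and multiplied by 1000 on parse, matching the "HW works in msec" comments, which suggests the driver-side value is kept in usec. */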
+ max_byte_limit : ep_aggr->aggr_byte_limit; + IPA_SETFIELD_IN_REG(*val, byte_limit, + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK); + + /* HW works in msec */ + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_time_limit / 1000, + IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK); + + /* make sure aggregation pkt limit does not cross HW boundaries */ + max_pkt_limit = IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT; + pkt_limit = (ep_aggr->aggr_pkt_limit > max_pkt_limit) ? + max_pkt_limit : ep_aggr->aggr_pkt_limit; + IPA_SETFIELD_IN_REG(*val, pkt_limit, + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_sw_eof_active, + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK); + + /*IPA 3.5.1 and above target versions hard byte limit enable supported*/ + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_hard_byte_limit_en, + IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK); +} + +static void ipareg_construct_endp_init_aggr_n_v4_5(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_aggr *ep_aggr = + (struct ipa_ep_cfg_aggr *)fields; + u32 byte_limit; + u32 pkt_limit; + u32 max_byte_limit; + u32 max_pkt_limit; + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en, + IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT_V4_5, + IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK_V4_5); + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr, + IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT_V4_5, + IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK_V4_5); + + /* make sure aggregation byte limit does not cross HW boundaries */ + max_byte_limit = IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK_V4_5 >> + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT_V4_5; + byte_limit = (ep_aggr->aggr_byte_limit > max_byte_limit) ? + max_byte_limit : ep_aggr->aggr_byte_limit; + IPA_SETFIELD_IN_REG(*val, byte_limit, + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT_V4_5, + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK_V4_5); + + IPA_SETFIELD_IN_REG(*val, ep_aggr->scaled_time, + IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT_V4_5, + IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK_V4_5); + + /* make sure aggregation pkt limit does not cross HW boundaries */ + max_pkt_limit = IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK_V4_5 >> + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT_V4_5; + pkt_limit = (ep_aggr->aggr_pkt_limit > max_pkt_limit) ? 
+ max_pkt_limit : ep_aggr->aggr_pkt_limit; + IPA_SETFIELD_IN_REG(*val, pkt_limit, + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT_V4_5, + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK_V4_5); + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_sw_eof_active, + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT_V4_5, + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK_V4_5); + + /* hard byte limit is not supported on this version; force-clear it */ + if (ep_aggr->aggr_hard_byte_limit_en) { + IPAHAL_ERR("hard byte limit aggr is not supported\n"); + WARN_ON(1); + } + ep_aggr->aggr_hard_byte_limit_en = 0; + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_hard_byte_limit_en, + IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT_V4_5, + IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK_V4_5); + + IPA_SETFIELD_IN_REG(*val, ep_aggr->pulse_generator, + IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_SHFT_V4_5, + IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_BMSK_V4_5); +} + +static void ipareg_construct_endp_init_hdr_ext_n_common( + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 *val) +{ + u8 hdr_endianness; + + hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1; + IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_pad_to_alignment, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_offset, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_payload_len_inc_padding, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_valid, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK); + + IPA_SETFIELD_IN_REG(*val, hdr_endianness, + IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK); +} + +static void ipareg_construct_endp_init_hdr_ext_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + ipareg_construct_endp_init_hdr_ext_n_common(fields, val); +} + +static void ipareg_construct_endp_init_hdr_ext_n_v4_5(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext = + (const struct ipa_ep_cfg_hdr_ext *)fields; + u32 msb; + + ipareg_construct_endp_init_hdr_ext_n_common(ep_hdr_ext, val); + + msb = ep_hdr_ext->hdr_total_len_or_pad_offset >> + hweight_long( + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK); + IPA_SETFIELD_IN_REG(*val, msb, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_SHFT_v4_5, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_BMSK_v4_5); + + if (!ep_hdr_ext->hdr) { + IPAHAL_ERR("No header info, skipping it.\n"); + return; + } + + msb = ep_hdr_ext->hdr->hdr_ofst_pkt_size >> + hweight_long(IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK_v4_5); + IPA_SETFIELD_IN_REG(*val, msb, + IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_SHFT_v4_5, + IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_BMSK_v4_5); + + msb = ep_hdr_ext->hdr->hdr_additional_const_len >> + hweight_long( + IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK_v4_5); + IPA_SETFIELD_IN_REG(*val, msb, + IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_SHFT_v4_5, +
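+ /* Illustrative note: hweight_long() applied to a field mask counts its set bits, i.e. the field width, so value >> hweight_long(BMSK) isolates the bits that do not fit in the base field and routes them into the corresponding *_MSB_* field, while IPA_SETFIELD_IN_REG() keeps only the low bits for the base field. For a 6-bit base field, a value of 0x45 would split into 0x05 (base) and 0x1 (MSB); the 6-bit width is an assumed example, not the real field size. */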
IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_BMSK_v4_5 + ); +} + +static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_hdr *ep_hdr; + + ep_hdr = (struct ipa_ep_cfg_hdr *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_metadata_reg_valid, + IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional, + IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux, + IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size_valid, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_additional_const_len, + IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata_valid, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_len, + IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK); +} + +static void ipareg_construct_endp_init_hdr_n_v4_5(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_hdr *ep_hdr; + u32 msb; + + ep_hdr = (struct ipa_ep_cfg_hdr *)fields; + + msb = ep_hdr->hdr_ofst_metadata >> + hweight_long(IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK_v4_5); + IPA_SETFIELD_IN_REG(*val, msb, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_BMSK_v4_5); + + msb = ep_hdr->hdr_len >> + hweight_long(IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK_v4_5); + IPA_SETFIELD_IN_REG(*val, msb, + IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional, + IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux, + IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size_valid, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_additional_const_len, + IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata_valid, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_len, + 
IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT_v4_5, + IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK_v4_5); +} + +static void ipareg_construct_route(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_route *route; + + route = (struct ipahal_reg_route *)fields; + + IPA_SETFIELD_IN_REG(*val, route->route_dis, + IPA_ROUTE_ROUTE_DIS_SHFT, + IPA_ROUTE_ROUTE_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, route->route_def_pipe, + IPA_ROUTE_ROUTE_DEF_PIPE_SHFT, + IPA_ROUTE_ROUTE_DEF_PIPE_BMSK); + + IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_table, + IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT, + IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK); + + IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_ofst, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK); + + IPA_SETFIELD_IN_REG(*val, route->route_frag_def_pipe, + IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT, + IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK); + + IPA_SETFIELD_IN_REG(*val, route->route_def_retain_hdr, + IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT, + IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK); +} + +static void ipareg_construct_qsb_max_writes(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_qsb_max_writes *max_writes; + + max_writes = (struct ipahal_reg_qsb_max_writes *)fields; + + IPA_SETFIELD_IN_REG(*val, max_writes->qmb_0_max_writes, + IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT, + IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK); + IPA_SETFIELD_IN_REG(*val, max_writes->qmb_1_max_writes, + IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT, + IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK); +} + +static void ipareg_construct_qsb_max_reads(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_qsb_max_reads *max_reads; + + max_reads = (struct ipahal_reg_qsb_max_reads *)fields; + + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_reads, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK); + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_reads, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK); +} + +static void ipareg_construct_qsb_max_reads_v4_0(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_qsb_max_reads *max_reads; + + max_reads = (struct ipahal_reg_qsb_max_reads *)fields; + + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_reads, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK); + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_reads, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK); + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_read_beats, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_SHFT_V4_0, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_BMSK_V4_0); + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_read_beats, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_SHFT_V4_0, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_BMSK_V4_0); +} + +static void ipareg_parse_tx_cfg(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + struct ipahal_reg_tx_cfg *tx_cfg; + + tx_cfg = (struct ipahal_reg_tx_cfg *)fields; + + tx_cfg->tx0_prefetch_disable = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5, + IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5); + + tx_cfg->tx1_prefetch_disable = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5, + IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5); + + tx_cfg->tx0_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5, 
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5); + + tx_cfg->tx1_prefetch_almost_empty_size = + tx_cfg->tx0_prefetch_almost_empty_size; +} + +static void ipareg_parse_tx_cfg_v4_0(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + struct ipahal_reg_tx_cfg *tx_cfg; + + tx_cfg = (struct ipahal_reg_tx_cfg *)fields; + + tx_cfg->tx0_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0); + + tx_cfg->tx1_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0); + + tx_cfg->dmaw_scnd_outsd_pred_en = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0); + + tx_cfg->dmaw_scnd_outsd_pred_threshold = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0); + + tx_cfg->dmaw_max_beats_256_dis = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0, + IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0); + + tx_cfg->pa_mask_en = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0, + IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0); +} + +static void ipareg_parse_tx_cfg_v4_5(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + struct ipahal_reg_tx_cfg *tx_cfg; + + ipareg_parse_tx_cfg_v4_0(reg, fields, val); + + tx_cfg = (struct ipahal_reg_tx_cfg *)fields; + + tx_cfg->dual_tx_enable = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_DUAL_TX_ENABLE_SHFT_V4_5, + IPA_TX_CFG_DUAL_TX_ENABLE_BMSK_V4_5); +} + +static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_tx_cfg *tx_cfg; + + tx_cfg = (struct ipahal_reg_tx_cfg *)fields; + + if (tx_cfg->tx0_prefetch_almost_empty_size != + tx_cfg->tx1_prefetch_almost_empty_size) + ipa_assert(); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_disable, + IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5, + IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_disable, + IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5, + IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_almost_empty_size, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5); +} + +static void ipareg_construct_tx_cfg_v4_0(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_tx_cfg *tx_cfg; + + tx_cfg = (struct ipahal_reg_tx_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_almost_empty_size, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_almost_empty_size, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_scnd_outsd_pred_threshold, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_max_beats_256_dis, + IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0, + IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_scnd_outsd_pred_en, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->pa_mask_en, + 
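+ /* Note: the *_v4_5 TX_CFG parse/construct routines wrap these v4.0 versions and then handle only the new DUAL_TX_ENABLE field, so fields shared across HW versions are encoded in a single place. */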
IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0, + IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0); +} + +static void ipareg_construct_tx_cfg_v4_5(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_tx_cfg *tx_cfg; + + ipareg_construct_tx_cfg_v4_0(reg, fields, val); + + tx_cfg = (struct ipahal_reg_tx_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, tx_cfg->dual_tx_enable, + IPA_TX_CFG_DUAL_TX_ENABLE_SHFT_V4_5, + IPA_TX_CFG_DUAL_TX_ENABLE_BMSK_V4_5); +} + +static void ipareg_construct_idle_indication_cfg(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_idle_indication_cfg *idle_indication_cfg; + + idle_indication_cfg = (struct ipahal_reg_idle_indication_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, + idle_indication_cfg->enter_idle_debounce_thresh, + IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5, + IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5); + + IPA_SETFIELD_IN_REG(*val, + idle_indication_cfg->const_non_idle_enable, + IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5, + IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5); +} + +static void ipareg_construct_hps_queue_weights(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_rx_hps_weights *hps_weights; + + hps_weights = (struct ipahal_reg_rx_hps_weights *)fields; + + IPA_SETFIELD_IN_REG(*val, + hps_weights->hps_queue_weight_0, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK); + + IPA_SETFIELD_IN_REG(*val, + hps_weights->hps_queue_weight_1, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK); + + IPA_SETFIELD_IN_REG(*val, + hps_weights->hps_queue_weight_2, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK); + + IPA_SETFIELD_IN_REG(*val, + hps_weights->hps_queue_weight_3, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK); +} + +static void ipareg_parse_hps_queue_weights( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_rx_hps_weights *hps_weights = + (struct ipahal_reg_rx_hps_weights *)fields; + + memset(hps_weights, 0, sizeof(struct ipahal_reg_rx_hps_weights)); + + hps_weights->hps_queue_weight_0 = IPA_GETFIELD_FROM_REG(val, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK); + + hps_weights->hps_queue_weight_1 = IPA_GETFIELD_FROM_REG(val, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK); + + hps_weights->hps_queue_weight_2 = IPA_GETFIELD_FROM_REG(val, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK); + + hps_weights->hps_queue_weight_3 = IPA_GETFIELD_FROM_REG(val, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK); +} + +static void ipareg_construct_counter_cfg(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_counter_cfg *counter_cfg = + (struct ipahal_reg_counter_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, counter_cfg->aggr_granularity, + IPA_COUNTER_CFG_AGGR_GRANULARITY_SHFT, + IPA_COUNTER_CFG_AGGR_GRANULARITY_BMSK); +} + +static void ipareg_parse_counter_cfg( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct 
ipahal_reg_counter_cfg *counter_cfg = + (struct ipahal_reg_counter_cfg *)fields; + + memset(counter_cfg, 0, sizeof(*counter_cfg)); + + counter_cfg->aggr_granularity = IPA_GETFIELD_FROM_REG(val, + IPA_COUNTER_CFG_AGGR_GRANULARITY_SHFT, + IPA_COUNTER_CFG_AGGR_GRANULARITY_BMSK); +} + +/* + * struct ipahal_reg_obj - Register H/W information for specific IPA version + * @construct - CB to construct register value from abstracted structure + * @parse - CB to parse register value to abstracted structure + * @offset - register offset relative to base address + * @n_ofst - N parameterized register sub-offset + * @n_start - starting n for n_registers + * @n_end - ending n for n_registers + * @en_print - enable this register to be printed when the device crashes + */ +struct ipahal_reg_obj { + void (*construct)(enum ipahal_reg_name reg, const void *fields, + u32 *val); + void (*parse)(enum ipahal_reg_name reg, void *fields, + u32 val); + u32 offset; + u32 n_ofst; + int n_start; + int n_end; + bool en_print; +}; + +/* + * This table contains the info regarding each register for IPAv3 and later. + * Information like: offset and construct/parse functions. + * All the information on the register on IPAv3 are statically defined below. + * If information is missing regarding some register on some IPA version, + * the init function will fill it with the information from the previous + * IPA version. + * Information is considered missing if all of the fields are 0. + * If offset is -1, this means that the register is removed on the + * specific version. + */ +static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { + /* IPAv3 */ + [IPA_HW_v3_0][IPA_ROUTE] = { + ipareg_construct_route, ipareg_parse_dummy, + 0x00000048, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_IRQ_STTS_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003008, 0x1000, 0, 0, 0}, + [IPA_HW_v3_0][IPA_IRQ_EN_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000300c, 0x1000, 0, 0, 0}, + [IPA_HW_v3_0][IPA_IRQ_CLR_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003010, 0x1000, 0, 0, 0}, + [IPA_HW_v3_0][IPA_SUSPEND_IRQ_INFO_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003098, 0x1000, 0, 0, 0}, + [IPA_HW_v3_0][IPA_BCR] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000001D0, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENABLED_PIPES] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000038, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_VERSION] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000034, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_TAG_TIMER] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000060, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_COMP_HW_VERSION] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000030, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_COMP_CFG] = { + ipareg_construct_comp_cfg, ipareg_parse_comp_cfg, + 0x0000003C, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_STATE_AGGR_ACTIVE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000010C, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_n] = { + ipareg_construct_endp_init_hdr_n, ipareg_parse_dummy, + 0x00000810, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_EXT_n] = { + ipareg_construct_endp_init_hdr_ext_n, ipareg_parse_dummy, + 0x00000814, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_AGGR_n] = { + ipareg_construct_endp_init_aggr_n, + ipareg_parse_endp_init_aggr_n, + 0x00000824, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_AGGR_FORCE_CLOSE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000001EC, 0, 0, 0, 0}, + 
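+ /* Illustrative note: a register defined only under IPA_HW_v3_0 in this table (e.g. IPA_ROUTE) is inherited by later versions, because its later entries stay all-zero and ipahal_reg_init() copies the previous version's entry forward; an explicit offset of -1 (e.g. IPA_BCR under IPA_HW_v4_5) marks the register as removed, and the accessors WARN on any attempt to use it. */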
[IPA_HW_v3_0][IPA_ENDP_INIT_ROUTE_n] = { + ipareg_construct_endp_init_route_n, ipareg_parse_dummy, + 0x00000828, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_MODE_n] = { + ipareg_construct_endp_init_mode_n, ipareg_parse_dummy, + 0x00000820, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_NAT_n] = { + ipareg_construct_endp_init_nat_n, ipareg_parse_dummy, + 0x0000080C, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_n] = { + ipareg_construct_endp_init_ctrl_n, + ipareg_parse_endp_init_ctrl_n, + 0x00000800, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_SCND_n] = { + ipareg_construct_endp_init_ctrl_scnd_n, ipareg_parse_dummy, + 0x00000804, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = { + ipareg_construct_endp_init_hol_block_en_n, + ipareg_parse_dummy, + 0x0000082c, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = { + ipareg_construct_endp_init_hol_block_timer_n, + ipareg_parse_dummy, + 0x00000830, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_DEAGGR_n] = { + ipareg_construct_endp_init_deaggr_n, + ipareg_parse_dummy, + 0x00000834, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_SEQ_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000083C, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_DEBUG_CNT_REG_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000600, 0x4, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_CFG_n] = { + ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy, + 0x00000808, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_IRQ_EE_UC_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000301c, 0x1000, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = { + ipareg_construct_endp_init_hdr_metadata_mask_n, + ipareg_parse_dummy, + 0x00000818, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_n] = { + ipareg_construct_endp_init_hdr_metadata_n, + ipareg_parse_dummy, + 0x0000081c, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_RSRC_GRP_n] = { + ipareg_construct_endp_init_rsrc_grp_n, + ipareg_parse_dummy, + 0x00000838, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_SHARED_MEM_SIZE] = { + ipareg_construct_dummy, ipareg_parse_shared_mem_size, + 0x00000054, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_SW_AREA_RAM_DIRECT_ACCESS_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00007000, 0x4, 0, 0, 0}, + [IPA_HW_v3_0][IPA_DEBUG_CNT_CTRL_n] = { + ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy, + 0x00000640, 0x4, 0, 0, 0}, + [IPA_HW_v3_0][IPA_UC_MAILBOX_m_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00032000, 0x4, 0, 0, 0}, + [IPA_HW_v3_0][IPA_FILT_ROUT_HASH_FLUSH] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000090, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_SINGLE_NDP_MODE] = { + ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode, + 0x00000068, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_QCNCM] = { + ipareg_construct_qcncm, ipareg_parse_qcncm, + 0x00000064, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_SYS_PKT_PROC_CNTXT_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000001e0, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_LOCAL_PKT_PROC_CNTXT_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000001e8, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_STATUS_n] = { + ipareg_construct_endp_status_n, ipareg_parse_dummy, + 0x00000840, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = { + ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n, + 0x0000085C, 0x70, 0, 0, 0}, + [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000400, 0x20, 0, 0, 0}, + 
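+ /* Illustrative note: for n-parameterized registers the accessors add n_ofst * n to the base offset, so IPA_ENDP_INIT_CTRL_n above (base 0x800, stride 0x70) would resolve to 0x800 + 0x70 * 3 = 0x950 for pipe 3; pipe 3 is just an arbitrary example index. */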
[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000404, 0x20, 0, 0, 0}, + [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000408, 0x20, 0, 0, 0}, + [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x0000040C, 0x20, 0, 0, 0}, + [IPA_HW_v3_0][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000500, 0x20, 0, 0, 0}, + [IPA_HW_v3_0][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000504, 0x20, 0, 0, 0}, + [IPA_HW_v3_0][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000508, 0x20, 0, 0, 0}, + [IPA_HW_v3_0][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x0000050c, 0x20, 0, 0, 0}, + [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy, + 0x000023C4, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = { + ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy, + 0x000023C8, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy, + 0x000023CC, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = { + ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy, + 0x000023D0, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_QSB_MAX_WRITES] = { + ipareg_construct_qsb_max_writes, ipareg_parse_dummy, + 0x00000074, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_QSB_MAX_READS] = { + ipareg_construct_qsb_max_reads, ipareg_parse_dummy, + 0x00000078, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_DPS_SEQUENCER_FIRST] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0001e000, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_DPS_SEQUENCER_LAST] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0001e07c, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_HPS_SEQUENCER_FIRST] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0001e080, 0, 0, 0, 0}, + [IPA_HW_v3_0][IPA_HPS_SEQUENCER_LAST] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0001e26c, 0, 0, 0, 0}, + + + /* IPAv3.1 */ + [IPA_HW_v3_1][IPA_SUSPEND_IRQ_INFO_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003030, 0x1000, 0, 0, 0}, + [IPA_HW_v3_1][IPA_SUSPEND_IRQ_EN_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003034, 0x1000, 0, 0, 0}, + [IPA_HW_v3_1][IPA_SUSPEND_IRQ_CLR_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003038, 0x1000, 0, 0, 0}, + + + /* IPAv3.5 */ + [IPA_HW_v3_5][IPA_TX_CFG] = { + ipareg_construct_tx_cfg, ipareg_parse_tx_cfg, + 0x000001FC, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy, + 0x00000400, 0x20, 0, 0, 0}, + [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy, + 0x00000404, 0x20, 0, 0, 0}, + [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy, + 0x00000500, 0x20, 0, 0, 0}, + [IPA_HW_v3_5][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy, + 0x00000504, 0x20, 0, 0, 0}, + 
[IPA_HW_v3_5][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_ENDP_INIT_RSRC_GRP_n] = { + ipareg_construct_endp_init_rsrc_grp_n_v3_5, + ipareg_parse_dummy, + 0x00000838, 0x70, 0, 0, 0}, + [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0_v3_5, + ipareg_parse_dummy, + 0x000023C4, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0_v3_5, + ipareg_parse_dummy, + 0x000023CC, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_IDLE_INDICATION_CFG] = { + ipareg_construct_idle_indication_cfg, ipareg_parse_dummy, + 0x00000220, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_HPS_FTCH_ARB_QUEUE_WEIGHT] = { + ipareg_construct_hps_queue_weights, + ipareg_parse_hps_queue_weights, 0x000005a4, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_COUNTER_CFG] = { + ipareg_construct_counter_cfg, ipareg_parse_counter_cfg, + 0x000001F0, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_ENDP_GSI_CFG1_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00002794, 0x4, 0, 0, 0 }, + [IPA_HW_v3_5][IPA_ENDP_GSI_CFG_AOS_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000029A8, 0x4, 0, 0, 0 }, + [IPA_HW_v3_5][IPA_ENDP_GSI_CFG_TLV_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00002924, 0x4, 0, 0, 0 }, + [IPA_HW_v3_5][IPA_HPS_SEQUENCER_LAST] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0001e1fc, 0, 0, 0, 0}, + + /* IPAv4.0 */ + [IPA_HW_v4_0][IPA_SUSPEND_IRQ_INFO_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003030, 0x1000, 0, 1, 1}, + [IPA_HW_v4_0][IPA_SUSPEND_IRQ_EN_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003034, 0x1000, 0, 1, 1}, + [IPA_HW_v4_0][IPA_SUSPEND_IRQ_CLR_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003038, 0x1000, 0, 1, 1}, + [IPA_HW_v4_0][IPA_IRQ_EN_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000300c, 0x1000, 0, 1, 1}, + [IPA_HW_v4_0][IPA_TAG_TIMER] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000060, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_n] = { + ipareg_construct_endp_init_ctrl_n_v4_0, ipareg_parse_dummy, + 0x00000800, 0x70, 0, 23, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_HDR_EXT_n] = { + ipareg_construct_endp_init_hdr_ext_n, ipareg_parse_dummy, + 0x00000814, 0x70, 0, 23, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_AGGR_n] = { + ipareg_construct_endp_init_aggr_n, + ipareg_parse_endp_init_aggr_n, + 0x00000824, 0x70, 0, 23, 1}, + [IPA_HW_v4_0][IPA_TX_CFG] = { + ipareg_construct_tx_cfg_v4_0, ipareg_parse_tx_cfg_v4_0, + 0x000001FC, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_DEBUG_CNT_REG_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_DEBUG_CNT_CTRL_n] = { + ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_QCNCM] = { + ipareg_construct_qcncm, ipareg_parse_qcncm, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_SINGLE_NDP_MODE] = { + ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_QSB_MAX_READS] = { + ipareg_construct_qsb_max_reads_v4_0, ipareg_parse_dummy, + 0x00000078, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_FILT_ROUT_HASH_FLUSH] = { + 
ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000014c, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_ENDP_INIT_HDR_n] = { + ipareg_construct_endp_init_hdr_n, ipareg_parse_dummy, + 0x00000810, 0x70, 0, 23, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_ROUTE_n] = { + ipareg_construct_endp_init_route_n, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_ENDP_INIT_MODE_n] = { + ipareg_construct_endp_init_mode_n, ipareg_parse_dummy, + 0x00000820, 0x70, 0, 10, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_NAT_n] = { + ipareg_construct_endp_init_nat_n, ipareg_parse_dummy, + 0x0000080C, 0x70, 0, 10, 1}, + [IPA_HW_v4_0][IPA_ENDP_STATUS_n] = { + ipareg_construct_endp_status_n_v4_0, ipareg_parse_dummy, + 0x00000840, 0x70, 0, 23, 1}, + [IPA_HW_v4_0][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = { + ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n, + 0x0000085C, 0x70, 0, 32, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_CONN_TRACK_n] = { + ipareg_construct_endp_init_conn_track_n, + ipareg_parse_dummy, + 0x00000850, 0x70, 0, 10, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_SCND_n] = { + ipareg_construct_endp_init_ctrl_scnd_n, ipareg_parse_dummy, + 0x00000804, 0x70, 0, 23, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = { + ipareg_construct_endp_init_hol_block_en_n, + ipareg_parse_dummy, + 0x0000082c, 0x70, 10, 23, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = { + ipareg_construct_endp_init_hol_block_timer_n, + ipareg_parse_dummy, + 0x00000830, 0x70, 10, 23, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_DEAGGR_n] = { + ipareg_construct_endp_init_deaggr_n, + ipareg_parse_dummy, + 0x00000834, 0x70, 0, 10, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_SEQ_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000083C, 0x70, 0, 10, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_CFG_n] = { + ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy, + 0x00000808, 0x70, 0, 23, 1}, + [IPA_HW_v4_0][IPA_IRQ_EE_UC_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000301c, 0x1000, 0, 0, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = { + ipareg_construct_endp_init_hdr_metadata_mask_n, + ipareg_parse_dummy, + 0x00000818, 0x70, 10, 23, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_HDR_METADATA_n] = { + ipareg_construct_endp_init_hdr_metadata_n, + ipareg_parse_dummy, + 0x0000081c, 0x70, 0, 10, 1}, + [IPA_HW_v4_0][IPA_CLKON_CFG] = { + ipareg_construct_clkon_cfg, ipareg_parse_clkon_cfg, + 0x00000044, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_QUOTA_BASE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000700, 0x4, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_QUOTA_MASK_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000708, 0x4, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_TETHERING_BASE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000710, 0x4, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_TETHERING_MASK_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000718, 0x4, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000720, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000724, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000728, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000072C, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000730, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000734, 0, 
0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000738, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000073C, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000740, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000744, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000748, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000074C, 0, 0, 0, 0}, + [IPA_HW_v4_0][IPA_STAT_DROP_CNT_BASE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000750, 0x4, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STAT_DROP_CNT_MASK_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000758, 0x4, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_TX_WRAPPER] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000090, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_TX1] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000094, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_FETCHER] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000098, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_FETCHER_MASK] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000009C, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_DFETCHER] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000A0, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_ACL] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000A4, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000A8, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_RX_ACTIVE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000AC, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_TX0] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000B0, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_AGGR_ACTIVE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000B4, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_GSI_TLV] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000B8, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_GSI_AOS] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000BC, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_GSI_IF] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000C0, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_STATE_GSI_SKIP] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000C4, 0, 0, 0, 1}, + [IPA_HW_v4_0][IPA_SNOC_FEC_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003018, 0x1000, 0, 0, 1}, + [IPA_HW_v4_0][IPA_FEC_ADDR_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003020, 0x1000, 0, 0, 1}, + [IPA_HW_v4_0][IPA_FEC_ADDR_MSB_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003024, 0x1000, 0, 0, 1}, + [IPA_HW_v4_0][IPA_FEC_ATTR_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003028, 0x1000, 0, 0, 1}, + [IPA_HW_v4_0][IPA_HOLB_DROP_IRQ_INFO_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000303C, 0x1000, 0, 0, 1}, + [IPA_HW_v4_0][IPA_HOLB_DROP_IRQ_EN_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003040, 0x1000, 0, 0, 1}, + [IPA_HW_v4_0][IPA_HOLB_DROP_IRQ_CLR_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003044, 0x1000, 0, 0, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_STATUS_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000864, 0x70, 0, 23, 1}, + 
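+ /* Illustrative note: the last three numbers of each entry are n_start, n_end and en_print (see the struct ipahal_reg_obj comment above), so an endpoint entry ending in "0, 23, 1" is walked over that pipe range by ipahal_print_all_regs() in a crash dump, while entries with en_print left at 0 are skipped. */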
[IPA_HW_v4_0][IPA_ENDP_INIT_PROD_CFG_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000CC8, 0x70, 10, 23, 1}, + [IPA_HW_v4_0][IPA_ENDP_INIT_RSRC_GRP_n] = { + ipareg_construct_endp_init_rsrc_grp_n_v3_5, + ipareg_parse_dummy, + 0x00000838, 0x70, 0, 23, 1}, + [IPA_HW_v4_0][IPA_ENDP_YELLOW_RED_MARKER_CFG_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000CC0, 0x70, 10, 23, 1}, + + /* IPA4.2 */ + [IPA_HW_v4_2][IPA_IDLE_INDICATION_CFG] = { + ipareg_construct_idle_indication_cfg, ipareg_parse_dummy, + 0x00000240, 0, 0, 0, 0}, + [IPA_HW_v4_2][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = { + ipareg_construct_endp_init_hol_block_timer_n_v4_2, + ipareg_parse_dummy, + 0x00000830, 0x70, 8, 17, 1}, + [IPA_HW_v4_2][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_2][IPA_HPS_FTCH_ARB_QUEUE_WEIGHT] = { + ipareg_construct_dummy, + ipareg_parse_dummy, -1, 0, 0, 0, 0}, + [IPA_HW_v4_2][IPA_FILT_ROUT_HASH_EN] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000148, 0, 0, 0, 0}, + + /* IPA4.5 */ + [IPA_HW_v4_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000400, 0x20, 0, 0, 0}, + [IPA_HW_v4_5][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000404, 0x20, 0, 0, 0}, + [IPA_HW_v4_5][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000408, 0x20, 0, 0, 0}, + [IPA_HW_v4_5][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000500, 0x20, 0, 0, 0}, + [IPA_HW_v4_5][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000504, 0x20, 0, 0, 0}, + [IPA_HW_v4_5][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000508, 0x20, 0, 0, 0}, + [IPA_HW_v4_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0_v4_5, + ipareg_parse_dummy, + 0x000023c4, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0_v4_5, + ipareg_parse_dummy, + 0x000023cc, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_BCR] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_COMP_CFG] = { + ipareg_construct_comp_cfg_v4_5, ipareg_parse_comp_cfg_v4_5, + 0x0000003C, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_STATE_FETCHER_MASK] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_STATE_FETCHER_MASK_0] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000009C, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_STATE_FETCHER_MASK_1] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000CC, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_COUNTER_CFG] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_STATE_GSI_IF_CONS] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000C8, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_STATE_DPL_FIFO] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000D0, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_STATE_COAL_MASTER] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000D4, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_GENERIC_RAM_ARBITER_PRIORITY] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000D8, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_STATE_NLO_AGGR] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000DC, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_STATE_COAL_MASTER_1] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 
0x000000E0, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_ENDP_YELLOW_RED_MARKER_CFG_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000860, 0x70, 13, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_MODE_n] = { + ipareg_construct_endp_init_mode_n_v4_5, ipareg_parse_dummy, + 0x00000820, 0x70, 0, 13, 1}, + [IPA_HW_v4_5][IPA_TX_CFG] = { + ipareg_construct_tx_cfg_v4_5, ipareg_parse_tx_cfg_v4_5, + 0x000001FC, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_CLKON_CFG] = { + ipareg_construct_clkon_cfg_v4_5, ipareg_parse_clkon_cfg_v4_5, + 0x00000044, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_QTIME_TIMESTAMP_CFG] = { + ipareg_construct_qtime_timestamp_cfg, ipareg_parse_dummy, + 0x00000024c, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_TIMERS_PULSE_GRAN_CFG] = { + ipareg_construct_timers_pulse_gran_cfg, + ipareg_parse_timers_pulse_gran_cfg, + 0x000000254, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_TIMERS_XO_CLK_DIV_CFG] = { + ipareg_construct_timers_xo_clk_div_cfg, + ipareg_parse_timers_xo_clk_div_cfg, + 0x000000250, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_SEQ_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000083C, 0x70, 0, 13, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_CFG_n] = { + ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy, + 0x00000808, 0x70, 0, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_DEAGGR_n] = { + ipareg_construct_endp_init_deaggr_n, + ipareg_parse_dummy, + 0x00000834, 0x70, 0, 13, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_CTRL_n] = { + ipareg_construct_endp_init_ctrl_n_v4_0, ipareg_parse_dummy, + 0x00000800, 0x70, 0, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_CTRL_SCND_n] = { + ipareg_construct_endp_init_ctrl_scnd_n, ipareg_parse_dummy, + 0x00000804, 0x70, 0, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_CTRL_STATUS_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000864, 0x70, 0, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_PROD_CFG_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000CC8, 0x70, 13, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = { + ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n, + 0x0000085C, 0x70, 0, 32, 1}, + [IPA_HW_v4_5][IPA_ENDP_STATUS_n] = { + ipareg_construct_endp_status_n_v4_5, ipareg_parse_dummy, + 0x00000840, 0x70, 0, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_NAT_n] = { + ipareg_construct_endp_init_nat_n, ipareg_parse_dummy, + 0x0000080C, 0x70, 0, 13, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_CONN_TRACK_n] = { + ipareg_construct_endp_init_conn_track_n, + ipareg_parse_dummy, + 0x00000850, 0x70, 0, 13, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_RSRC_GRP_n] = { + ipareg_construct_endp_init_rsrc_grp_n_v4_5, + ipareg_parse_dummy, + 0x00000838, 0x70, 0, 31, 1}, + [IPA_HW_v4_5][IPA_STAT_FILTER_IPV4_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_STAT_FILTER_IPV6_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_STAT_ROUTER_IPV4_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_STAT_ROUTER_IPV6_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_STAT_FILTER_IPV4_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_STAT_FILTER_IPV6_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_STAT_ROUTER_IPV4_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_STAT_ROUTER_IPV6_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_DPS_SEQUENCER_FIRST] = { + 
ipareg_construct_dummy, ipareg_parse_dummy, + 0x00002570, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_DPS_SEQUENCER_LAST] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00002574, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_HPS_SEQUENCER_FIRST] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00002578, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_HPS_SEQUENCER_LAST] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000257c, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_NAT_TIMER] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000058, 0, 0, 0, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = { + ipareg_construct_endp_init_hol_block_en_n, + ipareg_parse_dummy, + 0x0000082c, 0x70, 13, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = { + ipareg_construct_endp_init_hol_block_timer_n_v4_5, + ipareg_parse_dummy, + 0x00000830, 0x70, 13, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_AGGR_n] = { + ipareg_construct_endp_init_aggr_n_v4_5, + ipareg_parse_endp_init_aggr_n_v4_5, + 0x00000824, 0x70, 0, 31, 1}, + [IPA_HW_v4_5][IPA_SW_AREA_RAM_DIRECT_ACCESS_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000010000, 0x4, 0, 0, 0}, + [IPA_HW_v4_5][IPA_ENDP_INIT_HDR_n] = { + ipareg_construct_endp_init_hdr_n_v4_5, ipareg_parse_dummy, + 0x00000810, 0x70, 0, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_HDR_EXT_n] = { + ipareg_construct_endp_init_hdr_ext_n_v4_5, ipareg_parse_dummy, + 0x00000814, 0x70, 0, 31, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_HDR_METADATA_n] = { + ipareg_construct_endp_init_hdr_metadata_n, + ipareg_parse_dummy, + 0x0000081c, 0x70, 0, 13, 1}, + [IPA_HW_v4_5][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = { + ipareg_construct_endp_init_hdr_metadata_mask_n, + ipareg_parse_dummy, + 0x00000818, 0x70, 13, 31, 1}, + [IPA_HW_v4_5][IPA_UC_MAILBOX_m_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00082000, 0x4, 0, 0, 0}, +}; + +/* + * ipahal_print_all_regs() - Loop and read and print all the valid registers + * Parameterized registers are also printed for all the valid ranges. + * Print to dmsg and IPC logs + */ +void ipahal_print_all_regs(bool print_to_dmesg) +{ + int i, j; + struct ipahal_reg_obj *reg; + + IPAHAL_DBG("Printing all registers for ipa_hw_type %d\n", + ipahal_ctx->hw_type); + + if ((ipahal_ctx->hw_type < IPA_HW_v4_0) || + (ipahal_ctx->hw_type >= IPA_HW_MAX)) { + IPAHAL_ERR("invalid IPA HW type (%d)\n", ipahal_ctx->hw_type); + return; + } + + for (i = 0; i < IPA_REG_MAX ; i++) { + reg = &(ipahal_reg_objs[ipahal_ctx->hw_type][i]); + + /* skip obsolete registers */ + if (reg->offset == -1) + continue; + + if (!reg->en_print) + continue; + + j = reg->n_start; + + if (j == reg->n_end) { + if (print_to_dmesg) + IPAHAL_DBG_REG("%s=0x%x\n", + ipahal_reg_name_str(i), + ipahal_read_reg_n(i, j)); + else + IPAHAL_DBG_REG_IPC_ONLY("%s=0x%x\n", + ipahal_reg_name_str(i), + ipahal_read_reg_n(i, j)); + } + + for (; j < reg->n_end; j++) { + if (print_to_dmesg) + IPAHAL_DBG_REG("%s_%u=0x%x\n", + ipahal_reg_name_str(i), + j, ipahal_read_reg_n(i, j)); + else + IPAHAL_DBG_REG_IPC_ONLY("%s_%u=0x%x\n", + ipahal_reg_name_str(i), + j, ipahal_read_reg_n(i, j)); + } + } +} + +/* + * ipahal_reg_init() - Build the registers information table + * See ipahal_reg_objs[][] comments + * + * Note: As global variables are initialized with zero, any un-overridden + * register entry will be zero. By this we recognize them. 
+ */ +int ipahal_reg_init(enum ipa_hw_type ipa_hw_type) +{ + int i; + int j; + struct ipahal_reg_obj zero_obj; + + IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type); + + if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) { + IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type); + return -EINVAL; + } + + memset(&zero_obj, 0, sizeof(zero_obj)); + for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) { + for (j = 0; j < IPA_REG_MAX ; j++) { + if (!memcmp(&ipahal_reg_objs[i+1][j], &zero_obj, + sizeof(struct ipahal_reg_obj))) { + memcpy(&ipahal_reg_objs[i+1][j], + &ipahal_reg_objs[i][j], + sizeof(struct ipahal_reg_obj)); + } else { + /* + * explicitly overridden register. + * Check validity + */ + if (!ipahal_reg_objs[i+1][j].offset) { + IPAHAL_ERR( + "reg=%s with zero offset ipa_ver=%d\n", + ipahal_reg_name_str(j), i+1); + WARN_ON(1); + } + if (!ipahal_reg_objs[i+1][j].construct) { + IPAHAL_ERR( + "reg=%s with NULL construct func ipa_ver=%d\n", + ipahal_reg_name_str(j), i+1); + WARN_ON(1); + } + if (!ipahal_reg_objs[i+1][j].parse) { + IPAHAL_ERR( + "reg=%s with NULL parse func ipa_ver=%d\n", + ipahal_reg_name_str(j), i+1); + WARN_ON(1); + } + } + } + } + + return 0; +} + +/* + * ipahal_reg_name_str() - returns string that represent the register + * @reg_name: [in] register name + */ +const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name) +{ + if (reg_name < 0 || reg_name >= IPA_REG_MAX) { + IPAHAL_ERR("requested name of invalid reg=%d\n", reg_name); + return "Invalid Register"; + } + + return ipareg_name_to_str[reg_name]; +} + +/* + * ipahal_read_reg_n() - Get n parameterized reg value + */ +u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n) +{ + u32 offset; + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return -EINVAL; + } + + IPAHAL_DBG_LOW("read from %s n=%u\n", + ipahal_reg_name_str(reg), n); + + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Read access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return -EPERM; + } + offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n; + return ioread32(ipahal_ctx->base + offset); +} + +/* + * ipahal_read_reg_mn() - Get mn parameterized reg value + */ +u32 ipahal_read_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n) +{ + u32 offset; + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return -EINVAL; + } + + IPAHAL_DBG_LOW("read %s m=%u n=%u\n", + ipahal_reg_name_str(reg), m, n); + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Read access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON_ONCE(1); + return -EPERM; + } + /* + * Currently there is one register with m and n parameters + * IPA_UC_MAILBOX_m_n. The m value of it is 0x80. + * If more such registers will be added in the future, + * we can move the m parameter to the table above. 
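+ * For example, using the IPA_HW_v3_0 table entry above (offset 0x32000, n_ofst 0x4), mailbox (m=1, n=2) would land at 0x32000 + 0x80 * 1 + 0x4 * 2 = 0x32088; the indices are chosen only for illustration.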
+ */ + offset += 0x80 * m; + offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n; + return ioread32(ipahal_ctx->base + offset); +} + +/* + * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value + */ +void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val) +{ + u32 offset; + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return; + } + + IPAHAL_DBG_LOW("write to %s m=%u n=%u val=%u\n", + ipahal_reg_name_str(reg), m, n, val); + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Write access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return; + } + /* + * Currently there is one register with m and n parameters + * IPA_UC_MAILBOX_m_n. The m value of it is 0x80. + * If more such registers will be added in the future, + * we can move the m parameter to the table above. + */ + offset += 0x80 * m; + offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n; + iowrite32(val, ipahal_ctx->base + offset); +} + +/* + * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg + */ +u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields) +{ + u32 val = 0; + u32 offset; + + if (!fields) { + IPAHAL_ERR("Input error fields\n"); + WARN_ON(1); + return -EINVAL; + } + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return -EINVAL; + } + + IPAHAL_DBG_LOW("read from %s n=%u and parse it\n", + ipahal_reg_name_str(reg), n); + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Read access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return -EPERM; + } + offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n; + val = ioread32(ipahal_ctx->base + offset); + ipahal_reg_objs[ipahal_ctx->hw_type][reg].parse(reg, fields, val); + + return val; +} + +/* + * ipahal_write_reg_n_fields() - Write to n parameterized reg a prased value + */ +void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n, + const void *fields) +{ + u32 val = 0; + u32 offset; + + if (!fields) { + IPAHAL_ERR("Input error fields=%pK\n", fields); + WARN_ON(1); + return; + } + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return; + } + + IPAHAL_DBG_LOW("write to %s n=%u after constructing it\n", + ipahal_reg_name_str(reg), n); + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Write access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return; + } + offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n; + ipahal_reg_objs[ipahal_ctx->hw_type][reg].construct(reg, fields, &val); + + iowrite32(val, ipahal_ctx->base + offset); +} + +/* + * Get the offset of a m/n parameterized register + */ +u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n) +{ + u32 offset; + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return -EINVAL; + } + + IPAHAL_DBG_LOW("get offset of %s m=%u n=%u\n", + ipahal_reg_name_str(reg), m, n); + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return -EPERM; + } + /* + * Currently there is one register with m and n parameters + * IPA_UC_MAILBOX_m_n. The m value of it is 0x80. 
+ * If more such registers will be added in the future, + * we can move the m parameter to the table above. + */ + offset += 0x80 * m; + offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n; + + return offset; +} + +u32 ipahal_get_reg_base(void) +{ + return 0x00040000; +} + + +/* + * Specific functions + * These functions supply specific register values for specific operations + * that cannot be reached by generic functions. + * E.g. To disable aggregation, need to write to specific bits of the AGGR + * register. The other bits should be untouched. This operation is very specific + * and cannot be generically defined. For such operations we define these + * specific functions. + */ + +void ipahal_get_aggr_force_close_valmask(int ep_idx, + struct ipahal_reg_valmask *valmask) +{ + u32 shft; + u32 bmsk; + + if (!valmask) { + IPAHAL_ERR("Input error\n"); + return; + } + + memset(valmask, 0, sizeof(struct ipahal_reg_valmask)); + + if (ipahal_ctx->hw_type <= IPA_HW_v3_1) { + shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT; + bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK; + } else if (ipahal_ctx->hw_type <= IPA_HW_v3_5_1) { + shft = + IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5; + bmsk = + IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5; + } else if (ipahal_ctx->hw_type <= IPA_HW_v4_1) { + shft = + IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_0; + bmsk = + IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_0; + } else if (ipahal_ctx->hw_type <= IPA_HW_v4_2) { + shft = + IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_2; + bmsk = + IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_2; + } else if (ipahal_ctx->hw_type <= IPA_HW_v4_5) { + shft = + IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_5; + bmsk = + IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_5; + } + + if (ep_idx > (sizeof(valmask->val) * 8 - 1)) { + IPAHAL_ERR("too big ep_idx %d\n", ep_idx); + ipa_assert(); + return; + } + IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk); + valmask->mask = bmsk; +} + +void ipahal_get_fltrt_hash_flush_valmask( + struct ipahal_reg_fltrt_hash_flush *flush, + struct ipahal_reg_valmask *valmask) +{ + if (!flush || !valmask) { + IPAHAL_ERR("Input error: flush=%pK ; valmask=%pK\n", + flush, valmask); + return; + } + + memset(valmask, 0, sizeof(struct ipahal_reg_valmask)); + + if (flush->v6_rt) + valmask->val |= + (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT); + if (flush->v6_flt) + valmask->val |= + (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT); + if (flush->v4_rt) + valmask->val |= + (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT); + if (flush->v4_flt) + valmask->val |= + (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT); + + valmask->mask = valmask->val; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h new file mode 100644 index 000000000000..5b1c9176252d --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h @@ -0,0 +1,745 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPAHAL_REG_H_ +#define _IPAHAL_REG_H_ + +#include + +/* + * Registers names + * + * NOTE:: Any change to this enum, need to change to ipareg_name_to_str + * array as well.
+ */ +enum ipahal_reg_name { + IPA_ROUTE, + IPA_IRQ_STTS_EE_n, + IPA_IRQ_EN_EE_n, + IPA_IRQ_CLR_EE_n, + IPA_SUSPEND_IRQ_INFO_EE_n, + IPA_SUSPEND_IRQ_EN_EE_n, + IPA_SUSPEND_IRQ_CLR_EE_n, + IPA_HOLB_DROP_IRQ_INFO_EE_n, + IPA_HOLB_DROP_IRQ_EN_EE_n, + IPA_HOLB_DROP_IRQ_CLR_EE_n, + IPA_BCR, + IPA_ENABLED_PIPES, + IPA_VERSION, + IPA_TAG_TIMER, + IPA_NAT_TIMER, + IPA_COMP_HW_VERSION, + IPA_COMP_CFG, + IPA_STATE_TX_WRAPPER, + IPA_STATE_TX1, + IPA_STATE_FETCHER, + IPA_STATE_FETCHER_MASK, + IPA_STATE_FETCHER_MASK_0, + IPA_STATE_FETCHER_MASK_1, + IPA_STATE_DFETCHER, + IPA_STATE_ACL, + IPA_STATE, + IPA_STATE_RX_ACTIVE, + IPA_STATE_TX0, + IPA_STATE_AGGR_ACTIVE, + IPA_COUNTER_CFG, + IPA_STATE_GSI_TLV, + IPA_STATE_GSI_AOS, + IPA_STATE_GSI_IF, + IPA_STATE_GSI_SKIP, + IPA_STATE_GSI_IF_CONS, + IPA_STATE_DPL_FIFO, + IPA_STATE_COAL_MASTER, + IPA_GENERIC_RAM_ARBITER_PRIORITY, + IPA_STATE_NLO_AGGR, + IPA_STATE_COAL_MASTER_1, + IPA_ENDP_INIT_HDR_n, + IPA_ENDP_INIT_HDR_EXT_n, + IPA_ENDP_INIT_AGGR_n, + IPA_AGGR_FORCE_CLOSE, + IPA_ENDP_INIT_ROUTE_n, + IPA_ENDP_INIT_MODE_n, + IPA_ENDP_INIT_NAT_n, + IPA_ENDP_INIT_CONN_TRACK_n, + IPA_ENDP_INIT_CTRL_n, + IPA_ENDP_INIT_CTRL_SCND_n, + IPA_ENDP_INIT_CTRL_STATUS_n, + IPA_ENDP_INIT_HOL_BLOCK_EN_n, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, + IPA_ENDP_INIT_DEAGGR_n, + IPA_ENDP_INIT_SEQ_n, + IPA_DEBUG_CNT_REG_n, + IPA_ENDP_INIT_CFG_n, + IPA_IRQ_EE_UC_n, + IPA_ENDP_INIT_HDR_METADATA_MASK_n, + IPA_ENDP_INIT_HDR_METADATA_n, + IPA_ENDP_INIT_PROD_CFG_n, + IPA_ENDP_INIT_RSRC_GRP_n, + IPA_SHARED_MEM_SIZE, + IPA_SW_AREA_RAM_DIRECT_ACCESS_n, + IPA_DEBUG_CNT_CTRL_n, + IPA_UC_MAILBOX_m_n, + IPA_FILT_ROUT_HASH_FLUSH, + IPA_FILT_ROUT_HASH_EN, + IPA_SINGLE_NDP_MODE, + IPA_QCNCM, + IPA_SYS_PKT_PROC_CNTXT_BASE, + IPA_LOCAL_PKT_PROC_CNTXT_BASE, + IPA_ENDP_STATUS_n, + IPA_ENDP_YELLOW_RED_MARKER_CFG_n, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n, + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n, + IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n, + IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n, + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + IPA_DST_RSRC_GRP_23_RSRC_TYPE_n, + IPA_DST_RSRC_GRP_45_RSRC_TYPE_n, + IPA_DST_RSRC_GRP_67_RSRC_TYPE_n, + IPA_RX_HPS_CLIENTS_MIN_DEPTH_0, + IPA_RX_HPS_CLIENTS_MIN_DEPTH_1, + IPA_RX_HPS_CLIENTS_MAX_DEPTH_0, + IPA_RX_HPS_CLIENTS_MAX_DEPTH_1, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHT, + IPA_QSB_MAX_WRITES, + IPA_QSB_MAX_READS, + IPA_TX_CFG, + IPA_IDLE_INDICATION_CFG, + IPA_DPS_SEQUENCER_FIRST, + IPA_DPS_SEQUENCER_LAST, + IPA_HPS_SEQUENCER_FIRST, + IPA_HPS_SEQUENCER_LAST, + IPA_CLKON_CFG, + IPA_QTIME_TIMESTAMP_CFG, + IPA_TIMERS_PULSE_GRAN_CFG, + IPA_TIMERS_XO_CLK_DIV_CFG, + IPA_STAT_QUOTA_BASE_n, + IPA_STAT_QUOTA_MASK_n, + IPA_STAT_TETHERING_BASE_n, + IPA_STAT_TETHERING_MASK_n, + IPA_STAT_FILTER_IPV4_BASE, + IPA_STAT_FILTER_IPV6_BASE, + IPA_STAT_ROUTER_IPV4_BASE, + IPA_STAT_ROUTER_IPV6_BASE, + IPA_STAT_FILTER_IPV4_START_ID, + IPA_STAT_FILTER_IPV6_START_ID, + IPA_STAT_ROUTER_IPV4_START_ID, + IPA_STAT_ROUTER_IPV6_START_ID, + IPA_STAT_FILTER_IPV4_END_ID, + IPA_STAT_FILTER_IPV6_END_ID, + IPA_STAT_ROUTER_IPV4_END_ID, + IPA_STAT_ROUTER_IPV6_END_ID, + IPA_STAT_DROP_CNT_BASE_n, + IPA_STAT_DROP_CNT_MASK_n, + IPA_SNOC_FEC_EE_n, + IPA_FEC_ADDR_EE_n, + IPA_FEC_ADDR_MSB_EE_n, + IPA_FEC_ATTR_EE_n, + IPA_ENDP_GSI_CFG1_n, + IPA_ENDP_GSI_CFG_AOS_n, + IPA_ENDP_GSI_CFG_TLV_n, + IPA_REG_MAX, +}; + +/* + * struct ipahal_reg_route - IPA route register + * @route_dis: route disable + * @route_def_pipe: route default pipe + * @route_def_hdr_table: route default header table + * @route_def_hdr_ofst: route default header offset table + 
* @route_frag_def_pipe: Default pipe to route fragmented exception + * packets and frag new rule statues, if source pipe does not have + * a notification status pipe defined. + * @route_def_retain_hdr: default value of retain header. It is used + * when no rule was hit + */ +struct ipahal_reg_route { + u32 route_dis; + u32 route_def_pipe; + u32 route_def_hdr_table; + u32 route_def_hdr_ofst; + u8 route_frag_def_pipe; + u32 route_def_retain_hdr; +}; + +/* + * struct ipahal_reg_endp_init_route - IPA ENDP_INIT_ROUTE_n register + * @route_table_index: Default index of routing table (IPA Consumer). + */ +struct ipahal_reg_endp_init_route { + u32 route_table_index; +}; + +/* + * struct ipahal_reg_endp_init_rsrc_grp - IPA_ENDP_INIT_RSRC_GRP_n register + * @rsrc_grp: Index of group for this ENDP. If this ENDP is a source-ENDP, + * index is for source-resource-group. If destination ENPD, index is + * for destination-resoruce-group. + */ +struct ipahal_reg_endp_init_rsrc_grp { + u32 rsrc_grp; +}; + +/* + * struct ipahal_reg_endp_init_mode - IPA ENDP_INIT_MODE_n register + * @dst_pipe_number: This parameter specifies destination output-pipe-packets + * will be routed to. Valid for DMA mode only and for Input + * Pipes only (IPA Consumer) + */ +struct ipahal_reg_endp_init_mode { + u32 dst_pipe_number; + struct ipa_ep_cfg_mode ep_mode; +}; + +/* + * struct ipahal_reg_shared_mem_size - IPA_SHARED_MEM_SIZE register + * @shared_mem_sz: Available size [in 8Bytes] of SW partition within + * IPA shared memory. + * @shared_mem_baddr: Offset of SW partition within IPA + * shared memory[in 8Bytes]. To get absolute address of SW partition, + * add this offset to IPA_SW_AREA_RAM_DIRECT_ACCESS_n baddr. + */ +struct ipahal_reg_shared_mem_size { + u32 shared_mem_sz; + u32 shared_mem_baddr; +}; + +/* + * struct ipahal_reg_ep_cfg_status - status configuration in IPA end-point + * @status_en: Determines if end point supports Status Indications. SW should + * set this bit in order to enable Statuses. Output Pipe - send + * Status indications only if bit is set. Input Pipe - forward Status + * indication to STATUS_ENDP only if bit is set. Valid for Input + * and Output Pipes (IPA Consumer and Producer) + * @status_ep: Statuses generated for this endpoint will be forwarded to the + * specified Status End Point. Status endpoint needs to be + * configured with STATUS_EN=1 Valid only for Input Pipes (IPA + * Consumer) + * @status_location: Location of PKT-STATUS on destination pipe. + * If set to 0 (default), PKT-STATUS will be appended before the packet + * for this endpoint. If set to 1, PKT-STATUS will be appended after the + * packet for this endpoint. Valid only for Output Pipes (IPA Producer) + * @status_pkt_suppress: Disable notification status, when statistics is enabled + */ +struct ipahal_reg_ep_cfg_status { + bool status_en; + u8 status_ep; + bool status_location; + u8 status_pkt_suppress; +}; + +/* + * struct ipahal_reg_clkon_cfg- Enables SW bypass clock-gating for the IPA core + * + * @all: Enables SW bypass clock-gating controls for this sub-module; + * 0: CGC is enabled by internal logic, 1: No CGC (clk is always 'ON'). 
+ * sub-module affected is based on var name -> ex: open_rx refers + * to IPA_RX sub-module and open_global refers to global IPA 1x clock + */ +struct ipahal_reg_clkon_cfg { + bool open_dpl_fifo; + bool open_global_2x_clk; + bool open_global; + bool open_gsi_if; + bool open_weight_arb; + bool open_qmb; + bool open_ram_slaveway; + bool open_aggr_wrapper; + bool open_qsb2axi_cmdq_l; + bool open_fnr; + bool open_tx_1; + bool open_tx_0; + bool open_ntf_tx_cmdqs; + bool open_dcmp; + bool open_h_dcph; + bool open_d_dcph; + bool open_ack_mngr; + bool open_ctx_handler; + bool open_rsrc_mngr; + bool open_dps_tx_cmdqs; + bool open_hps_dps_cmdqs; + bool open_rx_hps_cmdqs; + bool open_dps; + bool open_hps; + bool open_ftch_dps; + bool open_ftch_hps; + bool open_ram_arb; + bool open_misc; + bool open_tx_wrapper; + bool open_proc; + bool open_rx; +}; + +/* + * struct ipahal_reg_qtime_timestamp_cfg - IPA timestamp configuration + * Relevant starting IPA 4.5. + * IPA timestamps are based on QTIMER which is 56bit length which is + * based on XO clk running at 19.2MHz (52nsec resolution). + * Specific timestamps (TAG, NAT, DPL) my require lower resolution. + * This can be achieved by omitting LSB bits from 56bit QTIMER. + * e.g. if we omit (shift) 24 bit then we get (2^24)*(52n)=0.87sec resolution. + * + * @dpl_timestamp_lsb: Shifting Qtime value. Value will be used as LSB of + * DPL timestamp. + * @dpl_timestamp_sel: if false, DPL timestamp will be based on legacy + * DPL_TIMER which counts in 1ms. if true, it will be based on QTIME + * value shifted by dpl_timestamp_lsb. + * @tag_timestamp_lsb: Shifting Qtime value. Value will be used as LSB of + * TAG timestamp. + * @nat_timestamp_lsb: Shifting Qtime value. Value will be used as LSB of + * NAT timestamp. + */ +struct ipahal_reg_qtime_timestamp_cfg { + u32 dpl_timestamp_lsb; + bool dpl_timestamp_sel; + u32 tag_timestamp_lsb; + u32 nat_timestamp_lsb; +}; + +/* + * enum ipa_timers_time_gran_type - Time granularity to be used with timers + * + * e.g. for HOLB and Aggregation timers + */ +enum ipa_timers_time_gran_type { + IPA_TIMERS_TIME_GRAN_10_USEC, + IPA_TIMERS_TIME_GRAN_20_USEC, + IPA_TIMERS_TIME_GRAN_50_USEC, + IPA_TIMERS_TIME_GRAN_100_USEC, + IPA_TIMERS_TIME_GRAN_1_MSEC, + IPA_TIMERS_TIME_GRAN_10_MSEC, + IPA_TIMERS_TIME_GRAN_100_MSEC, + IPA_TIMERS_TIME_GRAN_NEAR_HALF_SEC, /* 0.65536s */ + IPA_TIMERS_TIME_GRAN_MAX, +}; + +/* + * struct ipahal_reg_timers_pulse_gran_cfg - Counters tick granularities + * Relevant starting IPA 4.5. + * IPA timers are based on XO CLK running 19.2MHz (52ns resolution) deviced + * by clock divider (see IPA_TIMERS_XO_CLK_DIV_CFG) - default 100Khz (10usec). + * IPA timers instances (e.g. HOLB or AGGR) may require different resolutions. + * There are 3 global pulse generators with configurable granularity. Each + * timer instance can choose one of the three generators to work with. + * Each generator granularity can be one of supported ones. + * + * @gran_X: granularity tick of counterX + */ +struct ipahal_reg_timers_pulse_gran_cfg { + enum ipa_timers_time_gran_type gran_0; + enum ipa_timers_time_gran_type gran_1; + enum ipa_timers_time_gran_type gran_2; +}; + +/* + * struct ipahal_reg_timers_xo_clk_div_cfg - IPA timers clock divider + * Used to control clock divider which gets XO_CLK of 19.2MHz as input. + * Output of CDIV is used to generate IPA timers granularity + * + * @enable: Enable of the clock divider for all IPA and GSI timers. + * clock is disabled by default, and need to be enabled when system is up. 
+ * @value: Divided value to be used by CDIV. POR value is set to 191 + * to generate 100KHz clk based on XO_CLK. + * Values of ipahal_reg_timers_pulse_gran_cfg are based on this default. + */ +struct ipahal_reg_timers_xo_clk_div_cfg { + bool enable; + u32 value; +}; + +/* + * struct ipahal_reg_comp_cfg- IPA Core QMB/Master Port selection + * + * @enable / @ipa_dcmp_fast_clk_en: are not relevant starting IPA4.5 + * @ipa_full_flush_wait_rsc_closure_en: relevant starting IPA4.5 + */ +struct ipahal_reg_comp_cfg { + bool ipa_full_flush_wait_rsc_closure_en; + u8 ipa_atomic_fetcher_arb_lock_dis; + bool ipa_qmb_select_by_address_global_en; + bool gsi_multi_axi_masters_dis; + bool gsi_snoc_cnoc_loop_protection_disable; + bool gen_qmb_0_snoc_cnoc_loop_protection_disable; + bool gen_qmb_1_multi_inorder_wr_dis; + bool gen_qmb_0_multi_inorder_wr_dis; + bool gen_qmb_1_multi_inorder_rd_dis; + bool gen_qmb_0_multi_inorder_rd_dis; + bool gsi_multi_inorder_wr_dis; + bool gsi_multi_inorder_rd_dis; + bool ipa_qmb_select_by_address_prod_en; + bool ipa_qmb_select_by_address_cons_en; + bool ipa_dcmp_fast_clk_en; + bool gen_qmb_1_snoc_bypass_dis; + bool gen_qmb_0_snoc_bypass_dis; + bool gsi_snoc_bypass_dis; + bool enable; +}; + +/* + * struct ipa_hash_tuple - Hash tuple members for flt and rt + * the fields tells if to be masked or not + * @src_id: pipe number for flt, table index for rt + * @src_ip_addr: IP source address + * @dst_ip_addr: IP destination address + * @src_port: L4 source port + * @dst_port: L4 destination port + * @protocol: IP protocol field + * @meta_data: packet meta-data + * + */ +struct ipahal_reg_hash_tuple { + /* src_id: pipe in flt, tbl index in rt */ + bool src_id; + bool src_ip_addr; + bool dst_ip_addr; + bool src_port; + bool dst_port; + bool protocol; + bool meta_data; +}; + +/* + * struct ipahal_reg_fltrt_hash_tuple - IPA hash tuple register + * @flt: Hash tuple info for filtering + * @rt: Hash tuple info for routing + * @undefinedX: Undefined/Unused bit fields set of the register + */ +struct ipahal_reg_fltrt_hash_tuple { + struct ipahal_reg_hash_tuple flt; + struct ipahal_reg_hash_tuple rt; + u32 undefined1; + u32 undefined2; +}; + +/* + * enum ipahal_reg_dbg_cnt_type - Debug Counter Type + * DBG_CNT_TYPE_IPV4_FLTR - Count IPv4 filtering rules + * DBG_CNT_TYPE_IPV4_ROUT - Count IPv4 routing rules + * DBG_CNT_TYPE_GENERAL - General counter + * DBG_CNT_TYPE_IPV6_FLTR - Count IPv6 filtering rules + * DBG_CNT_TYPE_IPV4_ROUT - Count IPv6 routing rules + */ +enum ipahal_reg_dbg_cnt_type { + DBG_CNT_TYPE_IPV4_FLTR, + DBG_CNT_TYPE_IPV4_ROUT, + DBG_CNT_TYPE_GENERAL, + DBG_CNT_TYPE_IPV6_FLTR, + DBG_CNT_TYPE_IPV6_ROUT, +}; + +/* + * struct ipahal_reg_debug_cnt_ctrl - IPA_DEBUG_CNT_CTRL_n register + * @en - Enable debug counter + * @type - Type of debugging couting + * @product - False->Count Bytes . True->Count #packets + * @src_pipe - Specific Pipe to match. If FF, no need to match + * specific pipe + * @rule_idx_pipe_rule - Global Rule or Pipe Rule. If pipe, then indicated by + * src_pipe. Starting at IPA V3_5, + * no support on Global Rule. This field will be ignored. + * @rule_idx - Rule index. 
Irrelevant for type General + */ +struct ipahal_reg_debug_cnt_ctrl { + bool en; + enum ipahal_reg_dbg_cnt_type type; + bool product; + u8 src_pipe; + bool rule_idx_pipe_rule; + u16 rule_idx; +}; + +/* + * struct ipahal_reg_rsrc_grp_cfg - Min/Max values for two rsrc groups + * @x_min - first group min value + * @x_max - first group max value + * @y_min - second group min value + * @y_max - second group max value + */ +struct ipahal_reg_rsrc_grp_cfg { + u32 x_min; + u32 x_max; + u32 y_min; + u32 y_max; +}; + +/* + * struct ipahal_reg_rx_hps_clients - Min or Max values for RX HPS clients + * @client_minmax - Min or Max values. In case of depth 0 the 4 or 5 values + * are used. In case of depth 1, only the first 2 values are used + */ +struct ipahal_reg_rx_hps_clients { + u32 client_minmax[5]; +}; + +/* + * struct ipahal_reg_rx_hps_weights - weight values for RX HPS clients + * @hps_queue_weight_0 - 4 bit Weight for RX_HPS_CMDQ #0 (3:0) + * @hps_queue_weight_1 - 4 bit Weight for RX_HPS_CMDQ #1 (7:4) + * @hps_queue_weight_2 - 4 bit Weight for RX_HPS_CMDQ #2 (11:8) + * @hps_queue_weight_3 - 4 bit Weight for RX_HPS_CMDQ #3 (15:12) + */ +struct ipahal_reg_rx_hps_weights { + u32 hps_queue_weight_0; + u32 hps_queue_weight_1; + u32 hps_queue_weight_2; + u32 hps_queue_weight_3; +}; + +/* + * struct ipahal_reg_counter_cfg - granularity of counter registers + * @aggr_granularity -Defines the granularity of AGGR timers + * granularity [msec]=(x+1)/(32) + */ +struct ipahal_reg_counter_cfg { + enum { + GRAN_VALUE_125_USEC = 3, + GRAN_VALUE_250_USEC = 7, + GRAN_VALUE_500_USEC = 15, + GRAN_VALUE_MSEC = 31, + } aggr_granularity; +}; + + +/* + * struct ipahal_reg_valmask - holding values and masking for registers + * HAL application may require only value and mask of it for some + * register fields. + * @val - The value + * @mask - Tha mask of the value + */ +struct ipahal_reg_valmask { + u32 val; + u32 mask; +}; + +/* + * struct ipahal_reg_fltrt_hash_flush - Flt/Rt flush configuration + * @v6_rt - Flush IPv6 Routing cache + * @v6_flt - Flush IPv6 Filtering cache + * @v4_rt - Flush IPv4 Routing cache + * @v4_flt - Flush IPv4 Filtering cache + */ +struct ipahal_reg_fltrt_hash_flush { + bool v6_rt; + bool v6_flt; + bool v4_rt; + bool v4_flt; +}; + +/* + * struct ipahal_reg_single_ndp_mode - IPA SINGLE_NDP_MODE register + * @single_ndp_en: When set to '1', IPA builds MBIM frames with up to 1 + * NDP-header. + * @unused: undefined bits of the register + */ +struct ipahal_reg_single_ndp_mode { + bool single_ndp_en; + u32 undefined; +}; + +/* + * struct ipahal_reg_qcncm - IPA QCNCM register + * @mode_en: When QCNCM_MODE_EN=1, IPA will use QCNCM signature. + * @mode_val: Used only when QCNCM_MODE_EN=1 and sets SW Signature in + * the NDP header. 
+ * @unused: undefined bits of the register + */ +struct ipahal_reg_qcncm { + bool mode_en; + u32 mode_val; + u32 undefined; +}; + +/* + * struct ipahal_reg_qsb_max_writes - IPA QSB Max Writes register + * @qmb_0_max_writes: Max number of outstanding writes for GEN_QMB_0 + * @qmb_1_max_writes: Max number of outstanding writes for GEN_QMB_1 + */ +struct ipahal_reg_qsb_max_writes { + u32 qmb_0_max_writes; + u32 qmb_1_max_writes; +}; + +/* + * struct ipahal_reg_qsb_max_reads - IPA QSB Max Reads register + * @qmb_0_max_reads: Max number of outstanding reads for GEN_QMB_0 + * @qmb_1_max_reads: Max number of outstanding reads for GEN_QMB_1 + * @qmb_0_max_read_beats: Max number of outstanding read beats for GEN_QMB_0 + * @qmb_1_max_read_beats: Max number of outstanding read beats for GEN_QMB_1 + */ +struct ipahal_reg_qsb_max_reads { + u32 qmb_0_max_reads; + u32 qmb_1_max_reads; + u32 qmb_0_max_read_beats; + u32 qmb_1_max_read_beats; +}; + +/* + * struct ipahal_reg_tx_cfg - IPA TX_CFG register + * @tx0_prefetch_disable: Disable prefetch on TX0 + * @tx1_prefetch_disable: Disable prefetch on TX1 + * @tx0_prefetch_almost_empty_size: Prefetch almost empty size on TX0 + * @tx1_prefetch_almost_empty_size: Prefetch almost empty size on TX1 + * @dmaw_scnd_outsd_pred_threshold: threshold for DMAW_SCND_OUTSD_PRED_EN + * @dmaw_max_beats_256_dis: + * @dmaw_scnd_outsd_pred_en: + * @pa_mask_en: + * @dual_tx_enable: When 1 TX0 and TX1 are enabled. When 0 only TX0 is enabled + * Relevant starting IPA4.5 + */ +struct ipahal_reg_tx_cfg { + bool tx0_prefetch_disable; + bool tx1_prefetch_disable; + u32 tx0_prefetch_almost_empty_size; + u32 tx1_prefetch_almost_empty_size; + u32 dmaw_scnd_outsd_pred_threshold; + u32 dmaw_max_beats_256_dis; + u32 dmaw_scnd_outsd_pred_en; + u32 pa_mask_en; + bool dual_tx_enable; +}; + +/* + * struct ipahal_reg_idle_indication_cfg - IPA IDLE_INDICATION_CFG register + * @const_non_idle_enable: enable the asserting of the IDLE value and DCD + * @enter_idle_debounce_thresh: configure the debounce threshold + */ +struct ipahal_reg_idle_indication_cfg { + u16 enter_idle_debounce_thresh; + bool const_non_idle_enable; +}; + +/* + * struct ipa_ep_cfg_ctrl_scnd - PA_ENDP_INIT_CTRL_SCND_n register + * @endp_delay: delay endpoint + */ +struct ipahal_ep_cfg_ctrl_scnd { + bool endp_delay; +}; + +/* + * ipahal_print_all_regs() - Loop and read and print all the valid registers + * Parameterized registers are also printed for all the valid ranges. 
+ * Print to dmsg and IPC logs + */ +void ipahal_print_all_regs(bool print_to_dmesg); + +/* + * ipahal_reg_name_str() - returns string that represent the register + * @reg_name: [in] register name + */ +const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name); + +/* + * ipahal_read_reg_n() - Get the raw value of n parameterized reg + */ +u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n); + +/* + * ipahal_read_reg_mn() - Get mn parameterized reg value + */ +u32 ipahal_read_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n); + +/* + * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value + */ +void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val); + +/* + * ipahal_write_reg_n() - Write to n parameterized reg a raw value + */ +static inline void ipahal_write_reg_n(enum ipahal_reg_name reg, + u32 n, u32 val) +{ + ipahal_write_reg_mn(reg, 0, n, val); +} + +/* + * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg + */ +u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields); + +/* + * ipahal_write_reg_n_fields() - Write to n parameterized reg a prased value + */ +void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n, + const void *fields); + +/* + * ipahal_read_reg() - Get the raw value of a reg + */ +static inline u32 ipahal_read_reg(enum ipahal_reg_name reg) +{ + return ipahal_read_reg_n(reg, 0); +} + +/* + * ipahal_write_reg() - Write to reg a raw value + */ +static inline void ipahal_write_reg(enum ipahal_reg_name reg, + u32 val) +{ + ipahal_write_reg_mn(reg, 0, 0, val); +} + +/* + * ipahal_read_reg_fields() - Get the parsed value of a reg + */ +static inline u32 ipahal_read_reg_fields(enum ipahal_reg_name reg, void *fields) +{ + return ipahal_read_reg_n_fields(reg, 0, fields); +} + +/* + * ipahal_write_reg_fields() - Write to reg a parsed value + */ +static inline void ipahal_write_reg_fields(enum ipahal_reg_name reg, + const void *fields) +{ + ipahal_write_reg_n_fields(reg, 0, fields); +} + +/* + * Get the offset of a m/n parameterized register + */ +u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n); + +/* + * Get the offset of a n parameterized register + */ +static inline u32 ipahal_get_reg_n_ofst(enum ipahal_reg_name reg, u32 n) +{ + return ipahal_get_reg_mn_ofst(reg, 0, n); +} + +/* + * Get the offset of a register + */ +static inline u32 ipahal_get_reg_ofst(enum ipahal_reg_name reg) +{ + return ipahal_get_reg_mn_ofst(reg, 0, 0); +} + +/* + * Get the register base address + */ +u32 ipahal_get_reg_base(void); + +/* + * Specific functions + * These functions supply specific register values for specific operations + * that cannot be reached by generic functions. + * E.g. To disable aggregation, need to write to specific bits of the AGGR + * register. The other bits should be untouched. This operation is very + * specific and cannot be generically defined. For such operations we define + * these specific functions. 
+ */ +void ipahal_get_aggr_force_close_valmask(int ep_idx, + struct ipahal_reg_valmask *valmask); +void ipahal_get_fltrt_hash_flush_valmask( + struct ipahal_reg_fltrt_hash_flush *flush, + struct ipahal_reg_valmask *valmask); + +#endif /* _IPAHAL_REG_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h new file mode 100644 index 000000000000..a6704b86b50c --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h @@ -0,0 +1,605 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPAHAL_REG_I_H_ +#define _IPAHAL_REG_I_H_ + +int ipahal_reg_init(enum ipa_hw_type ipa_hw_type); + +#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask)) +#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \ + (reg |= ((val) << (shift)) & (mask)) +#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \ + (((reg) & (mask)) >> (shift)) + + +/* IPA_ROUTE register */ +#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0 +#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1 +#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1 +#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e +#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6 +#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0X40 +#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7 +#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80 +#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000 +#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11 +#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK 0x1000000 +#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18 + +/* IPA_ENDP_INIT_HDR_n register */ +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80 +#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000 +#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14 +#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000 +#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK 0x8000000 +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT 0x1b +#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK 0x10000000 +#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT 0x1c + +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK_v4_5 0x3f +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT_v4_5 0x0 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK_v4_5 0x40 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT_v4_5 0x6 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT_v4_5 0x7 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK_v4_5 0x1f80 +#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK_v4_5 0x7e000 +#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT_v4_5 0xd +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK_v4_5 0x80000 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT_v4_5 0x13 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK_v4_5 0x3f00000 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT_v4_5 0x14 +#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK_v4_5 0x4000000 +#define 
IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT_v4_5 0x1a +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v4_5 0x8000000 +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v4_5 0x1b +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_BMSK_v4_5 0x30000000 +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_SHFT_v4_5 0x1c +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_BMSK_v4_5 0xc0000000 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_SHFT_v4_5 0x1e + +/* IPA_ENDP_INIT_HDR_EXT_n register */ +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK 0x3c00 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_SHFT_v4_5 0x10 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_BMSK_v4_5 \ + 0x30000 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_SHFT_v4_5 0x12 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_BMSK_v4_5 0xC0000 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_SHFT_v4_5 0x14 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_BMSK_v4_5 0x300000 + +/* IPA_ENDP_INIT_AGGR_n register */ +#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK 0x1000000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT 0x18 +#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x400000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16 +#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x200000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15 +#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf +#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00 +#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa +#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0 +#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5 +#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c +#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2 +#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3 +#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0 + +#define IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_BMSK_V4_5 0x8000000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_SHFT_V4_5 27 +#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK_V4_5 0x4000000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT_V4_5 26 +#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK_V4_5 0x1000000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT_V4_5 24 +#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK_V4_5 0x800000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT_V4_5 23 +#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK_V4_5 0x7e0000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT_V4_5 17 +#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK_V4_5 0x1f000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT_V4_5 12 +#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK_V4_5 0x7e0 +#define 
IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT_V4_5 5 +#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK_V4_5 0x1c +#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT_V4_5 2 +#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK_V4_5 0x3 +#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT_V4_5 0 + +/* IPA_AGGR_FORCE_CLOSE register */ +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3fffffff +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0 +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5 0xfffff +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5 0 +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_0 0x7fffff +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_0 0 +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_2 0x1ffff +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_2 0 +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_5 0x7fffffff +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_5 0 + +/* IPA_ENDP_INIT_ROUTE_n register */ +#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f +#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0 + +/* IPA_ENDP_INIT_MODE_n register */ +#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_BMSK 0x40000000 +#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_SHFT 0x1e +#define IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000 +#define IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d +#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_BMSK 0x10000000 +#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_SHFT 0x1c +#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000 +#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc +#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0 +#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4 +#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7 +#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0 + +#define IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK_V4_5 0x20000000 +#define IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT_V4_5 0x1d +#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_BMSK_V4_5 0x10000000 +#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_SHFT_V4_5 0x1c +#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK_V4_5 0xffff000 +#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT_V4_5 0xc +#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK_V4_5 0x1f0 +#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT_V4_5 0x4 +#define IPA_ENDP_INIT_MODE_n_DCPH_ENABLE_BMSK_V4_5 0x8 +#define IPA_ENDP_INIT_MODE_n_DCPH_ENABLE_SHFT_V4_5 0x3 +#define IPA_ENDP_INIT_MODE_n_MODE_BMSK_V4_5 0x7 +#define IPA_ENDP_INIT_MODE_n_MODE_SHFT_V4_5 0x0 + +/* IPA_ENDP_INIT_NAT_n register */ +#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3 +#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0 + +/* IPA_ENDP_INIT_CONN_TRACK_n register */ +#define IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_BMSK 0x1 +#define IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_SHFT 0x0 + +/* IPA_ENDP_INIT_CTRL_n register */ +#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1 +#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0 +#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2 +#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1 + +/* IPA_ENDP_INIT_CTRL_SCND_n register */ +#define IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_BMSK 0x2 +#define IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_SHFT 0x1 + +/* IPA_ENDP_INIT_HOL_BLOCK_EN_n register */ +#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX 19 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX_V_4_0 22 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1 +#define 
IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0 + +/* IPA_ENDP_INIT_HOL_BLOCK_TIMER_n register */ +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK 0xffffffff +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT 0x0 + +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_SHFT_V_4_2 0 +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_BMSK_V_4_2 0x1f +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_SHFT_V_4_2 0x8 +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_BMSK_V_4_2 0x1f00 + +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_BMSK_V4_5 0x1F +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_SHFT_V4_5 0 +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_BMSK_V4_5 0x100 +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_SHFT_V4_5 8 + +/* IPA_ENDP_INIT_DEAGGR_n register */ +#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000 +#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x80 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7 +#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F +#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0 + +/* IPA_IPA_ENDP_INIT_SEQ_n register */ +#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000 +#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc +#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00 +#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8 +#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0 +#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4 +#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf +#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0 + +/* IPA_DEBUG_CNT_REG_m register */ +#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff +#define IPA_DEBUG_CNT_REG_N_MAX 15 +#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff +#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0 + +/* IPA_ENDP_INIT_CFG_n register */ +#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK 0x100 +#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT 0x8 +#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78 +#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3 +#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6 +#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1 +#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1 +#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0 + +/* IPA_ENDP_INIT_HDR_METADATA_MASK_n register */ +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0 + +/* IPA_IPA_ENDP_INIT_HDR_METADATA_n register */ +#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff +#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0 + +/* IPA_ENDP_INIT_RSRC_GRP_n register */ +#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7 +#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0 +#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5 0x3 +#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5 0 +#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v4_5 0x7 +#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v4_5 0 + +/* IPA_SHARED_MEM_SIZE register */ +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK 0xffff0000 +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT 0x10 +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK 0xffff +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT 0x0 + +/* IPA_DEBUG_CNT_CTRL_n register */ +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK 
0x10000000 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT 0x1c +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK 0x0ff00000 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5 0x1ff00000 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT 0x14 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT 0xc +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK 0x100 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT 0x8 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK 0x70 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT 0x4 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK 0x1 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT 0x0 + +/* IPA_FILT_ROUT_HASH_FLUSH register */ +#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT 12 +#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT 8 +#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT 4 +#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT 0 + +/* IPA_SINGLE_NDP_MODE register */ +#define IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK 0xfffffffe +#define IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT 0x1 +#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK 0x1 +#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT 0 + +/* IPA_QCNCM register */ +#define IPA_QCNCM_MODE_UNDEFINED2_BMSK 0xf0000000 +#define IPA_QCNCM_MODE_UNDEFINED2_SHFT 0x1c +#define IPA_QCNCM_MODE_VAL_BMSK 0xffffff0 +#define IPA_QCNCM_MODE_VAL_SHFT 0x4 +#define IPA_QCNCM_UNDEFINED1_BMSK 0xe +#define IPA_QCNCM_UNDEFINED1_SHFT 0x1 +#define IPA_QCNCM_MODE_EN_BMSK 0x1 +#define IPA_QCNCM_MODE_EN_SHFT 0 + +/* IPA_ENDP_STATUS_n register */ +#define IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK 0x200 +#define IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT 0x9 +#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100 +#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8 +#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e +#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1 +#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1 +#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0 + +/* IPA_CLKON_CFG register */ +#define IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_BMSK_V4_5 0x40000000 +#define IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_SHFT_V4_5 30 +#define IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK 0x20000000 +#define IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT 29 +#define IPA_CLKON_CFG_OPEN_GLOBAL_BMSK 0x10000000 +#define IPA_CLKON_CFG_OPEN_GLOBAL_SHFT 28 +#define IPA_CLKON_CFG_OPEN_GSI_IF_BMSK 0x8000000 +#define IPA_CLKON_CFG_OPEN_GSI_IF_SHFT 27 +#define IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT 26 +#define IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK 0x4000000 +#define IPA_CLKON_CFG_OPEN_QMB_SHFT 25 +#define IPA_CLKON_CFG_OPEN_QMB_BMSK 0x2000000 +#define IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT 24 +#define IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK 0x1000000 +#define IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT 23 +#define IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK 0x800000 +#define IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT 22 +#define IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK 0x400000 +#define IPA_CLKON_CFG_OPEN_FNR_SHFT 21 +#define IPA_CLKON_CFG_OPEN_FNR_BMSK 0x200000 +#define IPA_CLKON_CFG_OPEN_TX_1_SHFT 20 +#define IPA_CLKON_CFG_OPEN_TX_1_BMSK 0x100000 +#define IPA_CLKON_CFG_OPEN_TX_0_SHFT 19 +#define IPA_CLKON_CFG_OPEN_TX_0_BMSK 0x80000 +#define IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT 18 +#define IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK 0x40000 +#define IPA_CLKON_CFG_OPEN_DCMP_SHFT 17 +#define IPA_CLKON_CFG_OPEN_DCMP_BMSK 0x20000 +#define IPA_CLKON_CFG_OPEN_H_DCPH_SHFT 16 +#define IPA_CLKON_CFG_OPEN_H_DCPH_BMSK 0x10000 +#define IPA_CLKON_CFG_OPEN_D_DCPH_SHFT 15 +#define 
IPA_CLKON_CFG_OPEN_D_DCPH_BMSK 0x8000 +#define IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT 14 +#define IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK 0x4000 +#define IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT 13 +#define IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK 0x2000 +#define IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT 12 +#define IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK 0x1000 +#define IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT 11 +#define IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK 0x800 +#define IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT 10 +#define IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK 0x400 +#define IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT 9 +#define IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK 0x200 +#define IPA_CLKON_CFG_OPEN_DPS_SHFT 8 +#define IPA_CLKON_CFG_OPEN_DPS_BMSK 0x100 +#define IPA_CLKON_CFG_OPEN_HPS_SHFT 7 +#define IPA_CLKON_CFG_OPEN_HPS_BMSK 0x80 +#define IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT 6 +#define IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK 0x40 +#define IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT 5 +#define IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK 0x20 +#define IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT 4 +#define IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK 0x10 +#define IPA_CLKON_CFG_OPEN_MISC_SHFT 3 +#define IPA_CLKON_CFG_OPEN_MISC_BMSK 0x8 +#define IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT 2 +#define IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK 0x4 +#define IPA_CLKON_CFG_OPEN_PROC_SHFT 1 +#define IPA_CLKON_CFG_OPEN_PROC_BMSK 0x2 +#define IPA_CLKON_CFG_OPEN_RX_BMSK 0x1 +#define IPA_CLKON_CFG_OPEN_RX_SHFT 0 + +/* IPA_QTIME_TIMESTAMP_CFG register */ +#define IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_LSB_SHFT 0 +#define IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_LSB_BMSK 0x1F +#define IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_SEL_SHFT 7 +#define IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_SEL_BMSK 0x80 +#define IPA_QTIME_TIMESTAMP_CFG_TAG_TIMESTAMP_LSB_SHFT 8 +#define IPA_QTIME_TIMESTAMP_CFG_TAG_TIMESTAMP_LSB_BMSK 0x1F00 +#define IPA_QTIME_TIMESTAMP_CFG_NAT_TIMESTAMP_LSB_SHFT 16 +#define IPA_QTIME_TIMESTAMP_CFG_NAT_TIMESTAMP_LSB_BMSK 0x1F0000 + +/* IPA_TIMERS_PULSE_GRAN_CFG register */ +#define IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(x) (3 * (x)) +#define IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(x) (0x7 << (3 * (x))) + +/* IPA_TIMERS_XO_CLK_DIV_CFG register */ +#define IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_SHFT 0 +#define IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_BMSK 0x1FF +#define IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_SHFT 31 +#define IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_BMSK 0x80000000 + +/* IPA_ENDP_FILTER_ROUTER_HSH_CFG_n register */ +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT 1 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK 0x2 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT 2 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK 0x4 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT 3 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK 0x8 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT 4 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK 0x10 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT 5 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK 0x20 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT 6 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK 0x40 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT 7 +#define 
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK 0xff80 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT 16 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK 0x10000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT 17 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK 0x20000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT 18 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK 0x40000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT 19 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK 0x80000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT 20 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK 0x100000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT 21 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK 0x200000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT 22 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK 0x400000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT 23 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK 0xff800000 + +/* IPA_RSRC_GRP_XY_RSRC_TYPE_n register */ +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK 0xFF000000 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT 24 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK 0xFF0000 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT 16 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK 0xFF00 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5 0x3F000000 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5 24 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5 0x3F0000 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5 16 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5 0x3F00 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5 8 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5 0x3F +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5 0 + +/* IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */ +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n))) +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(n) \ + (0xF << (8 * (n))) +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n)) +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_BMSK_v4_5 0xF0000000 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_SHFT_v4_5 28 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_BMSK_v4_5 0xF000000 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_SHFT_v4_5 24 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_BMSK_v4_5 0xF0000 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_SHFT_v4_5 16 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_BMSK_v4_5 0xF00 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_SHFT_v4_5 8 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_BMSK_v4_5 0xF +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_SHFT_v4_5 0 + +/* IPA_QSB_MAX_WRITES register */ +#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK (0xf) +#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT (0) +#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK (0xf0) +#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT (4) + +/* IPA_QSB_MAX_READS register */ +#define 
IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK (0xf) +#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT (0) +#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK (0xf0) +#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT (4) + +/* IPA_QSB_MAX_READS_BEATS register */ +#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_BMSK_V4_0 (0xff0000) +#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_SHFT_V4_0 (0x10) +#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_BMSK_V4_0 (0xff000000) +#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_SHFT_V4_0 (0x18) + +/* IPA_TX_CFG register */ +#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5 (0x1) +#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5 (0) +#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5 (0x2) +#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5 (1) +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5 (0x1C) +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5 (2) + +#define IPA_TX_CFG_DUAL_TX_ENABLE_BMSK_V4_5 (0x20000) +#define IPA_TX_CFG_DUAL_TX_ENABLE_SHFT_V4_5 (0x11) +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0 (0x1e000) +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0 (0xd) +#define IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0 (0x1000) +#define IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0 (0xc) +#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0 (0x800) +#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0 (0xb) +#define IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0 (0x400) +#define IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0 (0xa) +#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0 (0x3c0) +#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0 (0x6) +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0 (0x3c) +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0 (0x2) + +/* IPA_IDLE_INDICATION_CFG regiser */ +#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5 (0xffff) +#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5 (0) +#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5 (0x10000) +#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5 (16) + +/* IPA_HPS_FTCH_QUEUE_WEIGHT register */ +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK (0xf) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT (0x0) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK (0xf0) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT (0x4) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK (0xf00) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT (0x8) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK (0xf000) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT (0xc) + +/* IPA_COUNTER_CFG register */ +#define IPA_COUNTER_CFG_AGGR_GRANULARITY_BMSK (0x1f0) +#define IPA_COUNTER_CFG_AGGR_GRANULARITY_SHFT (0x4) + +/* IPA_COMP_CFG register*/ +#define IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK 0x1E0000 +#define IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT 17 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_BMSK 0x10000 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_SHFT 16 +#define IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_BMSK 0x8000 +#define IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_SHFT 15 +#define IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK 0x4000 +#define IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT 14 +#define IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK 0x2000 +#define 
IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT 13 +#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_BMSK 0x1000 +#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_SHFT 12 +#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_BMSK 0x800 +#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_SHFT 11 +#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_BMSK 0x400 +#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_SHFT 10 +#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_BMSK 0x200 +#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_SHFT 9 +#define IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_BMSK 0x100 +#define IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_SHFT 8 +#define IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_BMSK 0x80 +#define IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_SHFT 7 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_BMSK 0x40 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_SHFT 6 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_BMSK 0x20 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_SHFT 5 +#define IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_BMSK 0x10 +#define IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_SHFT 4 +#define IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_BMSK 0x8 +#define IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_SHFT 3 +#define IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_BMSK 0x4 +#define IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_SHFT 2 +#define IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK 0x2 +#define IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT 1 +#define IPA_COMP_CFG_ENABLE_BMSK 0x1 +#define IPA_COMP_CFG_ENABLE_SHFT 0 + +#define IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_5 0x200000 +#define IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT_v4_5 21 +#define IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK_v4_5 0x1E0000 +#define IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT_v4_5 17 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_BMSK_v4_5 0x10000 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_SHFT_v4_5 16 +#define IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_BMSK_v4_5 0x8000 +#define IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_SHFT_v4_5 15 +#define IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK_v4_5 0x4000 +#define IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT_v4_5 14 +#define IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK_v4_5 \ + 0x2000 +#define IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT_v4_5 13 +#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_BMSK_v4_5 0x1000 +#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_SHFT_v4_5 12 +#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_BMSK_v4_5 0x800 +#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_SHFT_v4_5 11 +#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_BMSK_v4_5 0x400 +#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_SHFT_v4_5 10 +#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_BMSK_v4_5 0x200 +#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_SHFT_v4_5 9 +#define IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_BMSK_v4_5 0x100 +#define IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_SHFT_v4_5 8 +#define IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_BMSK_v4_5 0x80 +#define IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_SHFT_v4_5 7 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_BMSK_v4_5 0x40 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_SHFT_v4_5 6 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_BMSK_v4_5 0x20 +#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_SHFT_v4_5 5 +#define IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_BMSK_v4_5 0x8 
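[Editor's illustration, not part of the patch] The construct/parse callbacks in ipahal_reg.c are built almost entirely from the *_SHFT/*_BMSK pairs in this header, combined through the IPA_SETFIELD_IN_REG() and IPA_GETFIELD_FROM_REG() helpers defined near the top of the file. The standalone sketch below copies the IPA_ENDP_STATUS_n field macros from above and packs/unpacks a register word the same way a construct/parse pair would; the main() wrapper and printf are scaffolding for the example only and do not exist in the driver.

/* Illustrative only: field packing/unpacking with the SHFT/BMSK scheme. */
#include <stdio.h>
#include <stdint.h>

/* Helpers mirrored from ipahal_reg_i.h */
#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
	(reg |= ((val) << (shift)) & (mask))
#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \
	(((reg) & (mask)) >> (shift))

/* Field macros copied from the IPA_ENDP_STATUS_n block above */
#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK   0x1
#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT   0x0
#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1

int main(void)
{
	uint32_t val = 0;

	/* construct: enable status and route it to endpoint 5 */
	IPA_SETFIELD_IN_REG(val, 1, IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
			    IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
	IPA_SETFIELD_IN_REG(val, 5, IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
			    IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);

	/* parse: recover the status endpoint field from the raw value */
	printf("raw=0x%x status_endp=%u\n", val,
	       (unsigned int)IPA_GETFIELD_FROM_REG(val,
			IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
			IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK));
	return 0;
}

In the driver itself the same pattern runs inside ipareg_construct_*/ipareg_parse_* callbacks, with the raw word then written or read at offset + n_ofst * n as shown in ipahal_write_reg_n_fields()/ipahal_read_reg_n_fields().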
+#define IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_SHFT_v4_5 3 +#define IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_BMSK_v4_5 0x4 +#define IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_SHFT_v4_5 2 +#define IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK_v4_5 0x2 +#define IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT_v4_5 1 + +#endif /* _IPAHAL_REG_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c new file mode 100644 index 000000000000..83fdaf276d99 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -0,0 +1,4278 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + */ + +/* + * WWAN Transport Network Driver. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_qmi_service.h" +#include +#include +#include +#include +#include "ipa_mhi_proxy.h" + +#include "ipa_trace.h" +#include "ipa_odl.h" + + +#define OUTSTANDING_HIGH_DEFAULT 256 +#define OUTSTANDING_HIGH_CTL_DEFAULT (OUTSTANDING_HIGH_DEFAULT + 32) +#define OUTSTANDING_LOW_DEFAULT 128 + +#define WWAN_METADATA_SHFT 24 +#define WWAN_METADATA_MASK 0xFF000000 +#define WWAN_DATA_LEN 9216 +#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */ +#define HEADROOM_FOR_QMAP 8 /* for mux header */ +#define TAILROOM 0 /* for padding by mux layer */ +#define MAX_NUM_OF_MUX_CHANNEL 15 /* max mux channels */ +#define UL_FILTER_RULE_HANDLE_START 69 + +#define IPA_WWAN_DEV_NAME "rmnet_ipa%d" +#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0" +#define IPA_UPSTEAM_WLAN1_IFACE_NAME "wlan1" + +#define IPA_WWAN_RX_SOFTIRQ_THRESH 16 + +#define INVALID_MUX_ID 0xFF +#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64 +#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64 +#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */ + +#define IPA_NETDEV() \ + ((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? 
\ + rmnet_ipa3_ctx->wwan_priv->net : NULL) + +#define IPA_WWAN_CONS_DESC_FIFO_SZ 256 + +static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type); +static void rmnet_ipa_get_stats_and_update(void); + +static int ipa3_wwan_add_ul_flt_rule_to_ipa(void); +static int ipa3_wwan_del_ul_flt_rule_to_ipa(void); +static void ipa3_wwan_msg_free_cb(void*, u32, u32); +static int ipa3_rmnet_poll(struct napi_struct *napi, int budget); + +static void ipa3_wake_tx_queue(struct work_struct *work); +static DECLARE_WORK(ipa3_tx_wakequeue_work, ipa3_wake_tx_queue); + +static void tethering_stats_poll_queue(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work, + tethering_stats_poll_queue); + +enum ipa3_wwan_device_status { + WWAN_DEVICE_INACTIVE = 0, + WWAN_DEVICE_ACTIVE = 1 +}; + +struct ipa3_rmnet_plat_drv_res { + bool ipa_rmnet_ssr; + bool is_platform_type_msm; + bool ipa_advertise_sg_support; + bool ipa_napi_enable; + u32 wan_rx_desc_size; +}; + +/** + * struct ipa3_wwan_private - WWAN private data + * @net: network interface struct implemented by this driver + * @stats: iface statistics + * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed + * @ch_id: channel id + * @lock: spinlock for mutual exclusion + * @device_status: holds device status + * + * WWAN private - holds all relevant info about WWAN driver + */ +struct ipa3_wwan_private { + struct net_device *net; + struct net_device_stats stats; + atomic_t outstanding_pkts; + uint32_t ch_id; + spinlock_t lock; + struct completion resource_granted_completion; + enum ipa3_wwan_device_status device_status; + struct napi_struct napi; +}; + +struct rmnet_ipa_debugfs { + struct dentry *dent; + struct dentry *dfile_outstanding_high; + struct dentry *dfile_outstanding_high_ctl; + struct dentry *dfile_outstanding_low; +}; + +struct rmnet_ipa3_context { + struct ipa3_wwan_private *wwan_priv; + struct ipa_sys_connect_params apps_to_ipa_ep_cfg; + struct ipa_sys_connect_params ipa_to_apps_ep_cfg; + u32 qmap_hdr_hdl; + u32 dflt_v4_wan_rt_hdl; + u32 dflt_v6_wan_rt_hdl; + struct ipa3_rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL]; + int num_q6_rules; + int old_num_q6_rules; + int rmnet_index; + bool egress_set; + bool a7_ul_flt_set; + struct workqueue_struct *rm_q6_wq; + atomic_t is_initialized; + atomic_t is_ssr; + void *subsys_notify_handle; + u32 apps_to_ipa3_hdl; + u32 ipa3_to_apps_hdl; + struct mutex pipe_handle_guard; + struct mutex add_mux_channel_lock; + u32 pm_hdl; + u32 q6_pm_hdl; + u32 q6_teth_pm_hdl; + struct mutex per_client_stats_guard; + struct ipa_tether_device_info + tether_device + [IPACM_MAX_CLIENT_DEVICE_TYPES]; + u32 outstanding_high; + u32 outstanding_high_ctl; + u32 outstanding_low; + struct rmnet_ipa_debugfs dbgfs; +}; + +static struct rmnet_ipa3_context *rmnet_ipa3_ctx; +static struct ipa3_rmnet_plat_drv_res ipa3_rmnet_res; + +/** + * ipa3_setup_a7_qmap_hdr() - Setup default a7 qmap hdr + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +static int ipa3_setup_a7_qmap_hdr(void) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + u32 pyld_sz; + int ret; + + /* install the basic exception header */ + pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add); + hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + 
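+	/*
+	 * A zeroed 4-byte QMAP (MAP) header is installed as the default A7
+	 * exception header; only its length is set below. In the RmNet MAP
+	 * layout byte 0 carries the command/data bit and pad length, byte 1
+	 * the mux_id (left 0 here, filled per channel by ipa3_add_qmap_hdr())
+	 * and bytes 2-3 the packet length.
+	 */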
hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */ + + if (ipa3_add_hdr(hdr)) { + IPAWANERR("fail to add IPA_A7_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + IPAWANERR("fail to add IPA_A7_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + rmnet_ipa3_ctx->qmap_hdr_hdl = hdr_entry->hdr_hdl; + + ret = 0; +bail: + kfree(hdr); + return ret; +} + +static void ipa3_del_a7_qmap_hdr(void) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *hdl_entry; + u32 pyld_sz; + int ret; + + pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 * + sizeof(struct ipa_hdr_del); + del_hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!del_hdr) { + IPAWANERR("fail to alloc exception hdr_del\n"); + return; + } + + del_hdr->commit = 1; + del_hdr->num_hdls = 1; + hdl_entry = &del_hdr->hdl[0]; + hdl_entry->hdl = rmnet_ipa3_ctx->qmap_hdr_hdl; + + ret = ipa3_del_hdr(del_hdr); + if (ret || hdl_entry->status) + IPAWANERR("ipa3_del_hdr failed\n"); + else + IPAWANDBG("hdrs deletion done\n"); + + rmnet_ipa3_ctx->qmap_hdr_hdl = 0; + kfree(del_hdr); +} + +static void ipa3_del_qmap_hdr(uint32_t hdr_hdl) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *hdl_entry; + u32 pyld_sz; + int ret; + + if (hdr_hdl == 0) { + IPAWANERR("Invalid hdr_hdl provided\n"); + return; + } + + pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 * + sizeof(struct ipa_hdr_del); + del_hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!del_hdr) { + IPAWANERR("fail to alloc exception hdr_del\n"); + return; + } + + del_hdr->commit = 1; + del_hdr->num_hdls = 1; + hdl_entry = &del_hdr->hdl[0]; + hdl_entry->hdl = hdr_hdl; + + ret = ipa3_del_hdr(del_hdr); + if (ret || hdl_entry->status) + IPAWANERR("ipa3_del_hdr failed\n"); + else + IPAWANDBG("header deletion done\n"); + + rmnet_ipa3_ctx->qmap_hdr_hdl = 0; + kfree(del_hdr); +} + +static void ipa3_del_mux_qmap_hdrs(void) +{ + int index; + + for (index = 0; index < rmnet_ipa3_ctx->rmnet_index; index++) { + ipa3_del_qmap_hdr(rmnet_ipa3_ctx->mux_channel[index].hdr_hdl); + rmnet_ipa3_ctx->mux_channel[index].hdr_hdl = 0; + } +} + +static int ipa3_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + char hdr_name[IPA_RESOURCE_NAME_MAX]; + u32 pyld_sz; + int ret; + + pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add); + hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + mux_id); + strlcpy(hdr_entry->name, hdr_name, + IPA_RESOURCE_NAME_MAX); + + hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */ + hdr_entry->hdr[1] = (uint8_t) mux_id; + IPAWANDBG("header (%s) with mux-id: (%d)\n", + hdr_name, + hdr_entry->hdr[1]); + if (ipa3_add_hdr(hdr)) { + IPAWANERR("fail to add IPA_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + IPAWANERR("fail to add IPA_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + ret = 0; + *hdr_hdl = hdr_entry->hdr_hdl; +bail: + kfree(hdr); + return ret; +} + +/** + * ipa3_setup_dflt_wan_rt_tables() - Setup default wan routing tables + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +static int ipa3_setup_dflt_wan_rt_tables(void) +{ + struct ipa_ioc_add_rt_rule *rt_rule; + struct ipa_rt_rule_add *rt_rule_entry; + + rt_rule = + kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_add), GFP_KERNEL); + 
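+	/*
+	 * The same request buffer is reused below to install first a v4 and
+	 * then a v6 default route, both pointing at IPA_CLIENT_APPS_WAN_CONS
+	 * through the A7 QMAP header added above.
+	 */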
if (!rt_rule) + return -ENOMEM; + + /* setup a default v4 route to point to Apps */ + rt_rule->num_rules = 1; + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME, + IPA_RESOURCE_NAME_MAX); + + rt_rule_entry = &rt_rule->rules[0]; + rt_rule_entry->at_rear = 1; + rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS; + rt_rule_entry->rule.hdr_hdl = rmnet_ipa3_ctx->qmap_hdr_hdl; + + if (ipa3_add_rt_rule(rt_rule)) { + IPAWANERR("fail to add dflt_wan v4 rule\n"); + kfree(rt_rule); + return -EPERM; + } + + IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl; + + /* setup a default v6 route to point to A5 */ + rt_rule->ip = IPA_IP_v6; + if (ipa3_add_rt_rule(rt_rule)) { + IPAWANERR("fail to add dflt_wan v6 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl; + + kfree(rt_rule); + return 0; +} + +static void ipa3_del_dflt_wan_rt_tables(void) +{ + struct ipa_ioc_del_rt_rule *rt_rule; + struct ipa_rt_rule_del *rt_rule_entry; + int len; + + len = sizeof(struct ipa_ioc_del_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_del); + rt_rule = kzalloc(len, GFP_KERNEL); + if (!rt_rule) + return; + + memset(rt_rule, 0, len); + rt_rule->commit = 1; + rt_rule->num_hdls = 1; + rt_rule->ip = IPA_IP_v4; + + rt_rule_entry = &rt_rule->hdl[0]; + rt_rule_entry->status = -1; + rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl; + + IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n", + rt_rule_entry->hdl, IPA_IP_v4); + if (ipa3_del_rt_rule(rt_rule) || + (rt_rule_entry->status)) { + IPAWANERR("Routing rule deletion failed\n"); + } + + rt_rule->ip = IPA_IP_v6; + rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl; + IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n", + rt_rule_entry->hdl, IPA_IP_v6); + if (ipa3_del_rt_rule(rt_rule) || + (rt_rule_entry->status)) { + IPAWANERR("Routing rule deletion failed\n"); + } + + kfree(rt_rule); +} + +static void ipa3_copy_qmi_flt_rule_ex( + struct ipa_ioc_ext_intf_prop *q6_ul_flt_rule_ptr, + struct ipa_filter_spec_ex_type_v01 *flt_spec_ptr) +{ + int j; + struct ipa_ipfltr_range_eq_16 *q6_ul_filter_nat_ptr; + struct ipa_ipfltr_range_eq_16_type_v01 *filter_spec_nat_ptr; + + q6_ul_flt_rule_ptr->ip = flt_spec_ptr->ip_type; + q6_ul_flt_rule_ptr->action = flt_spec_ptr->filter_action; + if (flt_spec_ptr->is_routing_table_index_valid == true) + q6_ul_flt_rule_ptr->rt_tbl_idx = + flt_spec_ptr->route_table_index; + if (flt_spec_ptr->is_mux_id_valid == true) + q6_ul_flt_rule_ptr->mux_id = + flt_spec_ptr->mux_id; + q6_ul_flt_rule_ptr->rule_id = + flt_spec_ptr->rule_id; + q6_ul_flt_rule_ptr->is_rule_hashable = + flt_spec_ptr->is_rule_hashable; + q6_ul_flt_rule_ptr->eq_attrib.rule_eq_bitmap = + flt_spec_ptr->filter_rule.rule_eq_bitmap; + q6_ul_flt_rule_ptr->eq_attrib.tos_eq_present = + flt_spec_ptr->filter_rule.tos_eq_present; + q6_ul_flt_rule_ptr->eq_attrib.tos_eq = + flt_spec_ptr->filter_rule.tos_eq; + q6_ul_flt_rule_ptr->eq_attrib.protocol_eq_present = + flt_spec_ptr->filter_rule.protocol_eq_present; + q6_ul_flt_rule_ptr->eq_attrib.protocol_eq = + flt_spec_ptr->filter_rule.protocol_eq; + q6_ul_flt_rule_ptr->eq_attrib.num_ihl_offset_range_16 = + flt_spec_ptr->filter_rule.num_ihl_offset_range_16; + + for (j = 0; + j < q6_ul_flt_rule_ptr->eq_attrib.num_ihl_offset_range_16; + j++) { + q6_ul_filter_nat_ptr = + 
&q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_range_16[j]; + filter_spec_nat_ptr = + &flt_spec_ptr->filter_rule.ihl_offset_range_16[j]; + q6_ul_filter_nat_ptr->offset = + filter_spec_nat_ptr->offset; + q6_ul_filter_nat_ptr->range_low = + filter_spec_nat_ptr->range_low; + q6_ul_filter_nat_ptr->range_high = + filter_spec_nat_ptr->range_high; + } + q6_ul_flt_rule_ptr->eq_attrib.num_offset_meq_32 = + flt_spec_ptr->filter_rule.num_offset_meq_32; + for (j = 0; + j < q6_ul_flt_rule_ptr->eq_attrib.num_offset_meq_32; + j++) { + q6_ul_flt_rule_ptr->eq_attrib.offset_meq_32[j].offset = + flt_spec_ptr->filter_rule.offset_meq_32[j].offset; + q6_ul_flt_rule_ptr->eq_attrib.offset_meq_32[j].mask = + flt_spec_ptr->filter_rule.offset_meq_32[j].mask; + q6_ul_flt_rule_ptr->eq_attrib.offset_meq_32[j].value = + flt_spec_ptr->filter_rule.offset_meq_32[j].value; + } + + q6_ul_flt_rule_ptr->eq_attrib.tc_eq_present = + flt_spec_ptr->filter_rule.tc_eq_present; + q6_ul_flt_rule_ptr->eq_attrib.tc_eq = + flt_spec_ptr->filter_rule.tc_eq; + q6_ul_flt_rule_ptr->eq_attrib.fl_eq_present = + flt_spec_ptr->filter_rule.flow_eq_present; + q6_ul_flt_rule_ptr->eq_attrib.fl_eq = + flt_spec_ptr->filter_rule.flow_eq; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_16_present = + flt_spec_ptr->filter_rule.ihl_offset_eq_16_present; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_16.offset = + flt_spec_ptr->filter_rule.ihl_offset_eq_16.offset; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_16.value = + flt_spec_ptr->filter_rule.ihl_offset_eq_16.value; + + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_32_present = + flt_spec_ptr->filter_rule.ihl_offset_eq_32_present; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_32.offset = + flt_spec_ptr->filter_rule.ihl_offset_eq_32.offset; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_32.value = + flt_spec_ptr->filter_rule.ihl_offset_eq_32.value; + + q6_ul_flt_rule_ptr->eq_attrib.num_ihl_offset_meq_32 = + flt_spec_ptr->filter_rule.num_ihl_offset_meq_32; + for (j = 0; + j < q6_ul_flt_rule_ptr->eq_attrib.num_ihl_offset_meq_32; + j++) { + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_meq_32[j].offset = + flt_spec_ptr->filter_rule.ihl_offset_meq_32[j].offset; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_meq_32[j].mask = + flt_spec_ptr->filter_rule.ihl_offset_meq_32[j].mask; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_meq_32[j].value = + flt_spec_ptr->filter_rule.ihl_offset_meq_32[j].value; + } + q6_ul_flt_rule_ptr->eq_attrib.num_offset_meq_128 = + flt_spec_ptr->filter_rule.num_offset_meq_128; + for (j = 0; + j < q6_ul_flt_rule_ptr->eq_attrib.num_offset_meq_128; + j++) { + q6_ul_flt_rule_ptr->eq_attrib.offset_meq_128[j].offset = + flt_spec_ptr->filter_rule.offset_meq_128[j].offset; + memcpy(q6_ul_flt_rule_ptr->eq_attrib.offset_meq_128[j].mask, + flt_spec_ptr->filter_rule.offset_meq_128[j].mask, 16); + memcpy(q6_ul_flt_rule_ptr->eq_attrib.offset_meq_128[j].value, + flt_spec_ptr->filter_rule.offset_meq_128[j].value, 16); + } + + q6_ul_flt_rule_ptr->eq_attrib.metadata_meq32_present = + flt_spec_ptr->filter_rule.metadata_meq32_present; + q6_ul_flt_rule_ptr->eq_attrib.metadata_meq32.offset = + flt_spec_ptr->filter_rule.metadata_meq32.offset; + q6_ul_flt_rule_ptr->eq_attrib.metadata_meq32.mask = + flt_spec_ptr->filter_rule.metadata_meq32.mask; + q6_ul_flt_rule_ptr->eq_attrib.metadata_meq32.value = + flt_spec_ptr->filter_rule.metadata_meq32.value; + q6_ul_flt_rule_ptr->eq_attrib.ipv4_frag_eq_present = + flt_spec_ptr->filter_rule.ipv4_frag_eq_present; +} + + +int ipa3_copy_ul_filter_rule_to_ipa(struct 
ipa_install_fltr_rule_req_msg_v01 + *rule_req) +{ + int i; + + /* prevent multi-threads accessing rmnet_ipa3_ctx->num_q6_rules */ + mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock); + if (rule_req->filter_spec_ex_list_valid == true) { + rmnet_ipa3_ctx->num_q6_rules = + rule_req->filter_spec_ex_list_len; + IPAWANDBG("Received (%d) install_flt_req\n", + rmnet_ipa3_ctx->num_q6_rules); + } else { + rmnet_ipa3_ctx->num_q6_rules = 0; + IPAWANERR("got no UL rules from modem\n"); + mutex_unlock( + &rmnet_ipa3_ctx->add_mux_channel_lock); + return -EINVAL; + } + + /* copy UL filter rules from Modem*/ + for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) { + /* check if rules overside the cache*/ + if (i == MAX_NUM_Q6_RULE) { + IPAWANERR("Reaching (%d) max cache ", + MAX_NUM_Q6_RULE); + IPAWANERR(" however total (%d)\n", + rmnet_ipa3_ctx->num_q6_rules); + goto failure; + } + ipa3_copy_qmi_flt_rule_ex(&ipa3_qmi_ctx->q6_ul_filter_rule[i], + &rule_req->filter_spec_ex_list[i]); + } + + if (rule_req->xlat_filter_indices_list_valid) { + if (rule_req->xlat_filter_indices_list_len > + rmnet_ipa3_ctx->num_q6_rules) { + IPAWANERR("Number of xlat indices is not valid: %d\n", + rule_req->xlat_filter_indices_list_len); + goto failure; + } + IPAWANDBG("Receive %d XLAT indices: ", + rule_req->xlat_filter_indices_list_len); + for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) + IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]); + IPAWANDBG("\n"); + + for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) { + if (rule_req->xlat_filter_indices_list[i] + >= rmnet_ipa3_ctx->num_q6_rules) { + IPAWANERR("Xlat rule idx is wrong: %d\n", + rule_req->xlat_filter_indices_list[i]); + goto failure; + } else { + ipa3_qmi_ctx->q6_ul_filter_rule + [rule_req->xlat_filter_indices_list[i]] + .is_xlat_rule = 1; + IPAWANDBG("Rule %d is xlat rule\n", + rule_req->xlat_filter_indices_list[i]); + } + } + } + goto success; + +failure: + rmnet_ipa3_ctx->num_q6_rules = 0; + memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0, + sizeof(ipa3_qmi_ctx->q6_ul_filter_rule)); + mutex_unlock( + &rmnet_ipa3_ctx->add_mux_channel_lock); + return -EINVAL; + +success: + mutex_unlock( + &rmnet_ipa3_ctx->add_mux_channel_lock); + return 0; +} + +static int ipa3_wwan_add_ul_flt_rule_to_ipa(void) +{ + u32 pyld_sz; + int i, retval = 0; + struct ipa_ioc_add_flt_rule *param; + struct ipa_flt_rule_add flt_rule_entry; + struct ipa_fltr_installed_notif_req_msg_v01 *req; + + pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) + + sizeof(struct ipa_flt_rule_add); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) + return -ENOMEM; + + req = kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01), + GFP_KERNEL); + if (!req) { + kfree(param); + return -ENOMEM; + } + + param->commit = 1; + param->ep = IPA_CLIENT_APPS_WAN_PROD; + param->global = false; + param->num_rules = (uint8_t)1; + + memset(req, 0, sizeof(struct ipa_fltr_installed_notif_req_msg_v01)); + + for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) { + param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip; + memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add)); + flt_rule_entry.at_rear = true; + flt_rule_entry.rule.action = + ipa3_qmi_ctx->q6_ul_filter_rule[i].action; + flt_rule_entry.rule.rt_tbl_idx + = ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx; + flt_rule_entry.rule.retain_hdr = true; + flt_rule_entry.rule.hashable = + ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable; + flt_rule_entry.rule.rule_id = + ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id; + + /* debug rt-hdl*/ + 
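+		/*
+		 * Replay each cached modem (Q6) UL rule as an eq_attrib based
+		 * filter rule on IPA_CLIENT_APPS_WAN_PROD; the returned handle
+		 * is kept so ipa3_wwan_del_ul_flt_rule_to_ipa() can remove it.
+		 */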
IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n", + i, flt_rule_entry.rule.rt_tbl_idx); + flt_rule_entry.rule.eq_attrib_type = true; + memcpy(&(flt_rule_entry.rule.eq_attrib), + &ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib, + sizeof(struct ipa_ipfltri_rule_eq)); + memcpy(&(param->rules[0]), &flt_rule_entry, + sizeof(struct ipa_flt_rule_add)); + if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) { + retval = -EFAULT; + IPAWANERR("add A7 UL filter rule(%d) failed\n", i); + } else { + /* store the rule handler */ + ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i] = + param->rules[0].flt_rule_hdl; + } + } + + /* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/ + req->source_pipe_index = + ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD); + if (req->source_pipe_index == IPA_EP_NOT_ALLOCATED) { + IPAWANERR("ep mapping failed\n"); + retval = -EFAULT; + } + + req->install_status = QMI_RESULT_SUCCESS_V01; + req->rule_id_valid = 1; + req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules; + for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) { + req->rule_id[i] = + ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id; + } + if (ipa3_qmi_filter_notify_send(req)) { + IPAWANDBG("add filter rule index on A7-RX failed\n"); + retval = -EFAULT; + } + rmnet_ipa3_ctx->old_num_q6_rules = rmnet_ipa3_ctx->num_q6_rules; + IPAWANDBG("add (%d) filter rule index on A7-RX\n", + rmnet_ipa3_ctx->old_num_q6_rules); + kfree(param); + kfree(req); + return retval; +} + +static int ipa3_wwan_del_ul_flt_rule_to_ipa(void) +{ + u32 pyld_sz; + int i, retval = 0; + struct ipa_ioc_del_flt_rule *param; + struct ipa_flt_rule_del flt_rule_entry; + + pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) + + sizeof(struct ipa_flt_rule_del); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) + return -ENOMEM; + + + param->commit = 1; + param->num_hdls = (uint8_t) 1; + + for (i = 0; i < rmnet_ipa3_ctx->old_num_q6_rules; i++) { + param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip; + memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del)); + flt_rule_entry.hdl = ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i]; + /* debug rt-hdl*/ + IPAWANDBG("delete-IPA rule index(%d)\n", i); + memcpy(&(param->hdl[0]), &flt_rule_entry, + sizeof(struct ipa_flt_rule_del)); + if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) { + IPAWANERR("del A7 UL filter rule(%d) failed\n", i); + kfree(param); + return -EFAULT; + } + } + + /* set UL filter-rule add-indication */ + rmnet_ipa3_ctx->a7_ul_flt_set = false; + rmnet_ipa3_ctx->old_num_q6_rules = 0; + + kfree(param); + return retval; +} + +static int ipa3_find_mux_channel_index(uint32_t mux_id) +{ + int i; + + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) { + if (mux_id == rmnet_ipa3_ctx->mux_channel[i].mux_id) + return i; + } + return MAX_NUM_OF_MUX_CHANNEL; +} + +static int find_vchannel_name_index(const char *vchannel_name) +{ + int i; + + for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) { + if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name, + vchannel_name) == 0) + return i; + } + return MAX_NUM_OF_MUX_CHANNEL; +} + +static enum ipa_upstream_type find_upstream_type(const char *upstreamIface) +{ + int i; + + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) { + if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name, + upstreamIface) == 0) + return IPA_UPSTEAM_MODEM; + } + + if ((strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0) || + (strcmp(IPA_UPSTEAM_WLAN1_IFACE_NAME, upstreamIface) == 0)) + return IPA_UPSTEAM_WLAN; + else + return MAX_NUM_OF_MUX_CHANNEL; +} + +static int ipa3_wwan_register_to_ipa(int index) +{ + 
struct ipa_tx_intf tx_properties = {0}; + struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} }; + struct ipa_ioc_tx_intf_prop *tx_ipv4_property; + struct ipa_ioc_tx_intf_prop *tx_ipv6_property; + struct ipa_rx_intf rx_properties = {0}; + struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} }; + struct ipa_ioc_rx_intf_prop *rx_ipv4_property; + struct ipa_ioc_rx_intf_prop *rx_ipv6_property; + struct ipa_ext_intf ext_properties = {0}; + struct ipa_ioc_ext_intf_prop *ext_ioc_properties; + u32 pyld_sz; + int ret = 0, i; + + IPAWANDBG("index(%d) device[%s]:\n", index, + rmnet_ipa3_ctx->mux_channel[index].vchannel_name); + if (!rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set) { + ret = ipa3_add_qmap_hdr( + rmnet_ipa3_ctx->mux_channel[index].mux_id, + &rmnet_ipa3_ctx->mux_channel[index].hdr_hdl); + if (ret) { + IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index); + return ret; + } + rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set = true; + } + tx_properties.prop = tx_ioc_properties; + tx_ipv4_property = &tx_properties.prop[0]; + tx_ipv4_property->ip = IPA_IP_v4; + tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS; + snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + rmnet_ipa3_ctx->mux_channel[index].mux_id); + tx_ipv6_property = &tx_properties.prop[1]; + tx_ipv6_property->ip = IPA_IP_v6; + tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS; + /* no need use A2_MUX_HDR_NAME_V6_PREF, same header */ + snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + rmnet_ipa3_ctx->mux_channel[index].mux_id); + tx_properties.num_props = 2; + + rx_properties.prop = rx_ioc_properties; + rx_ipv4_property = &rx_properties.prop[0]; + rx_ipv4_property->ip = IPA_IP_v4; + rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_ipv4_property->attrib.meta_data = + rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT; + rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK; + rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_WAN_PROD; + rx_ipv6_property = &rx_properties.prop[1]; + rx_ipv6_property->ip = IPA_IP_v6; + rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_ipv6_property->attrib.meta_data = + rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT; + rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK; + rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_WAN_PROD; + rx_properties.num_props = 2; + + pyld_sz = rmnet_ipa3_ctx->num_q6_rules * + sizeof(struct ipa_ioc_ext_intf_prop); + ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL); + if (!ext_ioc_properties) + return -ENOMEM; + + + ext_properties.prop = ext_ioc_properties; + ext_properties.excp_pipe_valid = true; + ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS; + ext_properties.num_props = rmnet_ipa3_ctx->num_q6_rules; + for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) { + memcpy(&(ext_properties.prop[i]), + &(ipa3_qmi_ctx->q6_ul_filter_rule[i]), + sizeof(struct ipa_ioc_ext_intf_prop)); + ext_properties.prop[i].mux_id = + rmnet_ipa3_ctx->mux_channel[index].mux_id; + IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i, + ext_properties.prop[i].ip, + ext_properties.prop[i].rt_tbl_idx); + IPAWANDBG("action: %d mux:%d\n", + ext_properties.prop[i].action, + ext_properties.prop[i].mux_id); + } + ret = ipa3_register_intf_ext( + rmnet_ipa3_ctx->mux_channel[index].vchannel_name, + &tx_properties, + &rx_properties, + &ext_properties); + if (ret) { + IPAWANERR("[%s]:ipa3_register_intf failed %d\n", + 
rmnet_ipa3_ctx->mux_channel[index].vchannel_name, ret); + goto fail; + } + rmnet_ipa3_ctx->mux_channel[index].ul_flt_reg = true; +fail: + kfree(ext_ioc_properties); + return ret; +} + +static void ipa3_cleanup_deregister_intf(void) +{ + int i; + int ret; + int8_t *v_name; + + for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) { + v_name = rmnet_ipa3_ctx->mux_channel[i].vchannel_name; + + if (rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg) { + ret = ipa3_deregister_intf(v_name); + if (ret < 0) { + IPAWANERR("de-register device %s(%d) failed\n", + v_name, + i); + return; + } + IPAWANDBG("de-register device %s(%d) success\n", + v_name, + i); + } + rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = false; + } +} + +int ipa3_wwan_update_mux_channel_prop(void) +{ + int ret = 0, i; + /* install UL filter rules */ + if (rmnet_ipa3_ctx->egress_set) { + if (!ipa3_qmi_ctx->modem_cfg_emb_pipe_flt) { + IPAWANDBG("setup UL filter rules\n"); + if (rmnet_ipa3_ctx->a7_ul_flt_set) { + IPAWANDBG("del previous UL filter rules\n"); + /* delete rule hdlers */ + ret = ipa3_wwan_del_ul_flt_rule_to_ipa(); + if (ret) { + IPAWANERR("failed to del old rules\n"); + return -EINVAL; + } + IPAWANDBG("deleted old UL rules\n"); + } + ret = ipa3_wwan_add_ul_flt_rule_to_ipa(); + } + if (ret) + IPAWANERR("failed to install UL rules\n"); + else + rmnet_ipa3_ctx->a7_ul_flt_set = true; + } + /* update Tx/Rx/Ext property */ + IPAWANDBG("update Tx/Rx/Ext property in IPA\n"); + if (rmnet_ipa3_ctx->rmnet_index == 0) { + IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n"); + return ret; + } + + ipa3_cleanup_deregister_intf(); + + for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) { + ret = ipa3_wwan_register_to_ipa(i); + if (ret < 0) { + IPAWANERR("failed to re-regist %s, mux %d, index %d\n", + rmnet_ipa3_ctx->mux_channel[i].vchannel_name, + rmnet_ipa3_ctx->mux_channel[i].mux_id, + i); + return -ENODEV; + } + IPAWANERR("dev(%s) has registered to IPA\n", + rmnet_ipa3_ctx->mux_channel[i].vchannel_name); + rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = true; + } + return ret; +} + +#ifdef INIT_COMPLETION +#define reinit_completion(x) INIT_COMPLETION(*(x)) +#endif /* INIT_COMPLETION */ + +static int __ipa_wwan_open(struct net_device *dev) +{ + struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); + + IPAWANDBG("[%s] __wwan_open()\n", dev->name); + if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE) + reinit_completion(&wwan_ptr->resource_granted_completion); + wwan_ptr->device_status = WWAN_DEVICE_ACTIVE; + + if (ipa3_rmnet_res.ipa_napi_enable) + napi_enable(&(wwan_ptr->napi)); + return 0; +} + +/** + * wwan_open() - Opens the wwan network interface. 
Opens logical + * channel on A2 MUX driver and starts the network stack queue + * + * @dev: network device + * + * Return codes: + * 0: success + * -ENODEV: Error while opening logical channel on A2 MUX driver + */ +static int ipa3_wwan_open(struct net_device *dev) +{ + int rc = 0; + + IPAWANDBG("[%s] wwan_open()\n", dev->name); + rc = __ipa_wwan_open(dev); + if (rc == 0) + netif_start_queue(dev); + return rc; +} + +static int __ipa_wwan_close(struct net_device *dev) +{ + struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); + int rc = 0; + + if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) { + wwan_ptr->device_status = WWAN_DEVICE_INACTIVE; + /* do not close wwan port once up, this causes + * remote side to hang if tried to open again + */ + reinit_completion(&wwan_ptr->resource_granted_completion); + rc = ipa3_deregister_intf(dev->name); + if (rc) { + IPAWANERR("[%s]: ipa3_deregister_intf failed %d\n", + dev->name, rc); + return rc; + } + return rc; + } else { + return -EBADF; + } +} + +/** + * ipa3_wwan_stop() - Stops the wwan network interface. Closes + * logical channel on A2 MUX driver and stops the network stack + * queue + * + * @dev: network device + * + * Return codes: + * 0: success + * -ENODEV: Error while opening logical channel on A2 MUX driver + */ +static int ipa3_wwan_stop(struct net_device *dev) +{ + struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); + + IPAWANDBG("[%s]\n", dev->name); + __ipa_wwan_close(dev); + if (ipa3_rmnet_res.ipa_napi_enable) + napi_disable(&(wwan_ptr->napi)); + netif_stop_queue(dev); + return 0; +} + +static int ipa3_wwan_change_mtu(struct net_device *dev, int new_mtu) +{ + if (0 > new_mtu || WWAN_DATA_LEN < new_mtu) + return -EINVAL; + IPAWANDBG("[%s] MTU change: old=%d new=%d\n", + dev->name, dev->mtu, new_mtu); + dev->mtu = new_mtu; + return 0; +} + +/** + * ipa3_wwan_xmit() - Transmits an skb. + * + * @skb: skb to be transmitted + * @dev: network device + * + * Return codes: + * 0: success + * NETDEV_TX_BUSY: Error while transmitting the skb. 
Try again + * later + * -EFAULT: Error while transmitting the skb + */ +static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int ret = 0; + bool qmap_check; + struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); + + if (skb->protocol != htons(ETH_P_MAP)) { + IPAWANDBG_LOW + ("SW filtering out none QMAP packet received from %s", + current->comm); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + qmap_check = RMNET_MAP_GET_CD_BIT(skb); + if (netif_queue_stopped(dev)) { + if (qmap_check && + atomic_read(&wwan_ptr->outstanding_pkts) < + rmnet_ipa3_ctx->outstanding_high_ctl) { + pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name); + goto send; + } else { + pr_err("[%s]fatal: %s stopped\n", dev->name, __func__); + return NETDEV_TX_BUSY; + } + } + /* checking High WM hit */ + if (atomic_read(&wwan_ptr->outstanding_pkts) >= + rmnet_ipa3_ctx->outstanding_high) { + if (!qmap_check) { + IPAWANDBG_LOW("pending(%d)/(%d)- stop(%d)\n", + atomic_read(&wwan_ptr->outstanding_pkts), + rmnet_ipa3_ctx->outstanding_high, + netif_queue_stopped(dev)); + IPAWANDBG_LOW("qmap_chk(%d)\n", qmap_check); + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + } + +send: + /* IPA_RM checking start */ + if (ipa3_ctx->use_ipa_pm) { + /* activate the modem pm for clock scaling */ + ipa_pm_activate(rmnet_ipa3_ctx->q6_pm_hdl); + ret = ipa_pm_activate(rmnet_ipa3_ctx->pm_hdl); + } else { + ret = ipa_rm_inactivity_timer_request_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); + } + if (ret == -EINPROGRESS) { + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + if (ret) { + pr_err("[%s] fatal: ipa rm timer request resource failed %d\n", + dev->name, ret); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return -EFAULT; + } + /* IPA_RM checking end */ + + /* + * both data packets and command will be routed to + * IPA_CLIENT_Q6_WAN_CONS based on status configuration + */ + ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, NULL); + if (ret) { + ret = NETDEV_TX_BUSY; + goto out; + } + + atomic_inc(&wwan_ptr->outstanding_pkts); + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + ret = NETDEV_TX_OK; +out: + if (atomic_read(&wwan_ptr->outstanding_pkts) == 0) { + if (ipa3_ctx->use_ipa_pm) { + ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl); + ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl); + } else { + ipa_rm_inactivity_timer_release_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); + } + } + return ret; +} + +static void ipa3_wwan_tx_timeout(struct net_device *dev) +{ + struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); + + if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) + IPAWANERR("[%s] data stall in UL, %d outstanding\n", + dev->name, atomic_read(&wwan_ptr->outstanding_pkts)); +} + +/** + * apps_ipa_tx_complete_notify() - Rx notify + * + * @priv: driver context + * @evt: event type + * @data: data provided with event + * + * Check that the packet is the one we sent and release it + * This function will be called in defered context in IPA wq. 
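+ * Once the number of outstanding packets drops below outstanding_low the
+ * stopped TX queue is woken again, and when it reaches zero the PM/RM vote
+ * taken in ipa3_wwan_xmit() is released.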
+ */ +static void apps_ipa_tx_complete_notify(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + struct net_device *dev = (struct net_device *)priv; + struct ipa3_wwan_private *wwan_ptr; + + if (dev != IPA_NETDEV()) { + IPAWANDBG("Received pre-SSR packet completion\n"); + dev_kfree_skb_any(skb); + return; + } + + if (evt != IPA_WRITE_DONE) { + IPAWANERR("unsupported evt on Tx callback, Drop the packet\n"); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return; + } + + wwan_ptr = netdev_priv(dev); + atomic_dec(&wwan_ptr->outstanding_pkts); + __netif_tx_lock_bh(netdev_get_tx_queue(dev, 0)); + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr) && + netif_queue_stopped(wwan_ptr->net) && + atomic_read(&wwan_ptr->outstanding_pkts) < + rmnet_ipa3_ctx->outstanding_low) { + IPAWANDBG_LOW("Outstanding low (%d) - waking up queue\n", + rmnet_ipa3_ctx->outstanding_low); + netif_wake_queue(wwan_ptr->net); + } + + if (atomic_read(&wwan_ptr->outstanding_pkts) == 0) { + if (ipa3_ctx->use_ipa_pm) { + ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl); + ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl); + } else { + ipa_rm_inactivity_timer_release_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); + } + } + __netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0)); + dev_kfree_skb_any(skb); +} + +/** + * apps_ipa_packet_receive_notify() - Rx notify + * + * @priv: driver context + * @evt: event type + * @data: data provided with event + * + * IPA will pass a packet to the Linux network stack with skb->data + */ +static void apps_ipa_packet_receive_notify(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct net_device *dev = (struct net_device *)priv; + + if (evt == IPA_RECEIVE) { + struct sk_buff *skb = (struct sk_buff *)data; + int result; + unsigned int packet_len = skb->len; + + IPAWANDBG_LOW("Rx packet was received"); + skb->dev = IPA_NETDEV(); + skb->protocol = htons(ETH_P_MAP); + + if (ipa3_rmnet_res.ipa_napi_enable) { + trace_rmnet_ipa_netif_rcv_skb3(dev->stats.rx_packets); + result = netif_receive_skb(skb); + } else { + if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH + == 0) { + trace_rmnet_ipa_netifni3(dev->stats.rx_packets); + result = netif_rx_ni(skb); + } else { + trace_rmnet_ipa_netifrx3(dev->stats.rx_packets); + result = netif_rx(skb); + } + } + + if (result) { + pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n", + __func__, __LINE__); + dev->stats.rx_dropped++; + } + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_len; + } else { + IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt); + } +} + +static int handle3_ingress_format(struct net_device *dev, + struct rmnet_ioctl_extended_s *in) +{ + int ret = 0; + struct ipa_sys_connect_params *ipa_wan_ep_cfg; + int ep_idx; + + IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n"); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (ep_idx == IPA_EP_NOT_ALLOCATED) { + IPAWANDBG("Embedded datapath not supported\n"); + return -EFAULT; + } + + ipa_wan_ep_cfg = &rmnet_ipa3_ctx->ipa_to_apps_ep_cfg; + if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM) + ipa_wan_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en = + IPA_ENABLE_CS_OFFLOAD_DL; + + if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) { + IPAWANDBG("get AGG size %d count %d\n", + in->u.ingress_format.agg_size, + in->u.ingress_format.agg_count); + + ret = ipa_disable_apps_wan_cons_deaggr( + in->u.ingress_format.agg_size, + in->u.ingress_format.agg_count); + + if (!ret) { + 
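+			/*
+			 * The requested DL aggregation limits were accepted by
+			 * ipa_disable_apps_wan_cons_deaggr(), so apply them to
+			 * the WAN consumer pipe configuration.
+			 */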
ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit = + in->u.ingress_format.agg_size; + ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit = + in->u.ingress_format.agg_count; + } + } + + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 1; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2; + + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = true; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian = 0; + ipa_wan_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000; + + ipa_wan_ep_cfg->client = IPA_CLIENT_APPS_WAN_CONS; + ipa_wan_ep_cfg->notify = apps_ipa_packet_receive_notify; + ipa_wan_ep_cfg->priv = dev; + + if (ipa3_rmnet_res.ipa_napi_enable) + ipa_wan_ep_cfg->napi_obj = &(rmnet_ipa3_ctx->wwan_priv->napi); + ipa_wan_ep_cfg->desc_fifo_sz = + ipa3_rmnet_res.wan_rx_desc_size * IPA_FIFO_ELEMENT_SIZE; + + mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard); + + if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) { + IPAWANDBG("In SSR sequence/recovery\n"); + mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + return -EFAULT; + } + ret = ipa3_setup_sys_pipe(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg, + &rmnet_ipa3_ctx->ipa3_to_apps_hdl); + + mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + + if (ret) + IPAWANERR("failed to configure ingress\n"); + + return ret; +} + +/** + * handle3_egress_format() - Egress data format configuration + * + * Setup IPA egress system pipe and Configure: + * header handling, checksum, de-aggregation and fifo size + * + * @dev: network device + * @e: egress configuration + */ +static int handle3_egress_format(struct net_device *dev, + struct rmnet_ioctl_extended_s *e) +{ + int rc; + struct ipa_sys_connect_params *ipa_wan_ep_cfg; + int ep_idx; + + IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n"); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD); + if (ep_idx == IPA_EP_NOT_ALLOCATED) { + IPAWANDBG("Embedded datapath not supported\n"); + return -EFAULT; + } + + ipa_wan_ep_cfg = &rmnet_ipa3_ctx->apps_to_ipa_ep_cfg; + if ((e->u.data) & RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) { + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 8; + ipa_wan_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en = + IPA_ENABLE_CS_OFFLOAD_UL; + ipa_wan_ep_cfg->ipa_ep_cfg.cfg.cs_metadata_hdr_offset = 1; + } else { + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4; + } + + if ((e->u.data) & RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION) { + IPAWANDBG("WAN UL Aggregation enabled\n"); + ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_DEAGGR; + ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr = IPA_QCMAP; + + ipa_wan_ep_cfg->ipa_ep_cfg.deaggr.packet_offset_valid = false; + + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2; + + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = + true; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = + IPA_HDR_PAD; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = + 2; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = + true; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = + 0; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian = + false; + } else { + IPAWANDBG("WAN UL Aggregation disabled\n"); + ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_en = 
IPA_BYPASS_AGGR; + } + + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1; + /* modem want offset at 0! */ + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 0; + + ipa_wan_ep_cfg->ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_WAN_PROD; + ipa_wan_ep_cfg->ipa_ep_cfg.mode.mode = IPA_BASIC; + + ipa_wan_ep_cfg->client = IPA_CLIENT_APPS_WAN_PROD; + ipa_wan_ep_cfg->notify = apps_ipa_tx_complete_notify; + ipa_wan_ep_cfg->desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ; + ipa_wan_ep_cfg->priv = dev; + + mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard); + if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) { + IPAWANDBG("In SSR sequence/recovery\n"); + mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + return -EFAULT; + } + rc = ipa3_setup_sys_pipe( + ipa_wan_ep_cfg, &rmnet_ipa3_ctx->apps_to_ipa3_hdl); + if (rc) { + IPAWANERR("failed to config egress endpoint\n"); + mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + return rc; + } + mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + + if (rmnet_ipa3_ctx->num_q6_rules != 0) { + /* already got Q6 UL filter rules*/ + if (!ipa3_qmi_ctx->modem_cfg_emb_pipe_flt) { + /* prevent multi-threads accessing num_q6_rules */ + mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock); + rc = ipa3_wwan_add_ul_flt_rule_to_ipa(); + mutex_unlock( + &rmnet_ipa3_ctx->add_mux_channel_lock); + } + if (rc) + IPAWANERR("install UL rules failed\n"); + else + rmnet_ipa3_ctx->a7_ul_flt_set = true; + } else { + /* wait Q6 UL filter rules*/ + IPAWANDBG("no UL-rules\n"); + } + rmnet_ipa3_ctx->egress_set = true; + + return rc; +} + +/** + * ipa3_wwan_ioctl() - I/O control for wwan network driver. + * + * @dev: network device + * @ifr: ignored + * @cmd: cmd to be excecuded. can be one of the following: + * IPA_WWAN_IOCTL_OPEN - Open the network interface + * IPA_WWAN_IOCTL_CLOSE - Close the network interface + * + * Return codes: + * 0: success + * NETDEV_TX_BUSY: Error while transmitting the skb. 
Try again + * later + * -EFAULT: Error while transmitting the skb + */ +static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + int rc = 0; + int mru = 1000, epid = 1, mux_index, len; + struct ipa_msg_meta msg_meta; + struct ipa_wan_msg *wan_msg = NULL; + struct rmnet_ioctl_extended_s ext_ioctl_data; + struct rmnet_ioctl_data_s ioctl_data; + struct ipa3_rmnet_mux_val *mux_channel; + int rmnet_index; + uint32_t mux_id; + int8_t *v_name; + struct mutex *mux_mutex_ptr; + int wan_ep; + + IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd); + switch (cmd) { + /* Set Ethernet protocol */ + case RMNET_IOCTL_SET_LLP_ETHERNET: + break; + /* Set RAWIP protocol */ + case RMNET_IOCTL_SET_LLP_IP: + break; + /* Get link protocol */ + case RMNET_IOCTL_GET_LLP: + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Set QoS header enabled */ + case RMNET_IOCTL_SET_QOS_ENABLE: + return -EINVAL; + /* Set QoS header disabled */ + case RMNET_IOCTL_SET_QOS_DISABLE: + break; + /* Get QoS header state */ + case RMNET_IOCTL_GET_QOS: + ioctl_data.u.operation_mode = RMNET_MODE_NONE; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Get operation mode */ + case RMNET_IOCTL_GET_OPMODE: + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Open transport port */ + case RMNET_IOCTL_OPEN: + break; + /* Close transport port */ + case RMNET_IOCTL_CLOSE: + break; + /* Flow enable */ + case RMNET_IOCTL_FLOW_ENABLE: + IPAWANERR("RMNET_IOCTL_FLOW_ENABLE not supported\n"); + rc = -EFAULT; + break; + /* Flow disable */ + case RMNET_IOCTL_FLOW_DISABLE: + IPAWANERR("RMNET_IOCTL_FLOW_DISABLE not supported\n"); + rc = -EFAULT; + break; + /* Set flow handle */ + case RMNET_IOCTL_FLOW_SET_HNDL: + break; + + /* Extended IOCTLs */ + case RMNET_IOCTL_EXTENDED: + if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; + IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n"); + if (copy_from_user(&ext_ioctl_data, + (u8 *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("failed to copy extended ioctl data\n"); + rc = -EFAULT; + break; + } + switch (ext_ioctl_data.extended_ioctl) { + /* Get features */ + case RMNET_IOCTL_GET_SUPPORTED_FEATURES: + IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n"); + ext_ioctl_data.u.data = + (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL | + RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT | + RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT); + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* Set MRU */ + case RMNET_IOCTL_SET_MRU: + mru = ext_ioctl_data.u.data; + IPAWANDBG("get MRU size %d\n", + ext_ioctl_data.u.data); + break; + /* Get MRU */ + case RMNET_IOCTL_GET_MRU: + ext_ioctl_data.u.data = mru; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* GET SG support */ + case RMNET_IOCTL_GET_SG_SUPPORT: + ext_ioctl_data.u.data = + ipa3_rmnet_res.ipa_advertise_sg_support; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* Get endpoint ID */ + case RMNET_IOCTL_GET_EPID: + IPAWANDBG("get 
ioctl: RMNET_IOCTL_GET_EPID\n"); + ext_ioctl_data.u.data = epid; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + if (copy_from_user(&ext_ioctl_data, + (u8 *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("copy extended ioctl data failed\n"); + rc = -EFAULT; + break; + } + IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n", + ext_ioctl_data.u.data); + break; + /* Endpoint pair */ + case RMNET_IOCTL_GET_EP_PAIR: + IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n"); + wan_ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (wan_ep == IPA_EP_NOT_ALLOCATED) { + IPAWANERR("Embedded datapath not supported\n"); + rc = -EFAULT; + break; + } + ext_ioctl_data.u.ipa_ep_pair.producer_pipe_num = + wan_ep; + + wan_ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD); + if (wan_ep == IPA_EP_NOT_ALLOCATED) { + IPAWANERR("Embedded datapath not supported\n"); + rc = -EFAULT; + break; + } + ext_ioctl_data.u.ipa_ep_pair.consumer_pipe_num = + wan_ep; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + if (copy_from_user(&ext_ioctl_data, + (u8 *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("copy extended ioctl data failed\n"); + rc = -EFAULT; + break; + } + IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n", + ext_ioctl_data.u.ipa_ep_pair.consumer_pipe_num, + ext_ioctl_data.u.ipa_ep_pair.producer_pipe_num); + break; + /* Get driver name */ + case RMNET_IOCTL_GET_DRIVER_NAME: + memcpy(&ext_ioctl_data.u.if_name, + IPA_NETDEV()->name, IFNAMSIZ); + ext_ioctl_data.u.if_name[IFNAMSIZ - 1] = '\0'; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* Add MUX ID */ + case RMNET_IOCTL_ADD_MUX_CHANNEL: + mux_id = ext_ioctl_data.u.rmnet_mux_val.mux_id; + mux_index = ipa3_find_mux_channel_index( + ext_ioctl_data.u.rmnet_mux_val.mux_id); + if (mux_index < MAX_NUM_OF_MUX_CHANNEL) { + IPAWANDBG("already setup mux(%d)\n", mux_id); + return rc; + } + mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock); + if (rmnet_ipa3_ctx->rmnet_index + >= MAX_NUM_OF_MUX_CHANNEL) { + IPAWANERR("Exceed mux_channel limit(%d)\n", + rmnet_ipa3_ctx->rmnet_index); + mutex_unlock( + &rmnet_ipa3_ctx->add_mux_channel_lock); + return -EFAULT; + } + ext_ioctl_data.u.rmnet_mux_val.vchannel_name + [IFNAMSIZ-1] = '\0'; + IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n", + ext_ioctl_data.u.rmnet_mux_val.mux_id, + ext_ioctl_data.u.rmnet_mux_val.vchannel_name); + /* cache the mux name and id */ + mux_channel = rmnet_ipa3_ctx->mux_channel; + rmnet_index = rmnet_ipa3_ctx->rmnet_index; + + mux_channel[rmnet_index].mux_id = + ext_ioctl_data.u.rmnet_mux_val.mux_id; + memcpy(mux_channel[rmnet_index].vchannel_name, + ext_ioctl_data.u.rmnet_mux_val.vchannel_name, + sizeof(mux_channel[rmnet_index] + .vchannel_name)); + mux_channel[rmnet_index].vchannel_name[ + IFNAMSIZ - 1] = '\0'; + + IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n", + mux_channel[rmnet_index].vchannel_name, + mux_channel[rmnet_index].mux_id, + rmnet_index); + /* check if UL filter rules coming*/ + v_name = + ext_ioctl_data.u.rmnet_mux_val.vchannel_name; + if (rmnet_ipa3_ctx->num_q6_rules != 0) { + mux_mutex_ptr = + &rmnet_ipa3_ctx->add_mux_channel_lock; + IPAWANERR("dev(%s) register to IPA\n", + v_name); + rc = ipa3_wwan_register_to_ipa( + rmnet_ipa3_ctx->rmnet_index); + if (rc < 0) { + IPAWANERR("device 
%s reg IPA failed\n", + v_name); + mutex_unlock(mux_mutex_ptr); + return -ENODEV; + } + mux_channel[rmnet_index].mux_channel_set = + true; + mux_channel[rmnet_index].ul_flt_reg = + true; + } else { + IPAWANDBG("dev(%s) haven't registered to IPA\n", + v_name); + mux_channel[rmnet_index].mux_channel_set = + true; + mux_channel[rmnet_index].ul_flt_reg = + false; + } + rmnet_ipa3_ctx->rmnet_index++; + mutex_unlock(&rmnet_ipa3_ctx->add_mux_channel_lock); + break; + case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT: + rc = handle3_egress_format(dev, &ext_ioctl_data); + break; + case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */ + rc = handle3_ingress_format(dev, &ext_ioctl_data); + break; + case RMNET_IOCTL_SET_XLAT_DEV_INFO: + wan_msg = kzalloc(sizeof(struct ipa_wan_msg), + GFP_KERNEL); + if (!wan_msg) + return -ENOMEM; + ext_ioctl_data.u.if_name[IFNAMSIZ-1] = '\0'; + len = sizeof(wan_msg->upstream_ifname) > + sizeof(ext_ioctl_data.u.if_name) ? + sizeof(ext_ioctl_data.u.if_name) : + sizeof(wan_msg->upstream_ifname); + strlcpy(wan_msg->upstream_ifname, + ext_ioctl_data.u.if_name, len); + wan_msg->upstream_ifname[len-1] = '\0'; + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = WAN_XLAT_CONNECT; + msg_meta.msg_len = sizeof(struct ipa_wan_msg); + rc = ipa3_send_msg(&msg_meta, wan_msg, + ipa3_wwan_msg_free_cb); + if (rc) { + IPAWANERR("Failed to send XLAT_CONNECT msg\n"); + kfree(wan_msg); + } + break; + /* Get agg count */ + case RMNET_IOCTL_GET_AGGREGATION_COUNT: + break; + /* Set agg count */ + case RMNET_IOCTL_SET_AGGREGATION_COUNT: + break; + /* Get agg size */ + case RMNET_IOCTL_GET_AGGREGATION_SIZE: + break; + /* Set agg size */ + case RMNET_IOCTL_SET_AGGREGATION_SIZE: + break; + /* Do flow control */ + case RMNET_IOCTL_FLOW_CONTROL: + break; + /* For legacy use */ + case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL: + break; + /* Get HW/SW map */ + case RMNET_IOCTL_GET_HWSW_MAP: + break; + /* Set RX Headroom */ + case RMNET_IOCTL_SET_RX_HEADROOM: + break; + default: + IPAWANERR("[%s] unsupported extended cmd[%d]", + dev->name, + ext_ioctl_data.extended_ioctl); + rc = -EINVAL; + } + break; + default: + IPAWANERR("[%s] unsupported cmd[%d]", + dev->name, cmd); + rc = -EINVAL; + } + return rc; +} + +static const struct net_device_ops ipa3_wwan_ops_ip = { + .ndo_open = ipa3_wwan_open, + .ndo_stop = ipa3_wwan_stop, + .ndo_start_xmit = ipa3_wwan_xmit, + .ndo_tx_timeout = ipa3_wwan_tx_timeout, + .ndo_do_ioctl = ipa3_wwan_ioctl, + .ndo_change_mtu = ipa3_wwan_change_mtu, + .ndo_set_mac_address = 0, + .ndo_validate_addr = 0, +}; + +/** + * wwan_setup() - Setups the wwan network driver. 
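+ * Configures rmnet_ipa%d as a raw-IP device (no link-layer header, no
+ * broadcast/multicast) with headroom reserved for the QMAP mux header.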
+ * + * @dev: network device + * + * Return codes: + * None + */ + +static void ipa3_wwan_setup(struct net_device *dev) +{ + dev->netdev_ops = &ipa3_wwan_ops_ip; + ether_setup(dev); + /* set this after calling ether_setup */ + dev->header_ops = 0; /* No header */ + dev->type = ARPHRD_RAWIP; + dev->hard_header_len = 0; + dev->mtu = WWAN_DATA_LEN; + dev->addr_len = 0; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + dev->needed_headroom = HEADROOM_FOR_QMAP; + dev->needed_tailroom = TAILROOM; + dev->watchdog_timeo = 1000; +} + +/* IPA_RM related functions start*/ +static void ipa3_q6_prod_rm_request_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_request, + ipa3_q6_prod_rm_request_resource); +static void ipa3_q6_prod_rm_release_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_release, + ipa3_q6_prod_rm_release_resource); + +static void ipa3_q6_prod_rm_request_resource(struct work_struct *work) +{ + int ret = 0; + + ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0 && ret != -EINPROGRESS) { + IPAWANERR("ipa_rm_request_resource failed %d\n", ret); + return; + } +} + +static int ipa3_q6_rm_request_resource(void) +{ + queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq, + &ipa3_q6_con_rm_request, 0); + return 0; +} + +static void ipa3_q6_prod_rm_release_resource(struct work_struct *work) +{ + int ret = 0; + + ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0 && ret != -EINPROGRESS) { + IPAWANERR("ipa_rm_release_resource failed %d\n", ret); + return; + } +} + + +static int ipa3_q6_rm_release_resource(void) +{ + queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq, + &ipa3_q6_con_rm_release, 0); + return 0; +} + + +static void ipa3_q6_rm_notify_cb(void *user_data, + enum ipa_rm_event event, + unsigned long data) +{ + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + IPAWANDBG_LOW("Q6_PROD GRANTED CB\n"); + break; + case IPA_RM_RESOURCE_RELEASED: + IPAWANDBG_LOW("Q6_PROD RELEASED CB\n"); + break; + default: + return; + } +} + +int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state) +{ + if (!state) + return -EINVAL; + + if (!ipa_pm_is_used()) + return 0; + + if (state->up) + return ipa_pm_activate_sync(rmnet_ipa3_ctx->q6_teth_pm_hdl); + else + return ipa_pm_deactivate_sync(rmnet_ipa3_ctx->q6_teth_pm_hdl); +} + +/** + * ipa3_q6_register_pm - Register modem clients for PM + * + * This function will register 2 client with IPA PM to represent modem + * in clock scaling calculation: + * - "EMB MODEM" - this client will be activated with embedded traffic + - "TETH MODEM" - this client we be activated by IPACM on offload to + modem. 
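+ * Both clients are registered with skip_clk_vote and in IPA_PM_GROUP_MODEM,
+ * so their activation is intended to feed the modem bandwidth aggregation
+ * rather than take a separate IPA clock vote.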
+*/ +static int ipa3_q6_register_pm(void) +{ + int result; + struct ipa_pm_register_params pm_reg; + + memset(&pm_reg, 0, sizeof(pm_reg)); + pm_reg.name = "EMB MODEM"; + pm_reg.group = IPA_PM_GROUP_MODEM; + pm_reg.skip_clk_vote = true; + result = ipa_pm_register(&pm_reg, &rmnet_ipa3_ctx->q6_pm_hdl); + if (result) { + IPAERR("failed to create IPA PM client %d\n", result); + return result; + } + + pm_reg.name = "TETH MODEM"; + pm_reg.group = IPA_PM_GROUP_MODEM; + pm_reg.skip_clk_vote = true; + result = ipa_pm_register(&pm_reg, &rmnet_ipa3_ctx->q6_teth_pm_hdl); + if (result) { + IPAERR("failed to create IPA PM client %d\n", result); + return result; + } + + return 0; +} + +static void ipa3_q6_deregister_pm(void) +{ + ipa_pm_deactivate_sync(rmnet_ipa3_ctx->q6_pm_hdl); + ipa_pm_deregister(rmnet_ipa3_ctx->q6_pm_hdl); +} + +int ipa3_wwan_set_modem_perf_profile(int throughput) +{ + struct ipa_rm_perf_profile profile; + int ret; + + if (ipa3_ctx->use_ipa_pm) { + ret = ipa_pm_set_throughput(rmnet_ipa3_ctx->q6_pm_hdl, + throughput); + if (ret) + return ret; + ret = ipa_pm_set_throughput(rmnet_ipa3_ctx->q6_teth_pm_hdl, + throughput); + } else { + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = throughput; + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, + &profile); + } + + return ret; +} + +static int ipa3_q6_initialize_rm(void) +{ + struct ipa_rm_create_params create_params; + struct ipa_rm_perf_profile profile; + int result; + + /* Initialize IPA_RM workqueue */ + rmnet_ipa3_ctx->rm_q6_wq = create_singlethread_workqueue("clnt_req"); + if (!rmnet_ipa3_ctx->rm_q6_wq) + return -ENOMEM; + + memset(&create_params, 0, sizeof(create_params)); + create_params.name = IPA_RM_RESOURCE_Q6_PROD; + create_params.reg_params.notify_cb = &ipa3_q6_rm_notify_cb; + result = ipa_rm_create_resource(&create_params); + if (result) + goto create_rsrc_err1; + memset(&create_params, 0, sizeof(create_params)); + create_params.name = IPA_RM_RESOURCE_Q6_CONS; + create_params.release_resource = &ipa3_q6_rm_release_resource; + create_params.request_resource = &ipa3_q6_rm_request_resource; + result = ipa_rm_create_resource(&create_params); + if (result) + goto create_rsrc_err2; + /* add dependency*/ + result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result) + goto add_dpnd_err; + /* setup Performance profile */ + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = 100; + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, + &profile); + if (result) + goto set_perf_err; + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS, + &profile); + if (result) + goto set_perf_err; + return result; + +set_perf_err: + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); +add_dpnd_err: + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + if (result < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_CONS, result); +create_rsrc_err2: + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + if (result < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, result); +create_rsrc_err1: + destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq); + return result; +} + +void ipa3_q6_deinitialize_rm(void) +{ + int ret; + + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (ret < 0) + IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS, + ret); + ret = 
ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + if (ret < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_CONS, ret); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, ret); + + if (rmnet_ipa3_ctx->rm_q6_wq) + destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq); +} + +static void ipa3_wake_tx_queue(struct work_struct *work) +{ + if (IPA_NETDEV()) { + __netif_tx_lock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0)); + netif_wake_queue(IPA_NETDEV()); + __netif_tx_unlock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0)); + } +} + +/** + * ipa3_rm_resource_granted() - Called upon + * IPA_RM_RESOURCE_GRANTED event. Wakes up queue is was stopped. + * + * @work: work object supplied ny workqueue + * + * Return codes: + * None + */ +static void ipa3_rm_resource_granted(void *dev) +{ + IPAWANDBG_LOW("Resource Granted - starting queue\n"); + schedule_work(&ipa3_tx_wakequeue_work); +} + +/** + * ipa3_rm_notify() - Callback function for RM events. Handles + * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events. + * IPA_RM_RESOURCE_GRANTED is handled in the context of shared + * workqueue. + * + * @dev: network device + * @event: IPA RM event + * @data: Additional data provided by IPA RM + * + * Return codes: + * None + */ +static void ipa3_rm_notify(void *dev, enum ipa_rm_event event, + unsigned long data) +{ + struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); + + pr_debug("%s: event %d\n", __func__, event); + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) { + complete_all(&wwan_ptr->resource_granted_completion); + break; + } + ipa3_rm_resource_granted(dev); + break; + case IPA_RM_RESOURCE_RELEASED: + break; + default: + pr_err("%s: unknown event %d\n", __func__, event); + break; + } +} + +/* IPA_RM related functions end*/ + +static int ipa3_ssr_notifier_cb(struct notifier_block *this, + unsigned long code, + void *data); + +static struct notifier_block ipa3_ssr_notifier = { + .notifier_call = ipa3_ssr_notifier_cb, +}; + +static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev, + struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res) +{ + int result; + + ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ; + ipa_rmnet_drv_res->ipa_rmnet_ssr = + of_property_read_bool(pdev->dev.of_node, + "qcom,rmnet-ipa-ssr"); + pr_info("IPA SSR support = %s\n", + ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False"); + ipa_rmnet_drv_res->is_platform_type_msm = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-platform-type-msm"); + pr_info("IPA is_platform_type_msm = %s\n", + ipa_rmnet_drv_res->is_platform_type_msm ? "True" : "False"); + + ipa_rmnet_drv_res->ipa_advertise_sg_support = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-advertise-sg-support"); + pr_info("IPA SG support = %s\n", + ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False"); + + ipa_rmnet_drv_res->ipa_napi_enable = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-napi-enable"); + pr_info("IPA Napi Enable = %s\n", + ipa_rmnet_drv_res->ipa_napi_enable ? 
"True" : "False"); + + /* Get IPA WAN RX desc fifo size */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,wan-rx-desc-size", + &ipa_rmnet_drv_res->wan_rx_desc_size); + if (result) + pr_info("using default for wan-rx-desc-size = %u\n", + ipa_rmnet_drv_res->wan_rx_desc_size); + else + IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n", + ipa_rmnet_drv_res->wan_rx_desc_size); + + return 0; +} + +struct ipa3_rmnet_context ipa3_rmnet_ctx; +static int ipa3_wwan_probe(struct platform_device *pdev); +struct platform_device *m_pdev; + +static void ipa3_delayed_probe(struct work_struct *work) +{ + (void)ipa3_wwan_probe(m_pdev); +} + +static DECLARE_WORK(ipa3_scheduled_probe, ipa3_delayed_probe); + +static void ipa3_ready_cb(void *user_data) +{ + struct platform_device *pdev = (struct platform_device *)(user_data); + + m_pdev = pdev; + + IPAWANDBG("IPA ready callback has been triggered\n"); + + schedule_work(&ipa3_scheduled_probe); +} + +static void ipa_pm_wwan_pm_cb(void *p, enum ipa_pm_cb_event event) +{ + struct net_device *dev = (struct net_device *)p; + struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); + + IPAWANDBG_LOW("event %d\n", event); + switch (event) { + case IPA_PM_CLIENT_ACTIVATED: + if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) { + complete_all(&wwan_ptr->resource_granted_completion); + break; + } + ipa3_rm_resource_granted(dev); + break; + default: + pr_err("%s: unknown event %d\n", __func__, event); + break; + } +} + +static int ipa3_wwan_register_netdev_pm_client(struct net_device *dev) +{ + int result; + struct ipa_pm_register_params pm_reg; + + memset(&pm_reg, 0, sizeof(pm_reg)); + pm_reg.name = IPA_NETDEV()->name; + pm_reg.user_data = dev; + pm_reg.callback = ipa_pm_wwan_pm_cb; + pm_reg.group = IPA_PM_GROUP_APPS; + result = ipa_pm_register(&pm_reg, &rmnet_ipa3_ctx->pm_hdl); + if (result) { + IPAERR("failed to create IPA PM client %d\n", result); + return result; + } + return 0; +} + +static void ipa3_wwan_deregister_netdev_pm_client(void) +{ + ipa_pm_deactivate_sync(rmnet_ipa3_ctx->pm_hdl); + ipa_pm_deregister(rmnet_ipa3_ctx->pm_hdl); +} + +static int ipa3_wwan_create_wwan_rm_resource(struct net_device *dev) +{ + struct ipa_rm_create_params ipa_rm_params; + struct ipa_rm_perf_profile profile; + int ret; + + memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params)); + ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD; + ipa_rm_params.reg_params.user_data = dev; + ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify; + ret = ipa_rm_create_resource(&ipa_rm_params); + if (ret) { + pr_err("%s: unable to create resourse %d in IPA RM\n", + __func__, IPA_RM_RESOURCE_WWAN_0_PROD); + return ret; + } + ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_INACTIVITY_TIMER); + if (ret) { + pr_err("%s: ipa rm timer init failed %d on resourse %d\n", + __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD); + goto timer_init_err; + } + /* add dependency */ + ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (ret) + goto add_dpnd_err; + /* setup Performance profile */ + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD, + &profile); + if (ret) + goto set_perf_err; + + return 0; + +set_perf_err: + ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_RESOURCE_Q6_CONS); +add_dpnd_err: + ipa_rm_inactivity_timer_destroy( + IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */ +timer_init_err: + 
ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + return ret; +} + +static void ipa3_wwan_delete_wwan_rm_resource(void) +{ + int ret; + + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (ret < 0) + IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS, + ret); + ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret < 0) + IPAWANERR( + "Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); +} + +/** + * ipa3_wwan_probe() - Initialized the module and registers as a + * network interface to the network stack + * + * Note: In case IPA driver hasn't initialized already, the probe function + * will return immediately after registering a callback to be invoked when + * IPA driver initialization is complete. + * + * Return codes: + * 0: success + * -ENOMEM: No memory available + * -EFAULT: Internal error + */ +static int ipa3_wwan_probe(struct platform_device *pdev) +{ + int ret, i; + struct net_device *dev; + int wan_cons_ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + + pr_info("rmnet_ipa3 started initialization\n"); + + if (!ipa3_is_ready()) { + IPAWANDBG("IPA driver not ready, registering callback\n"); + ret = ipa_register_ipa_ready_cb(ipa3_ready_cb, (void *)pdev); + + /* + * If we received -EEXIST, IPA has initialized. So we need + * to continue the probing process. + */ + if (ret != -EEXIST) { + if (ret) + IPAWANERR("IPA CB reg failed - %d\n", ret); + return ret; + } + } + + ret = get_ipa_rmnet_dts_configuration(pdev, &ipa3_rmnet_res); + ipa3_rmnet_ctx.ipa_rmnet_ssr = ipa3_rmnet_res.ipa_rmnet_ssr; + + ret = ipa3_init_q6_smem(); + if (ret) { + IPAWANERR("ipa3_init_q6_smem failed\n"); + return ret; + } + + /* initialize tx/rx endpoint setup */ + memset(&rmnet_ipa3_ctx->apps_to_ipa_ep_cfg, 0, + sizeof(struct ipa_sys_connect_params)); + memset(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg, 0, + sizeof(struct ipa_sys_connect_params)); + + /* initialize ex property setup */ + rmnet_ipa3_ctx->num_q6_rules = 0; + rmnet_ipa3_ctx->old_num_q6_rules = 0; + rmnet_ipa3_ctx->rmnet_index = 0; + rmnet_ipa3_ctx->egress_set = false; + rmnet_ipa3_ctx->a7_ul_flt_set = false; + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) + memset(&rmnet_ipa3_ctx->mux_channel[i], 0, + sizeof(struct ipa3_rmnet_mux_val)); + + /* start A7 QMI service/client */ + if (ipa3_rmnet_res.is_platform_type_msm) + /* Android platform loads uC */ + ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01); + else + /* LE platform not loads uC */ + ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01); + + /* construct default WAN RT tbl for IPACM */ + if (wan_cons_ep != IPA_EP_NOT_ALLOCATED) { + ret = ipa3_setup_a7_qmap_hdr(); + if (ret) + goto setup_a7_qmap_hdr_err; + ret = ipa3_setup_dflt_wan_rt_tables(); + if (ret) + goto setup_dflt_wan_rt_tables_err; + } + + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) { + /* Start transport-driver fd ioctl for ipacm for first init */ + ret = ipa3_wan_ioctl_init(); + if (ret) + goto wan_ioctl_init_err; + } else { + /* Enable sending QMI messages after SSR */ + ipa3_wan_ioctl_enable_qmi_messages(); + } + + /* initialize wan-driver netdev */ + dev = alloc_netdev(sizeof(struct ipa3_wwan_private), + IPA_WWAN_DEV_NAME, + NET_NAME_UNKNOWN, + ipa3_wwan_setup); + if (!dev) { + 
IPAWANERR("no memory for netdev\n"); + ret = -ENOMEM; + goto alloc_netdev_err; + } + rmnet_ipa3_ctx->wwan_priv = netdev_priv(dev); + memset(rmnet_ipa3_ctx->wwan_priv, 0, + sizeof(*(rmnet_ipa3_ctx->wwan_priv))); + IPAWANDBG("wwan_ptr (private) = %pK", rmnet_ipa3_ctx->wwan_priv); + rmnet_ipa3_ctx->wwan_priv->net = dev; + atomic_set(&rmnet_ipa3_ctx->wwan_priv->outstanding_pkts, 0); + spin_lock_init(&rmnet_ipa3_ctx->wwan_priv->lock); + init_completion( + &rmnet_ipa3_ctx->wwan_priv->resource_granted_completion); + + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) { + /* IPA_RM configuration starts */ + if (ipa3_ctx->use_ipa_pm) + ret = ipa3_q6_register_pm(); + else + ret = ipa3_q6_initialize_rm(); + if (ret) { + IPAWANERR("ipa3_q6_initialize_rm failed, ret: %d\n", + ret); + goto q6_init_err; + } + } + + if (ipa3_ctx->use_ipa_pm) + ret = ipa3_wwan_register_netdev_pm_client(dev); + else + ret = ipa3_wwan_create_wwan_rm_resource(dev); + if (ret) { + IPAWANERR("fail to create/register pm resources\n"); + goto fail_pm; + } + + /* Enable SG support in netdevice. */ + if (ipa3_rmnet_res.ipa_advertise_sg_support) + dev->hw_features |= NETIF_F_SG; + + if (ipa3_rmnet_res.ipa_napi_enable) + netif_napi_add(dev, &(rmnet_ipa3_ctx->wwan_priv->napi), + ipa3_rmnet_poll, NAPI_WEIGHT); + ret = register_netdev(dev); + if (ret) { + IPAWANERR("unable to register ipa_netdev %d rc=%d\n", + 0, ret); + goto set_perf_err; + } + + IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n", dev->name); + if (ret) { + IPAWANERR("default configuration failed rc=%d\n", + ret); + goto config_err; + } + + /* + * for IPA 4.0 offline charge is not needed and we need to prevent + * power collapse until IPA uC is loaded. + */ + atomic_set(&rmnet_ipa3_ctx->is_initialized, 1); + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr) && ipa3_ctx->ipa_hw_type != + IPA_HW_v4_0) { + /* offline charging mode */ + ipa3_proxy_clk_unvote(); + } + atomic_set(&rmnet_ipa3_ctx->is_ssr, 0); + + IPAWANERR("rmnet_ipa completed initialization\n"); + return 0; +config_err: + if (ipa3_rmnet_res.ipa_napi_enable) + netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi)); + unregister_netdev(dev); +set_perf_err: + if (ipa3_ctx->use_ipa_pm) + ipa3_wwan_deregister_netdev_pm_client(); + else + ipa3_wwan_delete_wwan_rm_resource(); +fail_pm: + if (ipa3_ctx->use_ipa_pm) { + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) + ipa3_q6_deregister_pm(); + } else { + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) + ipa3_q6_deinitialize_rm(); + } +q6_init_err: + free_netdev(dev); + rmnet_ipa3_ctx->wwan_priv = NULL; +alloc_netdev_err: + ipa3_wan_ioctl_deinit(); +wan_ioctl_init_err: + if (wan_cons_ep != IPA_EP_NOT_ALLOCATED) + ipa3_del_dflt_wan_rt_tables(); +setup_dflt_wan_rt_tables_err: + if (wan_cons_ep != IPA_EP_NOT_ALLOCATED) + ipa3_del_a7_qmap_hdr(); +setup_a7_qmap_hdr_err: + ipa3_qmi_service_exit(); + atomic_set(&rmnet_ipa3_ctx->is_ssr, 0); + return ret; +} + +static int ipa3_wwan_remove(struct platform_device *pdev) +{ + int ret; + + IPAWANINFO("rmnet_ipa started deinitialization\n"); + mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard); + ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl); + if (ret < 0) + IPAWANERR("Failed to teardown IPA->APPS pipe\n"); + else + rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1; + ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->apps_to_ipa3_hdl); + if (ret < 0) + IPAWANERR("Failed to teardown APPS->IPA pipe\n"); + else + rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1; + if (ipa3_rmnet_res.ipa_napi_enable) + netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi)); + 
mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + unregister_netdev(IPA_NETDEV()); + if (ipa3_ctx->use_ipa_pm) + ipa3_wwan_deregister_netdev_pm_client(); + else + ipa3_wwan_delete_wwan_rm_resource(); + cancel_work_sync(&ipa3_tx_wakequeue_work); + cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work); + if (IPA_NETDEV()) + free_netdev(IPA_NETDEV()); + rmnet_ipa3_ctx->wwan_priv = NULL; + /* No need to remove wwan_ioctl during SSR */ + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) + ipa3_wan_ioctl_deinit(); + if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS) != + IPA_EP_NOT_ALLOCATED) { + ipa3_del_dflt_wan_rt_tables(); + ipa3_del_a7_qmap_hdr(); + } + ipa3_del_mux_qmap_hdrs(); + if (!ipa3_qmi_ctx->modem_cfg_emb_pipe_flt) + ipa3_wwan_del_ul_flt_rule_to_ipa(); + ipa3_cleanup_deregister_intf(); + atomic_set(&rmnet_ipa3_ctx->is_initialized, 0); + IPAWANINFO("rmnet_ipa completed deinitialization\n"); + return 0; +} + +/** + * rmnet_ipa_ap_suspend() - suspend callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP suspend + * operation is invoked, usually by pressing a suspend button. + * + * Returns -EAGAIN to runtime_pm framework in case there are pending packets + * in the Tx queue. This will postpone the suspend operation until all the + * pending packets will be transmitted. + * + * In case there are no packets to send, releases the WWAN0_PROD entity. + * As an outcome, the number of IPA active clients should be decremented + * until IPA clocks can be gated. + */ +static int rmnet_ipa_ap_suspend(struct device *dev) +{ + struct net_device *netdev = IPA_NETDEV(); + struct ipa3_wwan_private *wwan_ptr; + int ret; + + IPAWANDBG("Enter...\n"); + + if (netdev == NULL) { + IPAWANERR("netdev is NULL.\n"); + ret = 0; + goto bail; + } + + netif_tx_lock_bh(netdev); + wwan_ptr = netdev_priv(netdev); + if (wwan_ptr == NULL) { + IPAWANERR("wwan_ptr is NULL.\n"); + ret = 0; + netif_tx_unlock_bh(netdev); + goto bail; + } + + /* Do not allow A7 to suspend in case there are outstanding packets */ + if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) { + IPAWANDBG("Outstanding packets, postponing AP suspend.\n"); + ret = -EAGAIN; + netif_tx_unlock_bh(netdev); + goto bail; + } + + /* Make sure that there is no Tx operation ongoing */ + netif_stop_queue(netdev); + netif_tx_unlock_bh(netdev); + if (ipa3_ctx->use_ipa_pm) + ipa_pm_deactivate_sync(rmnet_ipa3_ctx->pm_hdl); + else + ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + ret = 0; +bail: + IPAWANDBG("Exit with %d\n", ret); + return ret; +} + +/** + * rmnet_ipa_ap_resume() - resume callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP resume + * operation is invoked. + * + * Enables the network interface queue and returns success to the + * runtime_pm framework. 
+ */ +static int rmnet_ipa_ap_resume(struct device *dev) +{ + struct net_device *netdev = IPA_NETDEV(); + + IPAWANDBG("Enter...\n"); + if (netdev) + netif_wake_queue(netdev); + IPAWANDBG("Exit\n"); + + return 0; +} + +static void ipa_stop_polling_stats(void) +{ + cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work); + ipa3_rmnet_ctx.polling_interval = 0; +} + +static const struct of_device_id rmnet_ipa_dt_match[] = { + {.compatible = "qcom,rmnet-ipa3"}, + {}, +}; +MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match); + +static const struct dev_pm_ops rmnet_ipa_pm_ops = { + .suspend_noirq = rmnet_ipa_ap_suspend, + .resume_noirq = rmnet_ipa_ap_resume, +}; + +static struct platform_driver rmnet_ipa_driver = { + .driver = { + .name = "rmnet_ipa3", + .pm = &rmnet_ipa_pm_ops, + .of_match_table = rmnet_ipa_dt_match, + }, + .probe = ipa3_wwan_probe, + .remove = ipa3_wwan_remove, +}; + +/** + * rmnet_ipa_send_ssr_notification(bool ssr_done) - send SSR notification + * + * This function sends the SSR notification before modem shutdown and + * after_powerup from SSR framework, to user-space module + */ +static void rmnet_ipa_send_ssr_notification(bool ssr_done) +{ + struct ipa_msg_meta msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + if (ssr_done) + msg_meta.msg_type = IPA_SSR_AFTER_POWERUP; + else + msg_meta.msg_type = IPA_SSR_BEFORE_SHUTDOWN; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + +static int ipa3_ssr_notifier_cb(struct notifier_block *this, + unsigned long code, + void *data) +{ + if (!ipa3_rmnet_ctx.ipa_rmnet_ssr) + return NOTIFY_DONE; + + switch (code) { + case SUBSYS_BEFORE_SHUTDOWN: + IPAWANINFO("IPA received MPSS BEFORE_SHUTDOWN\n"); + /* send SSR before-shutdown notification to IPACM */ + rmnet_ipa_send_ssr_notification(false); + atomic_set(&rmnet_ipa3_ctx->is_ssr, 1); + ipa3_q6_pre_shutdown_cleanup(); + if (IPA_NETDEV()) + netif_stop_queue(IPA_NETDEV()); + ipa3_qmi_stop_workqueues(); + ipa3_wan_ioctl_stop_qmi_messages(); + ipa_stop_polling_stats(); + if (atomic_read(&rmnet_ipa3_ctx->is_initialized)) + platform_driver_unregister(&rmnet_ipa_driver); + imp_handle_modem_shutdown(); + if (atomic_read(&rmnet_ipa3_ctx->is_ssr) && + ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + ipa3_q6_post_shutdown_cleanup(); + ipa3_odl_pipe_cleanup(true); + IPAWANINFO("IPA BEFORE_SHUTDOWN handling is complete\n"); + break; + case SUBSYS_AFTER_SHUTDOWN: + IPAWANINFO("IPA Received MPSS AFTER_SHUTDOWN\n"); + if (atomic_read(&rmnet_ipa3_ctx->is_ssr) && + ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) + ipa3_q6_post_shutdown_cleanup(); + IPAWANINFO("IPA AFTER_SHUTDOWN handling is complete\n"); + break; + case SUBSYS_BEFORE_POWERUP: + IPAWANINFO("IPA received MPSS BEFORE_POWERUP\n"); + if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) + /* clean up cached QMI msg/handlers */ + ipa3_qmi_service_exit(); + /*hold a proxy vote for the modem*/ + ipa3_proxy_clk_vote(); + ipa3_reset_freeze_vote(); + IPAWANINFO("IPA BEFORE_POWERUP handling is complete\n"); + break; + case SUBSYS_AFTER_POWERUP: + IPAWANINFO("IPA received MPSS AFTER_POWERUP\n"); + if (!atomic_read(&rmnet_ipa3_ctx->is_initialized) && + atomic_read(&rmnet_ipa3_ctx->is_ssr)) + platform_driver_register(&rmnet_ipa_driver); + ipa3_odl_pipe_open(); + IPAWANINFO("IPA AFTER_POWERUP handling is complete\n"); + break; + default: + IPAWANDBG("Unsupported subsys notification, IPA received: %lu", + code); + break; + } + + IPAWANDBG_LOW("Exit\n"); + return NOTIFY_DONE; +} + +/** + * 
rmnet_ipa_free_msg() - Free the msg sent to user space via ipa_send_msg + * @buff: pointer to buffer containing the message + * @len: message len + * @type: message type + * + * This function is invoked when ipa_send_msg is complete (Provided as a + * free function pointer along with the message). + */ +static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAWANERR("Null buffer\n"); + return; + } + + if (type != IPA_TETHERING_STATS_UPDATE_STATS && + type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS && + type != IPA_PER_CLIENT_STATS_CONNECT_EVENT && + type != IPA_PER_CLIENT_STATS_DISCONNECT_EVENT) { + IPAWANERR("Wrong type given. buff %pK type %d\n", + buff, type); + } + kfree(buff); +} + +/** + * rmnet_ipa_get_stats_and_update() - Gets pipe stats from Modem + * + * This function queries the IPA Modem driver for the pipe stats + * via QMI, and updates the user space IPA entity. + */ +static void rmnet_ipa_get_stats_and_update(void) +{ + struct ipa_get_data_stats_req_msg_v01 req; + struct ipa_get_data_stats_resp_msg_v01 *resp; + struct ipa_msg_meta msg_meta; + int rc; + + resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) + return; + + memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01)); + + req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01; + + rc = ipa3_qmi_get_data_stats(&req, resp); + if (rc) { + IPAWANERR("ipa3_qmi_get_data_stats failed: %d\n", rc); + kfree(resp); + return; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01); + rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + kfree(resp); + return; + } +} + +/** + * tethering_stats_poll_queue() - Stats polling function + * @work - Work entry + * + * This function is scheduled periodically (per the interval) in + * order to poll the IPA Modem driver for the pipe stats. + */ +static void tethering_stats_poll_queue(struct work_struct *work) +{ + rmnet_ipa_get_stats_and_update(); + + /* Schedule again only if there's an active polling interval */ + if (ipa3_rmnet_ctx.polling_interval != 0) + schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, + msecs_to_jiffies(ipa3_rmnet_ctx.polling_interval*1000)); +} + +/** + * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem + * + * This function retrieves the data usage (used quota) from the IPA Modem driver + * via QMI, and updates IPA user space entity. 
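+ * + * The response buffer is passed to ipa_send_msg() with rmnet_ipa_free_msg() as the free callback, so it is released after delivery (or locally if sending fails).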
+ */ +static void rmnet_ipa_get_network_stats_and_update(void) +{ + struct ipa_get_apn_data_stats_req_msg_v01 req; + struct ipa_get_apn_data_stats_resp_msg_v01 *resp; + struct ipa_msg_meta msg_meta; + int rc; + + resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) + return; + + memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01)); + + req.mux_id_list_valid = true; + req.mux_id_list_len = 1; + req.mux_id_list[0] = ipa3_rmnet_ctx.metered_mux_id; + + rc = ipa3_qmi_get_network_stats(&req, resp); + if (rc) { + IPAWANERR("ipa3_qmi_get_network_stats failed: %d\n", rc); + kfree(resp); + return; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01); + rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + kfree(resp); + return; + } +} + +/** + * rmnet_ipa_send_quota_reach_ind() - send quota_reach notification from + * IPA Modem + * This function sends the quota_reach indication from the IPA Modem driver + * via QMI, to user-space module + */ +static void rmnet_ipa_send_quota_reach_ind(void) +{ + struct ipa_msg_meta msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_QUOTA_REACH; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + +/** + * rmnet_ipa3_poll_tethering_stats() - Tethering stats polling IOCTL handler + * @data - IOCTL data + * + * This function handles WAN_IOC_POLL_TETHERING_STATS. + * In case polling interval received is 0, polling will stop + * (If there's a polling in progress, it will allow it to finish), and then will + * fetch network stats, and update the IPA user space. + * + * Return codes: + * 0: Success + */ +int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data) +{ + ipa3_rmnet_ctx.polling_interval = data->polling_interval_secs; + + cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work); + + if (ipa3_rmnet_ctx.polling_interval == 0) { + ipa3_qmi_stop_data_qouta(); + rmnet_ipa_get_network_stats_and_update(); + rmnet_ipa_get_stats_and_update(); + return 0; + } + + schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0); + return 0; +} + +/** + * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface. + * It translates the given interface name to the Modem MUX ID and + * sends the request of the quota to the IPA Modem driver via QMI. 
+ * + * Return codes: + * 0: Success + * -EFAULT: Invalid interface name provided + * other: See ipa_qmi_set_data_quota + */ +static int rmnet_ipa3_set_data_quota_modem( + struct wan_ioctl_set_data_quota *data) +{ + u32 mux_id; + int index; + struct ipa_set_data_usage_quota_req_msg_v01 req; + + /* stop quota */ + if (!data->set_quota) + ipa3_qmi_stop_data_qouta(); + + /* prevent string buffer overflows */ + data->interface_name[IFNAMSIZ-1] = '\0'; + + index = find_vchannel_name_index(data->interface_name); + IPAWANERR("iface name %s, quota %lu\n", + data->interface_name, (unsigned long) data->quota_mbytes); + + if (index == MAX_NUM_OF_MUX_CHANNEL) { + IPAWANERR("%s is an invalid iface name\n", + data->interface_name); + return -ENODEV; + } + + mux_id = rmnet_ipa3_ctx->mux_channel[index].mux_id; + ipa3_rmnet_ctx.metered_mux_id = mux_id; + + memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01)); + req.apn_quota_list_valid = true; + req.apn_quota_list_len = 1; + req.apn_quota_list[0].mux_id = mux_id; + req.apn_quota_list[0].num_Mbytes = data->quota_mbytes; + + return ipa3_qmi_set_data_quota(&req); +} + +static int rmnet_ipa3_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data) +{ + struct ipa_set_wifi_quota wifi_quota; + int rc = 0; + + memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota)); + wifi_quota.set_quota = data->set_quota; + wifi_quota.quota_bytes = data->quota_mbytes; + IPAWANERR("iface name %s, quota %lu\n", + data->interface_name, (unsigned long) data->quota_mbytes); + + rc = ipa3_set_wlan_quota(&wifi_quota); + /* check if wlan-fw takes this quota-set */ + if (!wifi_quota.set_valid) + rc = -EFAULT; + return rc; +} + +/** + * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_DATA_QUOTA. + * It translates the given interface name to the Modem MUX ID and + * sends the request of the quota to the IPA Modem driver via QMI. + * + * Return codes: + * 0: Success + * -EFAULT: Invalid interface name provided + * other: See ipa_qmi_set_data_quota + */ +int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data) +{ + enum ipa_upstream_type upstream_type; + int rc = 0; + + /* prevent string buffer overflows */ + data->interface_name[IFNAMSIZ-1] = '\0'; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->interface_name); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR("Wrong interface_name name %s\n", + data->interface_name); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + rc = rmnet_ipa3_set_data_quota_wifi(data); + if (rc) { + IPAWANERR("set quota on wifi failed\n"); + return rc; + } + } else { + rc = rmnet_ipa3_set_data_quota_modem(data); + if (rc) { + IPAWANERR("set quota on modem failed\n"); + return rc; + } + } + return rc; +} +/* rmnet_ipa_set_tether_client_pipe() - + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_DATA_QUOTA. + * It translates the given interface name to the Modem MUX ID and + * sends the request of the quota to the IPA Modem driver via QMI. 
+ * + * Return codes: + * 0: Success + * -EFAULT: Invalid interface name provided + * other: See ipa_qmi_set_data_quota + */ +int rmnet_ipa3_set_tether_client_pipe( + struct wan_ioctl_set_tether_client_pipe *data) +{ + int number, i; + + /* error checking if ul_src_pipe_len valid or not*/ + if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 || + data->ul_src_pipe_len < 0) { + IPAWANERR("UL src pipes %d exceeding max %d\n", + data->ul_src_pipe_len, + QMI_IPA_MAX_PIPES_V01); + return -EFAULT; + } + /* error checking if dl_dst_pipe_len valid or not*/ + if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 || + data->dl_dst_pipe_len < 0) { + IPAWANERR("DL dst pipes %d exceeding max %d\n", + data->dl_dst_pipe_len, + QMI_IPA_MAX_PIPES_V01); + return -EFAULT; + } + + IPAWANDBG("client %d, UL %d, DL %d, reset %d\n", + data->ipa_client, + data->ul_src_pipe_len, + data->dl_dst_pipe_len, + data->reset_client); + number = data->ul_src_pipe_len; + for (i = 0; i < number; i++) { + IPAWANDBG("UL index-%d pipe %d\n", i, + data->ul_src_pipe_list[i]); + if (data->reset_client) + ipa3_set_client(data->ul_src_pipe_list[i], + 0, false); + else + ipa3_set_client(data->ul_src_pipe_list[i], + data->ipa_client, true); + } + number = data->dl_dst_pipe_len; + for (i = 0; i < number; i++) { + IPAWANDBG("DL index-%d pipe %d\n", i, + data->dl_dst_pipe_list[i]); + if (data->reset_client) + ipa3_set_client(data->dl_dst_pipe_list[i], + 0, false); + else + ipa3_set_client(data->dl_dst_pipe_list[i], + data->ipa_client, false); + } + return 0; +} + +static int rmnet_ipa3_query_tethering_stats_wifi( + struct wan_ioctl_query_tether_stats *data, bool reset) +{ + struct ipa_get_wdi_sap_stats *sap_stats; + int rc; + + sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats), + GFP_KERNEL); + if (!sap_stats) + return -ENOMEM; + + memset(sap_stats, 0, sizeof(struct ipa_get_wdi_sap_stats)); + + sap_stats->reset_stats = reset; + IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats); + + rc = ipa3_get_wlan_stats(sap_stats); + if (rc) { + IPAWANERR_RL("can't get ipa3_get_wlan_stats\n"); + kfree(sap_stats); + return rc; + } else if (data == NULL) { + IPAWANDBG("only reset wlan stats\n"); + kfree(sap_stats); + return 0; + } + + if (sap_stats->stats_valid) { + data->ipv4_tx_packets = sap_stats->ipv4_tx_packets; + data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes; + data->ipv4_rx_packets = sap_stats->ipv4_rx_packets; + data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes; + data->ipv6_tx_packets = sap_stats->ipv6_tx_packets; + data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes; + data->ipv6_rx_packets = sap_stats->ipv6_rx_packets; + data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes; + } + + IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n", + (unsigned long) data->ipv4_rx_packets, + (unsigned long) data->ipv6_rx_packets, + (unsigned long) data->ipv4_rx_bytes, + (unsigned long) data->ipv6_rx_bytes); + IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n", + (unsigned long) data->ipv4_tx_packets, + (unsigned long) data->ipv6_tx_packets, + (unsigned long) data->ipv4_tx_bytes, + (unsigned long) data->ipv6_tx_bytes); + + kfree(sap_stats); + return rc; +} + +static int rmnet_ipa3_query_tethering_stats_modem( + struct wan_ioctl_query_tether_stats *data, bool reset) +{ + struct ipa_get_data_stats_req_msg_v01 *req; + struct ipa_get_data_stats_resp_msg_v01 *resp; + int pipe_len, rc; + struct ipa_pipe_stats_info_type_v01 *stat_ptr; + + req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01), + GFP_KERNEL); + if (!req) + return -ENOMEM; + + 
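+ /* req and resp are freed on every exit path below */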
resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + kfree(req); + return -ENOMEM; + } + memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01)); + + req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01; + if (reset) { + req->reset_stats_valid = true; + req->reset_stats = true; + IPAWANDBG("reset the pipe stats\n"); + } + + rc = ipa3_qmi_get_data_stats(req, resp); + if (rc) { + IPAWANERR("can't get ipa_qmi_get_data_stats\n"); + kfree(req); + kfree(resp); + return rc; + } else if (data == NULL) { + IPAWANDBG("only reset modem stats\n"); + kfree(req); + kfree(resp); + return 0; + } + + if (resp->dl_dst_pipe_stats_list_valid) { + for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len; + pipe_len++) { + stat_ptr = + &resp->dl_dst_pipe_stats_list[pipe_len]; + + IPAWANDBG_LOW("Check entry(%d) dl_dst_pipe(%d)\n", + pipe_len, + stat_ptr->pipe_index); + IPAWANDBG_LOW("dl_p_v4(%lu)v6(%lu)\n", + (unsigned long) stat_ptr->num_ipv4_packets, + (unsigned long) stat_ptr->num_ipv6_packets + ); + IPAWANDBG_LOW("dl_b_v4(%lu)v6(%lu)\n", + (unsigned long) stat_ptr->num_ipv4_bytes, + (unsigned long) stat_ptr->num_ipv6_bytes); + if (ipa_get_client_uplink( + stat_ptr->pipe_index) == false) { + if (data->ipa_client == ipa_get_client( + stat_ptr->pipe_index)) { + /* update the DL stats */ + data->ipv4_rx_packets += + stat_ptr->num_ipv4_packets; + data->ipv6_rx_packets += + stat_ptr->num_ipv6_packets; + data->ipv4_rx_bytes += + stat_ptr->num_ipv4_bytes; + data->ipv6_rx_bytes += + stat_ptr->num_ipv6_bytes; + } + } + } + } + IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n", + (unsigned long) data->ipv4_rx_packets, + (unsigned long) data->ipv6_rx_packets, + (unsigned long) data->ipv4_rx_bytes, + (unsigned long) data->ipv6_rx_bytes); + + if (resp->ul_src_pipe_stats_list_valid) { + for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len; + pipe_len++) { + stat_ptr = + &resp->ul_src_pipe_stats_list[pipe_len]; + IPAWANDBG_LOW("Check entry(%d) ul_dst_pipe(%d)\n", + pipe_len, + stat_ptr->pipe_index); + IPAWANDBG_LOW("ul_p_v4(%lu)v6(%lu)\n", + (unsigned long) stat_ptr->num_ipv4_packets, + (unsigned long) stat_ptr->num_ipv6_packets + ); + IPAWANDBG_LOW("ul_b_v4(%lu)v6(%lu)\n", + (unsigned long)stat_ptr->num_ipv4_bytes, + (unsigned long) stat_ptr->num_ipv6_bytes); + if (ipa_get_client_uplink( + stat_ptr->pipe_index) == true) { + if (data->ipa_client == ipa_get_client( + stat_ptr->pipe_index)) { + /* update the DL stats */ + data->ipv4_tx_packets += + stat_ptr->num_ipv4_packets; + data->ipv6_tx_packets += + stat_ptr->num_ipv6_packets; + data->ipv4_tx_bytes += + stat_ptr->num_ipv4_bytes; + data->ipv6_tx_bytes += + stat_ptr->num_ipv6_bytes; + } + } + } + } + IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n", + (unsigned long) data->ipv4_tx_packets, + (unsigned long) data->ipv6_tx_packets, + (unsigned long) data->ipv4_tx_bytes, + (unsigned long) data->ipv6_tx_bytes); + kfree(req); + kfree(resp); + return 0; +} + +static int rmnet_ipa3_query_tethering_stats_hw( + struct wan_ioctl_query_tether_stats *data, bool reset) +{ + int rc = 0; + struct ipa_quota_stats_all *con_stats; + + /* qet HW-stats */ + rc = ipa_get_teth_stats(); + if (rc) { + IPAWANDBG("ipa_get_teth_stats failed %d,\n", rc); + return rc; + } + + /* query DL stats */ + IPAWANDBG("reset the pipe stats? 
(%d)\n", reset); + con_stats = kzalloc(sizeof(*con_stats), GFP_KERNEL); + if (!con_stats) { + IPAWANERR("no memory\n"); + return -ENOMEM; + } + rc = ipa_query_teth_stats(IPA_CLIENT_Q6_WAN_PROD, con_stats, reset); + if (rc) { + IPAERR("IPA_CLIENT_Q6_WAN_PROD query failed %d,\n", rc); + kfree(con_stats); + return rc; + } + IPAWANDBG("wlan: v4_rx_p(%d) b(%lld) v6_rx_p(%d) b(%lld)\n", + con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv4_pkts, + con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv4_bytes, + con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv6_pkts, + con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv6_bytes); + + IPAWANDBG("usb: v4_rx_p(%d) b(%lld) v6_rx_p(%d) b(%lld)\n", + con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_pkts, + con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_bytes, + con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_pkts, + con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_bytes); + + /* update the DL stats */ + data->ipv4_rx_packets = + con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv4_pkts + + con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_pkts; + data->ipv6_rx_packets = + con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv6_pkts + + con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_pkts; + data->ipv4_rx_bytes = + con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv4_bytes + + con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_bytes; + data->ipv6_rx_bytes = + con_stats->client[IPA_CLIENT_WLAN1_CONS].num_ipv6_bytes + + con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_bytes; + + IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n", + (unsigned long) data->ipv4_rx_packets, + (unsigned long) data->ipv6_rx_packets, + (unsigned long) data->ipv4_rx_bytes, + (unsigned long) data->ipv6_rx_bytes); + + /* query USB UL stats */ + memset(con_stats, 0, sizeof(struct ipa_quota_stats_all)); + rc = ipa_query_teth_stats(IPA_CLIENT_USB_PROD, con_stats, reset); + if (rc) { + IPAERR("IPA_CLIENT_USB_PROD query failed %d\n", rc); + kfree(con_stats); + return rc; + } + + IPAWANDBG("usb: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n", + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_pkts, + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_bytes, + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_pkts, + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_bytes); + + /* update the USB UL stats */ + data->ipv4_tx_packets = + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_pkts; + data->ipv6_tx_packets = + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_pkts; + data->ipv4_tx_bytes = + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_bytes; + data->ipv6_tx_bytes = + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_bytes; + + /* query WLAN UL stats */ + memset(con_stats, 0, sizeof(struct ipa_quota_stats_all)); + rc = ipa_query_teth_stats(IPA_CLIENT_WLAN1_PROD, con_stats, reset); + if (rc) { + IPAERR("IPA_CLIENT_WLAN1_PROD query failed %d\n", rc); + kfree(con_stats); + return rc; + } + + IPAWANDBG("wlan: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n", + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_pkts, + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_bytes, + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_pkts, + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_bytes); + + /* update the wlan UL stats */ + data->ipv4_tx_packets += + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_pkts; + data->ipv6_tx_packets += + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_pkts; + data->ipv4_tx_bytes += + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv4_bytes; + 
data->ipv6_tx_bytes += + con_stats->client[IPA_CLIENT_Q6_WAN_CONS].num_ipv6_bytes; + + IPAWANDBG("v4_tx_p(%lu) v6_tx_p(%lu) v4_tx_b(%lu) v6_tx_b(%lu)\n", + (unsigned long) data->ipv4_tx_packets, + (unsigned long) data->ipv6_tx_packets, + (unsigned long) data->ipv4_tx_bytes, + (unsigned long) data->ipv6_tx_bytes); + kfree(con_stats); + return rc; +} + + +int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, + bool reset) +{ + enum ipa_upstream_type upstream_type; + int rc = 0; + + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + data->tetherIface[IFNAMSIZ-1] = '\0'; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR(" Wrong upstreamIface name %s\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG_LOW(" query wifi-backhaul stats\n"); + rc = rmnet_ipa3_query_tethering_stats_wifi( + data, false); + if (rc) { + IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + } else { + IPAWANDBG_LOW(" query modem-backhaul stats\n"); + rc = rmnet_ipa3_query_tethering_stats_modem( + data, false); + if (rc) { + IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + } + return rc; +} + +int rmnet_ipa3_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data) +{ + struct wan_ioctl_query_tether_stats tether_stats; + enum ipa_upstream_type upstream_type; + int rc = 0; + + memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); + + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR(" Wrong upstreamIface name %s\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG_LOW(" query wifi-backhaul stats\n"); + rc = rmnet_ipa3_query_tethering_stats_wifi( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR_RL( + "wlan WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } else { + IPAWANDBG_LOW(" query modem-backhaul stats\n"); + tether_stats.ipa_client = data->ipa_client; + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0 || + !ipa3_ctx->hw_stats.enabled) { + IPAWANDBG("hw version %d,hw_stats.enabled %d\n", + ipa3_ctx->ipa_hw_type, + ipa3_ctx->hw_stats.enabled); + /* get modem stats from QMI */ + rc = rmnet_ipa3_query_tethering_stats_modem( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR("modem QUERY_TETHER_STATS failed\n"); + return rc; + } + } else { + /* get modem stats from IPA-HW counters */ + rc = rmnet_ipa3_query_tethering_stats_hw( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR("modem QUERY_TETHER_STATS failed\n"); + return rc; + } + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } + return rc; +} + +int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data) +{ + enum ipa_upstream_type upstream_type; + int rc = 0; + + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { 
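+ /* unknown backhaul type: only log it; rc stays 0 */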
+ IPAWANERR(" Wrong upstreamIface name %s\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANERR(" reset wifi-backhaul stats\n"); + rc = rmnet_ipa3_query_tethering_stats_wifi( + NULL, true); + if (rc) { + IPAWANERR("reset WLAN stats failed\n"); + return rc; + } + } else { + IPAWANERR(" reset modem-backhaul stats\n"); + rc = rmnet_ipa3_query_tethering_stats_modem( + NULL, true); + if (rc) { + IPAWANERR("reset MODEM stats failed\n"); + return rc; + } + } + return rc; +} + +/** + * ipa3_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota + * @mux_id - The MUX ID on which the quota has been reached + * + * This function broadcasts a Netlink event using the kobject of the + * rmnet_ipa interface in order to alert the user space that the quota + * on the specific interface which matches the mux_id has been reached. + * + */ +void ipa3_broadcast_quota_reach_ind(u32 mux_id, + enum ipa_upstream_type upstream_type) +{ + char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE]; + char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE]; + char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE]; + char *envp[IPA_UEVENT_NUM_EVNP] = { + alert_msg, iface_name_l, iface_name_m, NULL}; + int res; + int index; + + /* check upstream_type*/ + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR(" Wrong upstreamIface type %d\n", upstream_type); + return; + } else if (upstream_type == IPA_UPSTEAM_MODEM) { + index = ipa3_find_mux_channel_index(mux_id); + if (index == MAX_NUM_OF_MUX_CHANNEL) { + IPAWANERR("%u is an mux ID\n", mux_id); + return; + } + } + res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE, + "ALERT_NAME=%s", "quotaReachedAlert"); + if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + /* posting msg for L-release for CNE */ + if (upstream_type == IPA_UPSTEAM_MODEM) { + res = snprintf(iface_name_l, + IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "UPSTREAM=%s", + rmnet_ipa3_ctx->mux_channel[index].vchannel_name); + } else { + res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME); + } + if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + /* posting msg for M-release for CNE */ + if (upstream_type == IPA_UPSTEAM_MODEM) { + res = snprintf(iface_name_m, + IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "INTERFACE=%s", + rmnet_ipa3_ctx->mux_channel[index].vchannel_name); + } else { + res = snprintf(iface_name_m, + IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "INTERFACE=%s", + IPA_UPSTEAM_WLAN_IFACE_NAME); + } + if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + + IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n", + alert_msg, iface_name_l, iface_name_m); + kobject_uevent_env(&(IPA_NETDEV()->dev.kobj), + KOBJ_CHANGE, envp); + + rmnet_ipa_send_quota_reach_ind(); +} + +/** + * ipa3_q6_handshake_complete() - Perform operations once Q6 is up + * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR. + * + * This function is invoked once the handshake between the IPA AP driver + * and IPA Q6 driver is complete. At this point, it is possible to perform + * operations which can't be performed until IPA Q6 driver is up. 
+ * + */ +void ipa3_q6_handshake_complete(bool ssr_bootup) +{ + /* It is required to recover the network stats after SSR recovery */ + if (ssr_bootup) { + /* + * In case the uC is required to be loaded by the Modem, + * the proxy vote will be removed only when uC loading is + * complete and indication is received by the AP. After SSR, + * uC is already loaded. Therefore, proxy vote can be removed + * once Modem init is complete. + */ + ipa3_proxy_clk_unvote(); + + /* send SSR power-up notification to IPACM */ + rmnet_ipa_send_ssr_notification(true); + + /* + * It is required to recover the network stats after + * SSR recovery + */ + rmnet_ipa_get_network_stats_and_update(); + } else { + /* + * To enable ipa power collapse we need to enable rpmh and uc + * handshake So that uc can do register retention. To enable + * this handshake we need to send the below message to rpmh + */ + ipa_pc_qmp_enable(); + } + + imp_handle_modem_ready(); +} + +static inline bool rmnet_ipa3_check_any_client_inited +( + enum ipacm_per_client_device_type device_type +) +{ + int i = 0; + struct ipa_tether_device_info *teth_ptr = NULL; + + for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) { + teth_ptr = &rmnet_ipa3_ctx->tether_device[device_type]; + + if (teth_ptr->lan_client[i].client_idx != -1 && + teth_ptr->lan_client[i].inited) { + IPAWANERR("Found client index: %d which is inited\n", + i); + return true; + } + } + + return false; +} + +static inline int rmnet_ipa3_get_lan_client_info +( + enum ipacm_per_client_device_type device_type, + uint8_t mac[] +) +{ + int i = 0; + struct ipa_tether_device_info *teth_ptr = NULL; + + IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], + mac[3], mac[4], mac[5]); + + for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) { + teth_ptr = &rmnet_ipa3_ctx->tether_device[device_type]; + + if (memcmp( + teth_ptr->lan_client[i].mac, + mac, + IPA_MAC_ADDR_SIZE) == 0) { + IPAWANDBG("Matched client index: %d\n", i); + return i; + } + } + + return -EINVAL; +} + +static inline int rmnet_ipa3_delete_lan_client_info +( + enum ipacm_per_client_device_type device_type, + int lan_clnt_idx +) +{ + struct ipa_lan_client *lan_client = NULL; + int i; + struct ipa_tether_device_info *teth_ptr = NULL; + + /* Check if Device type is valid. */ + if (device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES || + device_type < 0) { + IPAWANERR("Invalid Device type: %d\n", device_type); + return -EINVAL; + } + + /* Check if the request is to clean up all clients. */ + teth_ptr = &rmnet_ipa3_ctx->tether_device[device_type]; + + if (lan_clnt_idx == 0xffffffff) { + /* Reset the complete device info. */ + memset(teth_ptr, 0, + sizeof(struct ipa_tether_device_info)); + teth_ptr->ul_src_pipe = -1; + for (i = 0; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) + teth_ptr->lan_client[i].client_idx = -1; + } else { + lan_client = &teth_ptr->lan_client[lan_clnt_idx]; + + /* Reset the client info before sending the message. */ + memset(lan_client, 0, sizeof(struct ipa_lan_client)); + lan_client->client_idx = -1; + + } + return 0; +} + +/* rmnet_ipa3_set_lan_client_info() - + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_LAN_CLIENT_INFO. + * It is used to store LAN client information which + * is used to fetch the packet stats for a client. 
+ * + * Return codes: + * 0: Success + * -EINVAL: Invalid args provided + */ +int rmnet_ipa3_set_lan_client_info( + struct wan_ioctl_lan_client_info *data) +{ + struct ipa_lan_client *lan_client = NULL; + struct ipa_tether_device_info *teth_ptr = NULL; + + + IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + data->mac[0], data->mac[1], data->mac[2], + data->mac[3], data->mac[4], data->mac[5]); + + /* Check if Device type is valid. */ + if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES || + data->device_type < 0) { + IPAWANERR("Invalid Device type: %d\n", data->device_type); + return -EINVAL; + } + + /* Check if Client index is valid. */ + if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS || + data->client_idx < 0) { + IPAWANERR("Invalid Client Index: %d\n", data->client_idx); + return -EINVAL; + } + + mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard); + if (data->client_init) { + /* check if the client is already inited. */ + if (rmnet_ipa3_ctx->tether_device[data->device_type] + .lan_client[data->client_idx].inited) { + IPAWANERR("Client already inited: %d:%d\n", + data->device_type, data->client_idx); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -EINVAL; + } + } + + teth_ptr = &rmnet_ipa3_ctx->tether_device[data->device_type]; + lan_client = &teth_ptr->lan_client[data->client_idx]; + + memcpy(lan_client->mac, data->mac, IPA_MAC_ADDR_SIZE); + + lan_client->client_idx = data->client_idx; + + /* Update the Source pipe. */ + rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe = + ipa3_get_ep_mapping(data->ul_src_pipe); + + /* Update the header length if not set. */ + if (!rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len) + rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len = + data->hdr_len; + + lan_client->inited = true; + + rmnet_ipa3_ctx->tether_device[data->device_type].num_clients++; + + IPAWANDBG("Set the lan client info: %d, %d, %d\n", + lan_client->client_idx, + rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe, + rmnet_ipa3_ctx->tether_device[data->device_type].num_clients); + + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + + return 0; +} + +/* rmnet_ipa3_delete_lan_client_info() - + * @data - IOCTL data + * + * This function handles WAN_IOC_DELETE_LAN_CLIENT_INFO. + * It is used to delete LAN client information which + * is used to fetch the packet stats for a client. + * + * Return codes: + * 0: Success + * -EINVAL: Invalid args provided + */ +int rmnet_ipa3_clear_lan_client_info( + struct wan_ioctl_lan_client_info *data) +{ + struct ipa_lan_client *lan_client = NULL; + struct ipa_tether_device_info *teth_ptr = NULL; + + IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + data->mac[0], data->mac[1], data->mac[2], + data->mac[3], data->mac[4], data->mac[5]); + + /* Check if Device type is valid. */ + if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES || + data->device_type < 0) { + IPAWANERR("Invalid Device type: %d\n", data->device_type); + return -EINVAL; + } + + /* Check if Client index is valid. */ + if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS || + data->client_idx < 0) { + IPAWANERR("Invalid Client Index: %d\n", data->client_idx); + return -EINVAL; + } + + teth_ptr = &rmnet_ipa3_ctx->tether_device[data->device_type]; + mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard); + lan_client = &teth_ptr->lan_client[data->client_idx]; + + if (!data->client_init) { + /* check if the client is already de-inited. 
*/ + if (!lan_client->inited) { + IPAWANERR("Client already de-inited: %d:%d\n", + data->device_type, data->client_idx); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -EINVAL; + } + } + + lan_client->inited = false; + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + + return 0; +} + + +/* rmnet_ipa3_send_lan_client_msg() - + * @data - IOCTL data + * + * This function handles WAN_IOC_SEND_LAN_CLIENT_MSG. + * It is used to send LAN client information to IPACM. + * + * Return codes: + * 0: Success + * -EINVAL: Invalid args provided + */ +int rmnet_ipa3_send_lan_client_msg( + struct wan_ioctl_send_lan_client_msg *data) +{ + struct ipa_msg_meta msg_meta; + int rc; + struct ipa_lan_client_msg *lan_client; + + /* Notify IPACM to reset the client index. */ + lan_client = kzalloc(sizeof(struct ipa_lan_client_msg), + GFP_KERNEL); + if (!lan_client) { + IPAWANERR("Can't allocate memory for tether_info\n"); + return -ENOMEM; + } + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + memcpy(lan_client, &data->lan_client, + sizeof(struct ipa_lan_client_msg)); + msg_meta.msg_type = data->client_event; + msg_meta.msg_len = sizeof(struct ipa_lan_client_msg); + + rc = ipa_send_msg(&msg_meta, lan_client, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + kfree(lan_client); + return rc; + } + return 0; +} + +/* rmnet_ipa3_enable_per_client_stats() - + * @data - IOCTL data + * + * This function handles WAN_IOC_ENABLE_PER_CLIENT_STATS. + * It is used to indicate Q6 to start capturing per client stats. + * + * Return codes: + * 0: Success + * -EINVAL: Invalid args provided + */ +int rmnet_ipa3_enable_per_client_stats( + bool *data) +{ + struct ipa_enable_per_client_stats_req_msg_v01 *req; + struct ipa_enable_per_client_stats_resp_msg_v01 *resp; + int rc; + + req = + kzalloc(sizeof(struct ipa_enable_per_client_stats_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAWANERR("Can't allocate memory for stats message\n"); + return -ENOMEM; + } + resp = + kzalloc(sizeof(struct ipa_enable_per_client_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + IPAWANERR("Can't allocate memory for stats message\n"); + kfree(req); + return -ENOMEM; + } + memset(req, 0, + sizeof(struct ipa_enable_per_client_stats_req_msg_v01)); + memset(resp, 0, + sizeof(struct ipa_enable_per_client_stats_resp_msg_v01)); + + if (*data) + req->enable_per_client_stats = 1; + else + req->enable_per_client_stats = 0; + + rc = ipa3_qmi_enable_per_client_stats(req, resp); + if (rc) { + IPAWANERR("can't enable per client stats\n"); + kfree(req); + kfree(resp); + return rc; + } + + kfree(req); + kfree(resp); + return 0; +} + +int rmnet_ipa3_query_per_client_stats( + struct wan_ioctl_query_per_client_stats *data) +{ + struct ipa_get_stats_per_client_req_msg_v01 *req; + struct ipa_get_stats_per_client_resp_msg_v01 *resp; + int rc, lan_clnt_idx, lan_clnt_idx1, i; + struct ipa_lan_client *lan_client = NULL; + struct ipa_tether_device_info *teth_ptr = NULL; + + IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + data->client_info[0].mac[0], + data->client_info[0].mac[1], + data->client_info[0].mac[2], + data->client_info[0].mac[3], + data->client_info[0].mac[4], + data->client_info[0].mac[5]); + + /* Check if Device type is valid. */ + if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES || + data->device_type < 0) { + IPAWANERR("Invalid Device type: %d\n", data->device_type); + return -EINVAL; + } + + /* Check if num_clients is valid. 
*/ + if (data->num_clients != IPA_MAX_NUM_HW_PATH_CLIENTS && + data->num_clients != 1) { + IPAWANERR("Invalid number of clients: %d\n", data->num_clients); + return -EINVAL; + } + + mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard); + + if (data->num_clients == 1) { + /* Check if the client info is valid.*/ + lan_clnt_idx1 = rmnet_ipa3_get_lan_client_info( + data->device_type, + data->client_info[0].mac); + if (lan_clnt_idx1 < 0) { + IPAWANERR("Client info not available return.\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -EINVAL; + } + + teth_ptr = &rmnet_ipa3_ctx->tether_device[data->device_type]; + lan_client = &teth_ptr->lan_client[lan_clnt_idx1]; + + /* + * Check if disconnect flag is set and + * see if all the clients info are cleared. + */ + if (data->disconnect_clnt && + lan_client->inited) { + IPAWANERR("Client not inited. Try again.\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -EAGAIN; + } + + } else { + /* Max number of clients. */ + /* Check if disconnect flag is set and + * see if all the clients info are cleared. + */ + if (data->disconnect_clnt && + rmnet_ipa3_check_any_client_inited(data->device_type)) { + IPAWANERR("CLient not inited. Try again.\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -EAGAIN; + } + lan_clnt_idx1 = 0xffffffff; + } + + req = kzalloc(sizeof(struct ipa_get_stats_per_client_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAWANERR("Can't allocate memory for stats message\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -ENOMEM; + } + resp = kzalloc(sizeof(struct ipa_get_stats_per_client_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + IPAWANERR("Can't allocate memory for stats message\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + kfree(req); + return -ENOMEM; + } + memset(req, 0, sizeof(struct ipa_get_stats_per_client_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_stats_per_client_resp_msg_v01)); + + if (data->reset_stats) { + req->reset_stats_valid = true; + req->reset_stats = true; + IPAWANDBG("fetch and reset the client stats\n"); + } + + req->client_id = lan_clnt_idx1; + req->src_pipe_id = + rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe; + + IPAWANDBG("fetch the client stats for %d, %d\n", req->client_id, + req->src_pipe_id); + + rc = ipa3_qmi_get_per_client_packet_stats(req, resp); + if (rc) { + IPAWANERR("can't get per client stats\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + kfree(req); + kfree(resp); + return rc; + } + + if (resp->per_client_stats_list_valid) { + for (i = 0; i < resp->per_client_stats_list_len + && i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) { + /* Subtract the header bytes from the DL bytes. */ + data->client_info[i].ipv4_rx_bytes = + (resp->per_client_stats_list[i].num_dl_ipv4_bytes) - + (teth_ptr->hdr_len * + resp->per_client_stats_list[i].num_dl_ipv4_pkts); + /* UL header bytes are subtracted by Q6. */ + data->client_info[i].ipv4_tx_bytes = + resp->per_client_stats_list[i].num_ul_ipv4_bytes; + /* Subtract the header bytes from the DL bytes. */ + data->client_info[i].ipv6_rx_bytes = + (resp->per_client_stats_list[i].num_dl_ipv6_bytes) - + (teth_ptr->hdr_len * + resp->per_client_stats_list[i].num_dl_ipv6_pkts); + /* UL header bytes are subtracted by Q6. 
*/ + data->client_info[i].ipv6_tx_bytes = + resp->per_client_stats_list[i].num_ul_ipv6_bytes; + + IPAWANDBG("tx_b_v4(%lu)v6(%lu)rx_b_v4(%lu) v6(%lu)\n", + (unsigned long) data->client_info[i].ipv4_tx_bytes, + (unsigned long) data->client_info[i].ipv6_tx_bytes, + (unsigned long) data->client_info[i].ipv4_rx_bytes, + (unsigned long) data->client_info[i].ipv6_rx_bytes); + + /* Get the lan client index. */ + lan_clnt_idx = resp->per_client_stats_list[i].client_id; + /* Check if lan_clnt_idx is valid. */ + if (lan_clnt_idx < 0 || + lan_clnt_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS) { + IPAWANERR("Lan client index not valid.\n"); + mutex_unlock( + &rmnet_ipa3_ctx->per_client_stats_guard); + kfree(req); + kfree(resp); + ipa_assert(); + return -EINVAL; + } + memcpy(data->client_info[i].mac, + teth_ptr->lan_client[lan_clnt_idx].mac, + IPA_MAC_ADDR_SIZE); + } + } + + if (data->disconnect_clnt) { + rmnet_ipa3_delete_lan_client_info(data->device_type, + lan_clnt_idx1); + } + + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + kfree(req); + kfree(resp); + return 0; +} + + + +#ifdef CONFIG_DEBUG_FS +static void rmnet_ipa_debugfs_init(void) +{ + const mode_t read_write_mode = 0664; + struct rmnet_ipa_debugfs *dbgfs = &rmnet_ipa3_ctx->dbgfs; + + + dbgfs->dent = debugfs_create_dir("rmnet_ipa", 0); + if (IS_ERR(dbgfs->dent)) { + pr_err("fail to create folder in debug_fs\n"); + return; + } + + dbgfs->dfile_outstanding_high = debugfs_create_u32("outstanding_high", + read_write_mode, dbgfs->dent, + &rmnet_ipa3_ctx->outstanding_high); + if (!dbgfs->dfile_outstanding_high || + IS_ERR(dbgfs->dfile_outstanding_high)) { + pr_err("failed to create file for outstanding_high\n"); + goto fail; + } + + dbgfs->dfile_outstanding_high_ctl = + debugfs_create_u32("outstanding_high_ctl", + read_write_mode, dbgfs->dent, + &rmnet_ipa3_ctx->outstanding_high_ctl); + if (!dbgfs->dfile_outstanding_high_ctl || + IS_ERR(dbgfs->dfile_outstanding_high_ctl)) { + pr_err("failed to create file for outstanding_high_ctl\n"); + goto fail; + } + + dbgfs->dfile_outstanding_low = debugfs_create_u32("outstanding_low", + read_write_mode, dbgfs->dent, + &rmnet_ipa3_ctx->outstanding_low); + if (!dbgfs->dfile_outstanding_low || + IS_ERR(dbgfs->dfile_outstanding_low)) { + pr_err("failed to create file for outstanding_low\n"); + goto fail; + } + + return; + +fail: + debugfs_remove_recursive(dbgfs->dent); + memset(dbgfs, 0, sizeof(struct rmnet_ipa_debugfs)); +} + +static void rmnet_ipa_debugfs_remove(void) +{ + if (IS_ERR(rmnet_ipa3_ctx->dbgfs.dent)) + return; + + debugfs_remove_recursive(rmnet_ipa3_ctx->dbgfs.dent); + memset(&rmnet_ipa3_ctx->dbgfs, 0, sizeof(struct rmnet_ipa_debugfs)); +} +#else /* CONFIG_DEBUG_FS */ +static void rmnet_ipa_debugfs_init(void){} +static void rmnet_ipa_debugfs_remove(void){} +#endif /* CONFIG_DEBUG_FS */ + + + +static int __init ipa3_wwan_init(void) +{ + int i, j; + struct ipa_tether_device_info *teth_ptr = NULL; + + rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL); + + if (!rmnet_ipa3_ctx) + return -ENOMEM; + + atomic_set(&rmnet_ipa3_ctx->is_initialized, 0); + atomic_set(&rmnet_ipa3_ctx->is_ssr, 0); + + mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard); + mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock); + mutex_init(&rmnet_ipa3_ctx->per_client_stats_guard); + /* Reset the Lan Stats. 
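Mark every uplink source pipe and client slot as unused before QMI and debugfs are initialized.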
*/ + for (i = 0; i < IPACM_MAX_CLIENT_DEVICE_TYPES; i++) { + teth_ptr = &rmnet_ipa3_ctx->tether_device[i]; + teth_ptr->ul_src_pipe = -1; + + for (j = 0; j < IPA_MAX_NUM_HW_PATH_CLIENTS; j++) + teth_ptr->lan_client[j].client_idx = -1; + } + rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1; + rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1; + + ipa3_qmi_init(); + + rmnet_ipa3_ctx->outstanding_high = OUTSTANDING_HIGH_DEFAULT; + rmnet_ipa3_ctx->outstanding_high_ctl = OUTSTANDING_HIGH_CTL_DEFAULT; + rmnet_ipa3_ctx->outstanding_low = OUTSTANDING_LOW_DEFAULT; + + rmnet_ipa_debugfs_init(); + + /* Register for Modem SSR */ + rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier( + SUBSYS_MODEM, + &ipa3_ssr_notifier); + if (!IS_ERR(rmnet_ipa3_ctx->subsys_notify_handle)) + return platform_driver_register(&rmnet_ipa_driver); + else + return (int)PTR_ERR(rmnet_ipa3_ctx->subsys_notify_handle); +} + +static void __exit ipa3_wwan_cleanup(void) +{ + int ret; + + platform_driver_unregister(&rmnet_ipa_driver); + + ret = subsys_notif_unregister_notifier( + rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier); + if (ret) + IPAWANERR( + "Error subsys_notif_unregister_notifier system %s, ret=%d\n", + SUBSYS_MODEM, ret); + + rmnet_ipa_debugfs_remove(); + ipa3_qmi_cleanup(); + mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard); + mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock); + mutex_destroy(&rmnet_ipa3_ctx->per_client_stats_guard); + + kfree(rmnet_ipa3_ctx); + rmnet_ipa3_ctx = NULL; +} + +static void ipa3_wwan_msg_free_cb(void *buff, u32 len, u32 type) +{ + kfree(buff); +} + +static int ipa3_rmnet_poll(struct napi_struct *napi, int budget) +{ + int rcvd_pkts = 0; + + rcvd_pkts = ipa_rx_poll(rmnet_ipa3_ctx->ipa3_to_apps_hdl, + NAPI_WEIGHT); + IPAWANDBG_LOW("rcvd packets: %d\n", rcvd_pkts); + return rcvd_pkts; +} + +late_initcall(ipa3_wwan_init); +module_exit(ipa3_wwan_cleanup); +MODULE_DESCRIPTION("WWAN Network Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c new file mode 100644 index 000000000000..3d1a051ea7c3 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c @@ -0,0 +1,553 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_qmi_service.h" + +#define DRIVER_NAME "wwan_ioctl" + +#ifdef CONFIG_COMPAT +#define WAN_IOC_ADD_FLT_RULE32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_FLT_RULE, \ + compat_uptr_t) +#define WAN_IOC_ADD_FLT_RULE_INDEX32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_FLT_INDEX, \ + compat_uptr_t) +#define WAN_IOC_POLL_TETHERING_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_POLL_TETHERING_STATS, \ + compat_uptr_t) +#define WAN_IOC_SET_DATA_QUOTA32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_DATA_QUOTA, \ + compat_uptr_t) +#define WAN_IOC_SET_TETHER_CLIENT_PIPE32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \ + compat_uptr_t) +#define WAN_IOC_QUERY_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS, \ + compat_uptr_t) +#define WAN_IOC_RESET_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_RESET_TETHER_STATS, \ + compat_uptr_t) +#define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_DL_FILTER_STATS, \ + compat_uptr_t) +#define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS_ALL, \ + compat_uptr_t) +#define WAN_IOC_NOTIFY_WAN_STATE32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_NOTIFY_WAN_STATE, \ + compat_uptr_t) +#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \ + compat_uptr_t) +#define WAN_IOCTL_QUERY_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_PER_CLIENT_STATS, \ + compat_uptr_t) +#define WAN_IOCTL_SET_LAN_CLIENT_INFO32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_LAN_CLIENT_INFO, \ + compat_uptr_t) +#endif + +static unsigned int dev_num = 1; +static struct cdev ipa3_wan_ioctl_cdev; +static unsigned int ipa3_process_ioctl = 1; +static struct class *class; +static dev_t device; + +static long ipa3_wan_ioctl(struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + int retval = 0, rc = 0; + u32 pyld_sz; + u8 *param = NULL; + + IPAWANDBG("device %s got ioctl events :>>>\n", + DRIVER_NAME); + + if (!ipa3_process_ioctl) { + + if ((cmd == WAN_IOC_SET_LAN_CLIENT_INFO) || + (cmd == WAN_IOC_CLEAR_LAN_CLIENT_INFO)) { + IPAWANDBG("Modem is in SSR\n"); + IPAWANDBG("Still allow IOCTL for exceptions (%d)\n", + cmd); + } else { + IPAWANERR("Modem is in SSR, ignoring ioctl (%d)\n", + cmd); + return -EAGAIN; + } + } + + switch (cmd) { + case WAN_IOC_ADD_FLT_RULE: + IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct ipa_install_fltr_rule_req_msg_v01); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + if (ipa3_qmi_filter_request_send( + (struct ipa_install_fltr_rule_req_msg_v01 *)param)) { + IPAWANDBG("IPACM->Q6 add filter rule failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_ADD_FLT_RULE_EX: + IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_EX :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + if (ipa3_qmi_filter_request_ex_send( + (struct ipa_install_fltr_rule_req_ex_msg_v01 *)param)) { + IPAWANDBG("IPACM->Q6 add filter rule failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_ADD_UL_FLT_RULE: + 
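/* Same handling pattern as the other filter-rule ioctls above:
+ * duplicate the user buffer with memdup_user(), forward the request
+ * to Q6 over QMI, then copy the buffer back to user space on success.
+ */
+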
IPAWANDBG("device %s got WAN_IOC_UL_ADD_FLT_RULE :>>>\n", + DRIVER_NAME); + pyld_sz = + sizeof(struct ipa_configure_ul_firewall_rules_req_msg_v01); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + if (ipa3_qmi_ul_filter_request_send( + (struct ipa_configure_ul_firewall_rules_req_msg_v01 *) + param)) { + IPAWANDBG("IPACM->Q6 add ul filter rule failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_ADD_FLT_RULE_INDEX: + IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct ipa_fltr_installed_notif_req_msg_v01); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + if (ipa3_qmi_filter_notify_send( + (struct ipa_fltr_installed_notif_req_msg_v01 *)param)) { + IPAWANDBG("IPACM->Q6 rule index fail\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_VOTE_FOR_BW_MBPS: + IPAWANDBG("device %s got WAN_IOC_VOTE_FOR_BW_MBPS :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(uint32_t); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + if (ipa3_vote_for_bus_bw((uint32_t *)param)) { + IPAWANERR("Failed to vote for bus BW\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_POLL_TETHERING_STATS: + IPAWANDBG_LOW("got WAN_IOCTL_POLL_TETHERING_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_poll_tethering_stats); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + if (rmnet_ipa3_poll_tethering_stats( + (struct wan_ioctl_poll_tethering_stats *)param)) { + IPAWANERR_RL("WAN_IOCTL_POLL_TETHERING_STATS failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_SET_DATA_QUOTA: + IPAWANDBG_LOW("device %s got WAN_IOCTL_SET_DATA_QUOTA :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct wan_ioctl_set_data_quota); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + rc = rmnet_ipa3_set_data_quota( + (struct wan_ioctl_set_data_quota *)param); + if (rc != 0) { + IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n"); + if (rc == -ENODEV) + retval = -ENODEV; + else + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_SET_TETHER_CLIENT_PIPE: + IPAWANDBG_LOW("got WAN_IOC_SET_TETHER_CLIENT_PIPE :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_set_tether_client_pipe); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + if (rmnet_ipa3_set_tether_client_pipe( + (struct wan_ioctl_set_tether_client_pipe *)param)) { + IPAWANERR("WAN_IOC_SET_TETHER_CLIENT_PIPE failed\n"); + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_QUERY_TETHER_STATS: + IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_tether_stats); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + + if 
(rmnet_ipa3_query_tethering_stats( + (struct wan_ioctl_query_tether_stats *)param, false)) { + IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_QUERY_TETHER_STATS_ALL: + IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS_ALL :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_tether_stats_all); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + + if (rmnet_ipa3_query_tethering_stats_all( + (struct wan_ioctl_query_tether_stats_all *)param)) { + IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_RESET_TETHER_STATS: + IPAWANDBG_LOW("device %s got WAN_IOC_RESET_TETHER_STATS :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct wan_ioctl_reset_tether_stats); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + + if (rmnet_ipa3_reset_tethering_stats( + (struct wan_ioctl_reset_tether_stats *)param)) { + IPAWANERR("WAN_IOC_RESET_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_NOTIFY_WAN_STATE: + IPAWANDBG_LOW("device %s got WAN_IOC_NOTIFY_WAN_STATE :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct wan_ioctl_notify_wan_state); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + + if (ipa3_wwan_set_modem_state( + (struct wan_ioctl_notify_wan_state *)param)) { + IPAWANERR("WAN_IOC_NOTIFY_WAN_STATE failed\n"); + retval = -EFAULT; + break; + } + + break; + case WAN_IOC_ENABLE_PER_CLIENT_STATS: + IPAWANDBG_LOW("got WAN_IOC_ENABLE_PER_CLIENT_STATS :>>>\n"); + pyld_sz = sizeof(bool); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + if (rmnet_ipa3_enable_per_client_stats( + (bool *)param)) { + IPAWANERR("WAN_IOC_ENABLE_PER_CLIENT_STATS failed\n"); + retval = -EFAULT; + break; + } + break; + case WAN_IOC_QUERY_PER_CLIENT_STATS: + IPAWANDBG_LOW("got WAN_IOC_QUERY_PER_CLIENT_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_per_client_stats); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + + retval = rmnet_ipa3_query_per_client_stats( + (struct wan_ioctl_query_per_client_stats *)param); + if (retval) { + IPAWANERR("WAN_IOC_QUERY_PER_CLIENT_STATS failed\n"); + break; + } + + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_SET_LAN_CLIENT_INFO: + IPAWANDBG_LOW("got WAN_IOC_SET_LAN_CLIENT_INFO :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_lan_client_info); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + if (rmnet_ipa3_set_lan_client_info( + (struct wan_ioctl_lan_client_info *)param)) { + IPAWANERR("WAN_IOC_SET_LAN_CLIENT_INFO failed\n"); + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_CLEAR_LAN_CLIENT_INFO: + IPAWANDBG_LOW("got WAN_IOC_CLEAR_LAN_CLIENT_INFO :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_lan_client_info); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + if (rmnet_ipa3_clear_lan_client_info( + 
(struct wan_ioctl_lan_client_info *)param)) { + IPAWANERR("WAN_IOC_CLEAR_LAN_CLIENT_INFO failed\n"); + retval = -EFAULT; + break; + } + break; + + + case WAN_IOC_SEND_LAN_CLIENT_MSG: + IPAWANDBG_LOW("got WAN_IOC_SEND_LAN_CLIENT_MSG :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_send_lan_client_msg); + param = memdup_user((const void __user *)arg, pyld_sz); + if (IS_ERR(param)) { + retval = PTR_ERR(param); + break; + } + if (rmnet_ipa3_send_lan_client_msg( + (struct wan_ioctl_send_lan_client_msg *) + param)) { + IPAWANERR("IOC_SEND_LAN_CLIENT_MSG failed\n"); + retval = -EFAULT; + break; + } + break; + + default: + retval = -ENOTTY; + } + kfree(param); + return retval; +} + +#ifdef CONFIG_COMPAT +long ipa3_compat_wan_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case WAN_IOC_ADD_FLT_RULE32: + cmd = WAN_IOC_ADD_FLT_RULE; + break; + case WAN_IOC_ADD_FLT_RULE_INDEX32: + cmd = WAN_IOC_ADD_FLT_RULE_INDEX; + break; + case WAN_IOC_POLL_TETHERING_STATS32: + cmd = WAN_IOC_POLL_TETHERING_STATS; + break; + case WAN_IOC_SET_DATA_QUOTA32: + cmd = WAN_IOC_SET_DATA_QUOTA; + break; + case WAN_IOC_SET_TETHER_CLIENT_PIPE32: + cmd = WAN_IOC_SET_TETHER_CLIENT_PIPE; + break; + case WAN_IOC_QUERY_TETHER_STATS32: + cmd = WAN_IOC_QUERY_TETHER_STATS; + break; + case WAN_IOC_RESET_TETHER_STATS32: + cmd = WAN_IOC_RESET_TETHER_STATS; + break; + case WAN_IOC_QUERY_DL_FILTER_STATS32: + cmd = WAN_IOC_QUERY_DL_FILTER_STATS; + break; + default: + return -ENOIOCTLCMD; + } + return ipa3_wan_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); +} +#endif + +static int ipa3_wan_ioctl_open(struct inode *inode, struct file *filp) +{ + IPAWANDBG("\n IPA A7 ipa3_wan_ioctl open OK :>>>> "); + return 0; +} + +const struct file_operations rmnet_ipa3_fops = { + .owner = THIS_MODULE, + .open = ipa3_wan_ioctl_open, + .read = NULL, + .unlocked_ioctl = ipa3_wan_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ipa3_compat_wan_ioctl, +#endif +}; + +int ipa3_wan_ioctl_init(void) +{ + unsigned int wan_ioctl_major = 0; + int ret; + struct device *dev; + + device = MKDEV(wan_ioctl_major, 0); + + ret = alloc_chrdev_region(&device, 0, dev_num, DRIVER_NAME); + if (ret) { + IPAWANERR(":device_alloc err.\n"); + goto dev_alloc_err; + } + wan_ioctl_major = MAJOR(device); + + class = class_create(THIS_MODULE, DRIVER_NAME); + if (IS_ERR(class)) { + IPAWANERR(":class_create err.\n"); + goto class_err; + } + + dev = device_create(class, NULL, device, + NULL, DRIVER_NAME); + if (IS_ERR(dev)) { + IPAWANERR(":device_create err.\n"); + goto device_err; + } + + cdev_init(&ipa3_wan_ioctl_cdev, &rmnet_ipa3_fops); + ret = cdev_add(&ipa3_wan_ioctl_cdev, device, dev_num); + if (ret) { + IPAWANERR(":cdev_add err.\n"); + goto cdev_add_err; + } + + ipa3_process_ioctl = 1; + + IPAWANDBG("IPA %s major(%d) initial ok :>>>>\n", + DRIVER_NAME, wan_ioctl_major); + return 0; + +cdev_add_err: + device_destroy(class, device); +device_err: + class_destroy(class); +class_err: + unregister_chrdev_region(device, dev_num); +dev_alloc_err: + return -ENODEV; +} + +void ipa3_wan_ioctl_stop_qmi_messages(void) +{ + ipa3_process_ioctl = 0; +} + +void ipa3_wan_ioctl_enable_qmi_messages(void) +{ + ipa3_process_ioctl = 1; +} + +void ipa3_wan_ioctl_deinit(void) +{ + cdev_del(&ipa3_wan_ioctl_cdev); + device_destroy(class, device); + class_destroy(class); + unregister_chrdev_region(device, dev_num); +} diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c new file mode 100644 index 
000000000000..de6143e1a555 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" + +#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge" + +#define TETH_DBG(fmt, args...) \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args) +#define TETH_DBG_FUNC_ENTRY() \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__) +#define TETH_DBG_FUNC_EXIT() \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__) +#define TETH_ERR(fmt, args...) \ + pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) + +/** + * struct ipa3_teth_bridge_ctx - Tethering bridge driver context information + * @class: kernel class pointer + * @dev_num: kernel device number + * @dev: kernel device struct pointer + * @cdev: kernel character device struct + */ +struct ipa3_teth_bridge_ctx { + struct class *class; + dev_t dev_num; + struct device *dev; + struct cdev cdev; + u32 modem_pm_hdl; +}; +static struct ipa3_teth_bridge_ctx *ipa3_teth_ctx; + +/** + * teth_bridge_ipa_cb() - Callback to handle IPA data path events + * @priv - private data + * @evt - event type + * @data - event specific data (usually skb) + * + * This callback is called by IPA driver for exception packets from USB. + * All exception packets are handled by Q6 and should not reach this function. + * Packets will arrive to AP exception pipe only in case where packets are + * sent from USB before Q6 has setup the call. + */ +static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + + TETH_DBG_FUNC_ENTRY(); + if (evt != IPA_RECEIVE) { + TETH_ERR("unexpected event %d\n", evt); + WARN_ON(1); + return; + } + + TETH_ERR("Unexpected exception packet from USB, dropping packet\n"); + dev_kfree_skb_any(skb); + TETH_DBG_FUNC_EXIT(); +} + +/** + * ipa3_teth_bridge_init() - Initialize the Tethering bridge driver + * @params - in/out params for USB initialization API (please look at struct + * definition for more info) + * + * USB driver gets a pointer to a callback function (usb_notify_cb) and an + * associated data. + * + * Builds IPA resource manager dependency graph. 
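+ * (The ipa_rm_add_dependency() calls themselves are issued from
+ * ipa3_teth_bridge_connect(), not from this function.)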
+ * + * Return codes: 0: success, + * -EINVAL - Bad parameter + * Other negative value - Failure + */ +int ipa3_teth_bridge_init(struct teth_bridge_init_params *params) +{ + TETH_DBG_FUNC_ENTRY(); + + if (!params) { + TETH_ERR("Bad parameter\n"); + TETH_DBG_FUNC_EXIT(); + return -EINVAL; + } + + params->usb_notify_cb = teth_bridge_ipa_cb; + params->private_data = NULL; + params->skip_ep_cfg = true; + + TETH_DBG_FUNC_EXIT(); + return 0; +} + +/** + * ipa3_teth_bridge_disconnect() - Disconnect tethering bridge module + */ +int ipa3_teth_bridge_disconnect(enum ipa_client_type client) +{ + int res = 0; + + TETH_DBG_FUNC_ENTRY(); + if (ipa_pm_is_used()) { + res = ipa_pm_deactivate_sync(ipa3_teth_ctx->modem_pm_hdl); + if (res) { + TETH_ERR("fail to deactivate modem %d\n", res); + return res; + } + res = ipa_pm_deregister(ipa3_teth_ctx->modem_pm_hdl); + ipa3_teth_ctx->modem_pm_hdl = ~0; + } else { + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_CONS); + } + TETH_DBG_FUNC_EXIT(); + + return res; +} + +/** + * ipa3_teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call + * @connect_params: Connection info + * + * Return codes: 0: success + * -EINVAL: invalid parameters + * -EPERM: Operation not permitted as the bridge is already + * connected + */ +int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params) +{ + int res = 0; + struct ipa_pm_register_params reg_params; + + memset(®_params, 0, sizeof(reg_params)); + + TETH_DBG_FUNC_ENTRY(); + + if (ipa_pm_is_used()) { + reg_params.name = "MODEM (USB RMNET)"; + reg_params.group = IPA_PM_GROUP_MODEM; + reg_params.skip_clk_vote = true; + res = ipa_pm_register(®_params, + &ipa3_teth_ctx->modem_pm_hdl); + if (res) { + TETH_ERR("fail to register with PM %d\n", res); + return res; + } + + res = ipa_pm_activate_sync(ipa3_teth_ctx->modem_pm_hdl); + goto bail; + } + + /* Build the dependency graph, first add_dependency call is sync + * in order to make sure the IPA clocks are up before we continue + * and notify the USB driver it may continue. + */ + res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res < 0) { + TETH_ERR("ipa_rm_add_dependency() failed.\n"); + goto bail; + } + + /* this add_dependency call can't be sync since it will block until USB + * status is connected (which can happen only after the tethering + * bridge is connected), the clocks are already up so the call doesn't + * need to block. 
+ */ + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_CONS); + if (res < 0 && res != -EINPROGRESS) { + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + TETH_ERR("ipa_rm_add_dependency() failed.\n"); + goto bail; + } + + res = 0; + +bail: + TETH_DBG_FUNC_EXIT(); + return res; +} + +static long ipa3_teth_bridge_ioctl(struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + IPAERR("No ioctls are supported!\n"); + return -ENOIOCTLCMD; +} + +static const struct file_operations ipa3_teth_bridge_drv_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = ipa3_teth_bridge_ioctl, +}; + +/** + * ipa3_teth_bridge_driver_init() - Initialize tethering bridge driver + * + */ +int ipa3_teth_bridge_driver_init(void) +{ + int res; + + TETH_DBG("Tethering bridge driver init\n"); + ipa3_teth_ctx = kzalloc(sizeof(*ipa3_teth_ctx), GFP_KERNEL); + if (!ipa3_teth_ctx) + return -ENOMEM; + + ipa3_teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME); + + res = alloc_chrdev_region(&ipa3_teth_ctx->dev_num, 0, 1, + TETH_BRIDGE_DRV_NAME); + if (res) { + TETH_ERR("alloc_chrdev_region err.\n"); + res = -ENODEV; + goto fail_alloc_chrdev_region; + } + + ipa3_teth_ctx->dev = device_create(ipa3_teth_ctx->class, + NULL, + ipa3_teth_ctx->dev_num, + ipa3_teth_ctx, + TETH_BRIDGE_DRV_NAME); + if (IS_ERR(ipa3_teth_ctx->dev)) { + TETH_ERR(":device_create err.\n"); + res = -ENODEV; + goto fail_device_create; + } + + cdev_init(&ipa3_teth_ctx->cdev, &ipa3_teth_bridge_drv_fops); + ipa3_teth_ctx->cdev.owner = THIS_MODULE; + ipa3_teth_ctx->cdev.ops = &ipa3_teth_bridge_drv_fops; + + res = cdev_add(&ipa3_teth_ctx->cdev, ipa3_teth_ctx->dev_num, 1); + if (res) { + TETH_ERR(":cdev_add err=%d\n", -res); + res = -ENODEV; + goto fail_cdev_add; + } + + ipa3_teth_ctx->modem_pm_hdl = ~0; + TETH_DBG("Tethering bridge driver init OK\n"); + + return 0; +fail_cdev_add: + device_destroy(ipa3_teth_ctx->class, ipa3_teth_ctx->dev_num); +fail_device_create: + unregister_chrdev_region(ipa3_teth_ctx->dev_num, 1); +fail_alloc_chrdev_region: + kfree(ipa3_teth_ctx); + ipa3_teth_ctx = NULL; + + return res; +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Tethering bridge driver"); diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile new file mode 100644 index 000000000000..eb580934e4c7 --- /dev/null +++ b/drivers/platform/msm/ipa/test/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o +ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o ipa_pm_ut.o diff --git a/drivers/platform/msm/ipa/test/ipa_pm_ut.c b/drivers/platform/msm/ipa/test/ipa_pm_ut.c new file mode 100644 index 000000000000..0ec4792151ff --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_pm_ut.c @@ -0,0 +1,1792 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include "../ipa_v3/ipa_pm.h" +#include "../ipa_v3/ipa_i.h" +#include "ipa_ut_framework.h" +#include + +struct callback_param { + struct completion complete; + enum ipa_pm_cb_event evt; +}; + +static int ipa_pm_ut_setup(void **ppriv) +{ + int i; + int vote; + + IPA_UT_DBG("Start Setup\n"); + + /* decrement UT vote */ + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT"); + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote) { + IPA_UT_ERR("clock vote is not zero %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("clock is voted"); + IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT"); + return -EINVAL; + } + + /*decouple PM from RPM */ + ipa3_ctx->enable_clock_scaling = false; + + if (ipa3_ctx->use_ipa_pm) { + for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) { + ipa_pm_deactivate_sync(i); + ipa_pm_deregister(i); + } + + ipa_pm_destroy(); + } + + return 0; +} + +static int ipa_pm_ut_teardown(void *priv) +{ + IPA_UT_DBG("Start Teardown\n"); + IPA_UT_ERR("WARNING: IPA_PM HAS BEEN DESTROYED, REBOOT TO RE_INIT\n"); + + /* undo UT vote */ + IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT"); + return 0; +} + +/* pass completion struct as the user data/callback params */ +static void ipa_pm_call_back(void *user_data, enum ipa_pm_cb_event evt) +{ + struct callback_param *param; + + param = (struct callback_param *) user_data; + param->evt = evt; + + if (evt == IPA_PM_CLIENT_ACTIVATED) { + IPA_UT_DBG("Activate callback called\n"); + complete_all(¶m->complete); + } else if (evt == IPA_PM_REQUEST_WAKEUP) { + IPA_UT_DBG("Request Wakeup callback called\n"); + complete_all(¶m->complete); + } else + IPA_UT_ERR("invalid callback - callback #%d\n", evt); +} + +static int clean_up(int n, ...) +{ + va_list args; + int i, hdl, rc = 0; + + va_start(args, n); + + IPA_UT_DBG("n = %d\n", n); + + IPA_UT_DBG("Clean up Started"); + + for (i = 0; i < n; i++) { + hdl = va_arg(args, int); + + rc = ipa_pm_deactivate_sync(hdl); + if (rc) { + IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("deactivate failed"); + return -EFAULT; + } + rc = ipa_pm_deregister(hdl); + if (rc) { + IPA_UT_ERR("fail to deregister client - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("deregister failed"); + return -EFAULT; + } + } + va_end(args); + rc = ipa_pm_destroy(); + if (rc) { + IPA_UT_ERR("fail to destroy pm - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return -EFAULT; + } + + return 0; +} + + +/* test 1 */ +static int ipa_pm_ut_single_registration(void *priv) +{ + int rc = 0; + int hdl, vote; + struct callback_param user_data; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000} + }; + + struct ipa_pm_register_params register_params = { + .name = "USB", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + .user_data = &user_data + }; + user_data.evt = IPA_PM_CB_EVENT_MAX; + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + init_completion(&user_data.complete); + + rc = ipa_pm_register(®ister_params, &hdl); + if (rc) { + IPA_UT_ERR("fail to register client rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl); + if (rc != -EINPROGRESS) { + IPA_UT_ERR("fail to queue work - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("queue activate work failed"); + return -EFAULT; + } + + if (!wait_for_completion_timeout(&user_data.complete, + 
msecs_to_jiffies(2000))) { + IPA_UT_ERR("timeout waiting for activate_callback\n"); + IPA_UT_TEST_FAIL_REPORT("activate callback not called"); + return -ETIME; + } + + if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) { + IPA_UT_ERR("Callback = %d\n", user_data.evt); + IPA_UT_TEST_FAIL_REPORT("wrong callback called"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_deregister(hdl); + if (rc == 0) { + IPA_UT_ERR("deregister succeeded while it should not\n"); + IPA_UT_TEST_FAIL_REPORT("deregister should not succeed"); + return -EFAULT; + } + + rc = ipa_pm_deferred_deactivate(hdl); + if (rc) { + IPA_UT_ERR("fail to deferred deactivate client - rc = %d\n" + , rc); + IPA_UT_TEST_FAIL_REPORT("fail to deferred deactivate client"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + msleep(2000); + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 0) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_deregister(hdl); + if (rc) { + IPA_UT_ERR("fail to deregister client - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to deregister client"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl); + if (rc == 0) { + IPA_UT_ERR("activate succeeded while it should not\n"); + IPA_UT_TEST_FAIL_REPORT("activate should not succeed"); + return -EFAULT; + } + + rc = ipa_pm_destroy(); + if (rc) { + IPA_UT_ERR("terminate failed - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("terminate_failed"); + } + + return 0; +} + +/* test 2 */ +static int ipa_pm_ut_double_register_activate(void *priv) +{ + int rc = 0; + int hdl, hdl_test, vote; + struct callback_param user_data; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000} + }; + + struct ipa_pm_register_params register_params = { + .name = "USB", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + .user_data = &user_data + }; + user_data.evt = IPA_PM_CB_EVENT_MAX; + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + init_completion(&user_data.complete); + + rc = ipa_pm_register(®ister_params, &hdl); + if (rc) { + IPA_UT_ERR("fail to register client rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_register(®ister_params, &hdl_test); + if (rc != -EEXIST) { + IPA_UT_ERR("registered client with same name rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("did not to fail register"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl); + if (rc != -EINPROGRESS) { + IPA_UT_ERR("fail to queue work - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("queue activate work failed"); + return -EFAULT; + } + + /* It is possible that previous activation already completed. 
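In that case the second ipa_pm_activate() call below legitimately returns 0 instead of -EINPROGRESS.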
*/ + rc = ipa_pm_activate(hdl); + if (rc != -EINPROGRESS && rc != 0) { + IPA_UT_ERR("second time activation failed - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("second time activation failed"); + return -EFAULT; + } + + if (!wait_for_completion_timeout(&user_data.complete, + msecs_to_jiffies(2000))) { + IPA_UT_ERR("timeout waiting for activate_callback\n"); + IPA_UT_TEST_FAIL_REPORT("activate callback not called"); + return -ETIME; + } + + if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) { + IPA_UT_ERR("Callback = %d\n", user_data.evt); + IPA_UT_TEST_FAIL_REPORT("wrong callback called"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl); + if (rc) { + IPA_UT_ERR("fail to do nothing on 2nd activate = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to not reactivate"); + return -EFAULT; + } + + msleep(2000); + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_deactivate_sync(hdl); + if (rc) { + IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to deactivate client"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 0) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = clean_up(1, hdl); + return rc; +} + +/* test 3 */ +static int ipa_pm_ut_deferred_deactivate(void *priv) +{ + int rc = 0; + int hdl, vote; + struct callback_param user_data; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000} + }; + + struct ipa_pm_register_params register_params = { + .name = "USB", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + .user_data = &user_data + }; + user_data.evt = IPA_PM_CB_EVENT_MAX; + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + init_completion(&user_data.complete); + + rc = ipa_pm_register(®ister_params, &hdl); + if (rc) { + IPA_UT_ERR("fail to register client rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl); + if (rc != -EINPROGRESS) { + IPA_UT_ERR("fail to queue work - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("queue activate work failed"); + return -EFAULT; + } + + if (!wait_for_completion_timeout(&user_data.complete, + msecs_to_jiffies(2000))) { + IPA_UT_ERR("timeout waiting for activate_callback\n"); + IPA_UT_TEST_FAIL_REPORT("activate callback not called"); + return -ETIME; + } + + if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) { + IPA_UT_ERR("Callback = %d\n", user_data.evt); + IPA_UT_TEST_FAIL_REPORT("wrong callback called"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_deferred_deactivate(hdl); + if (rc) { + IPA_UT_ERR("fail to deffered deactivate client - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl); + if (rc) { + IPA_UT_ERR("fail to reactivate client - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("reactivate client failed"); + return -EFAULT; + } + + msleep(2000); + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote 
!= 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_deactivate_sync(hdl); + if (rc) { + IPA_UT_ERR("fail to deactivate_sync client - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("deactivate sync failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = clean_up(1, hdl); + return rc; +} + + +/* test 4 */ +static int ipa_pm_ut_two_clients_activate(void *priv) +{ + int rc = 0; + int hdl_USB, hdl_WLAN, vote; + u32 pipes; + struct callback_param user_data_USB; + struct callback_param user_data_WLAN; + bool wait_for_completion; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000} + }; + + struct ipa_pm_register_params USB_params = { + .name = "USB", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + .user_data = &user_data_USB + }; + + struct ipa_pm_register_params WLAN_params = { + .name = "WLAN", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + .user_data = &user_data_WLAN + }; + user_data_USB.evt = IPA_PM_CB_EVENT_MAX; + user_data_WLAN.evt = IPA_PM_CB_EVENT_MAX; + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + init_completion(&user_data_USB.complete); + init_completion(&user_data_WLAN.complete); + + rc = ipa_pm_register(&USB_params, &hdl_USB); + if (rc) { + IPA_UT_ERR("fail to register client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_register(&WLAN_params, &hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to register client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_associate_ipa_cons_to_client(hdl_USB, IPA_CLIENT_USB_CONS); + if (rc) { + IPA_UT_ERR("fail to map client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to map client"); + return -EFAULT; + } + + rc = ipa_pm_associate_ipa_cons_to_client(hdl_WLAN, + IPA_CLIENT_WLAN1_CONS); + if (rc) { + IPA_UT_ERR("fail to map client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to map client"); + return -EFAULT; + } + + rc = ipa_pm_associate_ipa_cons_to_client(hdl_WLAN, + IPA_CLIENT_USB_DPL_CONS); + if (rc) { + IPA_UT_ERR("fail to map client 2 to multiplt pipes rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("fail to map client"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl_USB); + if (rc != -EINPROGRESS) { + IPA_UT_ERR("fail to queue work for client 1 - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("queue activate work failed"); + return -EFAULT; + } + + /* It could be that USB enabled clocks so WLAN will be activated + * without delay. + */ + rc = ipa_pm_activate(hdl_WLAN); + if (rc != -EINPROGRESS && rc != 0) { + IPA_UT_ERR("failed to activate WLAN - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("failed to activate WLAN"); + return -EFAULT; + } + wait_for_completion = !rc ? 
false : true; + + if (!wait_for_completion_timeout(&user_data_USB.complete, + msecs_to_jiffies(2000))) { + IPA_UT_ERR("timeout waiting for activate_callback 1\n"); + IPA_UT_TEST_FAIL_REPORT("activate callback not called"); + return -ETIME; + } + + if (user_data_USB.evt != IPA_PM_CLIENT_ACTIVATED) { + IPA_UT_ERR("Callback = %d\n", user_data_USB.evt); + IPA_UT_TEST_FAIL_REPORT("wrong callback called"); + return -EFAULT; + } + + if (wait_for_completion && + !wait_for_completion_timeout(&user_data_WLAN.complete, + msecs_to_jiffies(2000))) { + IPA_UT_ERR("timeout waiting for activate_callback 2\n"); + IPA_UT_TEST_FAIL_REPORT("activate callback not called"); + return -ETIME; + } + + /* In case WLAN activated immediately, there will be no event */ + if (wait_for_completion && + user_data_WLAN.evt != IPA_PM_CLIENT_ACTIVATED) { + IPA_UT_ERR("Callback = %d\n", user_data_WLAN.evt); + IPA_UT_TEST_FAIL_REPORT("wrong callback called"); + return -EFAULT; + } + + reinit_completion(&user_data_USB.complete); + reinit_completion(&user_data_WLAN.complete); + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 2) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_deferred_deactivate(hdl_USB); + if (rc) { + IPA_UT_ERR("fail to deffered deactivate client 1 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail"); + return -EFAULT; + } + + msleep(2000); + + rc = ipa_pm_activate(hdl_USB); + if (rc) { + IPA_UT_ERR("no-block activate failed - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("no-block activate fail"); + return -EFAULT; + } + + pipes = 1 << ipa_get_ep_mapping(IPA_CLIENT_USB_CONS); + pipes |= 1 << ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS); + pipes |= 1 << ipa_get_ep_mapping(IPA_CLIENT_USB_DPL_CONS); + + IPA_UT_DBG("pipes = %d\n", pipes); + + rc = ipa_pm_handle_suspend(pipes); + + if (!wait_for_completion_timeout(&user_data_USB.complete, + msecs_to_jiffies(2000))) { + IPA_UT_ERR("timeout waiting for wakeup_callback 1\n"); + IPA_UT_TEST_FAIL_REPORT("wakeup callback not called"); + return -ETIME; + } + + if (user_data_USB.evt != IPA_PM_REQUEST_WAKEUP) { + IPA_UT_ERR("Callback = %d\n", user_data_USB.evt); + IPA_UT_TEST_FAIL_REPORT("wrong callback called"); + return -EFAULT; + } + + if (!wait_for_completion_timeout(&user_data_WLAN.complete, + msecs_to_jiffies(2000))) { + IPA_UT_ERR("timeout waiting for wakeup_callback 2\n"); + IPA_UT_TEST_FAIL_REPORT("wakeup callback not called"); + return -ETIME; + } + + if (user_data_WLAN.evt != IPA_PM_REQUEST_WAKEUP) { + IPA_UT_ERR("Callback = %d\n", user_data_WLAN.evt); + IPA_UT_TEST_FAIL_REPORT("wrong callback called"); + return -EFAULT; + } + + reinit_completion(&user_data_USB.complete); + + rc = ipa_pm_deactivate_sync(hdl_USB); + if (rc) { + IPA_UT_ERR("fail to deactivate_sync client 1 - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to deactivate_sync"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl_USB); + if (rc) { + IPA_UT_ERR("no-block activate failed - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("no-block activate fail"); + return -EFAULT; + } + + pipes = 1 << ipa_get_ep_mapping(IPA_CLIENT_USB_CONS); + + rc = ipa_pm_handle_suspend(pipes); + + if (!wait_for_completion_timeout(&user_data_USB.complete, + msecs_to_jiffies(2000))) { + IPA_UT_ERR("timeout waiting for wakeup_callback 1\n"); + IPA_UT_TEST_FAIL_REPORT("wakeup callback not called"); + return -ETIME; + } + + if (user_data_USB.evt != IPA_PM_REQUEST_WAKEUP) { + IPA_UT_ERR("Callback = %d\n", 
user_data_USB.evt); + IPA_UT_TEST_FAIL_REPORT("wrong callback called"); + return -EFAULT; + } + + rc = clean_up(2, hdl_USB, hdl_WLAN); + return rc; +} + +/* test 5 */ +static int ipa_pm_ut_deactivate_all_deferred(void *priv) +{ + + int rc = 0; + int hdl_USB, hdl_WLAN, hdl_MODEM, vote; + struct callback_param user_data; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000} + }; + + struct ipa_pm_register_params USB_params = { + .name = "USB", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + .user_data = &user_data + }; + + struct ipa_pm_register_params WLAN_params = { + .name = "WLAN", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + struct ipa_pm_register_params MODEM_params = { + .name = "MODEM", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + user_data.evt = IPA_PM_CB_EVENT_MAX; + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm - rce %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + init_completion(&user_data.complete); + + rc = ipa_pm_register(&USB_params, &hdl_USB); + if (rc) { + IPA_UT_ERR("fail to register client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_register(&WLAN_params, &hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to register client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl_USB); + if (rc != -EINPROGRESS) { + IPA_UT_ERR("fail to queue work for client 1 - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("queue activate work failed"); + return -EFAULT; + } + + rc = ipa_pm_activate_sync(hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to activate sync for client 2- rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("activate sync failed"); + return -EFAULT; + } + + if (!wait_for_completion_timeout(&user_data.complete, + msecs_to_jiffies(2000))) { + IPA_UT_ERR("timeout waiting for activate_callback 1\n"); + IPA_UT_TEST_FAIL_REPORT("activate callback not called"); + return -ETIME; + } + + if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) { + IPA_UT_ERR("Callback = %d\n", user_data.evt); + IPA_UT_TEST_FAIL_REPORT("wrong callback called"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 2) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_register(&MODEM_params, &hdl_MODEM); + if (rc) { + IPA_UT_ERR("fail to register client 3 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl_MODEM); + if (rc) { + IPA_UT_ERR("fail to no-block activate - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("no-block-activate failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 3) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_deferred_deactivate(hdl_USB); + if (rc) { + IPA_UT_ERR("fail to deffered deactivate client 1 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail"); + return -EFAULT; + } + + rc = ipa_pm_deferred_deactivate(hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to deffered deactivate client 2 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail"); + return -EFAULT; + } + + rc = 
ipa_pm_deactivate_all_deferred(); + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("deactivate_all_deferred failed"); + return -EINVAL; + } + + msleep(2000); + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("clock vote went below 1"); + return -EINVAL; + } + + rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM); + return rc; +} + +/* test 5 */ +static int ipa_pm_ut_deactivate_after_activate(void *priv) +{ + + int rc = 0; + int hdl, vote; + struct callback_param user_data; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000} + }; + + struct ipa_pm_register_params USB_params = { + .name = "USB", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + .user_data = &user_data + }; + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm - rce %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + init_completion(&user_data.complete); + + rc = ipa_pm_register(&USB_params, &hdl); + if (rc) { + IPA_UT_ERR("fail to register client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl); + if (rc != -EINPROGRESS) { + IPA_UT_ERR("fail to queue work for client rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("queue activate work failed"); + return -EFAULT; + } + + rc = ipa_pm_deferred_deactivate(hdl); + if (rc) { + IPA_UT_ERR("fail to deffered deactivate client - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail"); + return -EFAULT; + } + + msleep(2000); + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + + rc = ipa_pm_activate(hdl); + if (rc != -EINPROGRESS) { + IPA_UT_ERR("fail to queue work for client rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("queue activate work failed"); + return -EFAULT; + } + + rc = ipa_pm_deactivate_sync(hdl); + if (rc) { + IPA_UT_ERR("fail to deactivate sync client - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("deactivate sync fail"); + return -EFAULT; + } + + msleep(2000); + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = clean_up(1, hdl); + return rc; +} + +/* test 6 */ +static int ipa_pm_ut_atomic_activate(void *priv) +{ + int rc = 0; + int hdl, vote; + struct callback_param user_data; + spinlock_t lock; + unsigned long flags; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000} + }; + + struct ipa_pm_register_params register_params = { + .name = "USB", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + .user_data = &user_data + }; + user_data.evt = IPA_PM_CB_EVENT_MAX; + + + spin_lock_init(&lock); + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + init_completion(&user_data.complete); + + rc = ipa_pm_register(®ister_params, &hdl); + if (rc) { + IPA_UT_ERR("fail to register client rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + 
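/* Simulate atomic context: take a spinlock with interrupts disabled
+ * and call ipa_pm_activate(), which must not sleep here; it is
+ * expected to return -EINPROGRESS and complete the activation from
+ * the PM workqueue, signalled through the activate callback.
+ */
+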
spin_lock_irqsave(&lock, flags); + rc = ipa_pm_activate(hdl); + if (rc != -EINPROGRESS) { + IPA_UT_ERR("fail to queue work - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("queue activate work failed"); + spin_unlock_irqrestore(&lock, flags); + return -EFAULT; + } + spin_unlock_irqrestore(&lock, flags); + + if (!wait_for_completion_timeout(&user_data.complete, + msecs_to_jiffies(2000))) { + IPA_UT_ERR("timeout waiting for activate_callback\n"); + IPA_UT_TEST_FAIL_REPORT("activate callback not called"); + return -ETIME; + } + + if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) { + IPA_UT_ERR("Callback = %d\n", user_data.evt); + IPA_UT_TEST_FAIL_REPORT("wrong callback called"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = clean_up(1, hdl); + return rc; +} + +/* test 7 */ +static int ipa_pm_ut_deactivate_loop(void *priv) +{ + int rc = 0; + int i, hdl_USB, hdl_WLAN, vote; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000} + }; + + struct ipa_pm_register_params USB_params = { + .name = "USB", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + struct ipa_pm_register_params WLAN_params = { + .name = "WLAN", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + rc = ipa_pm_register(&USB_params, &hdl_USB); + if (rc) { + IPA_UT_ERR("fail to register client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_USB, 1200); + if (rc) { + IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_register(&WLAN_params, &hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to register client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_WLAN, 800); + if (rc) { + IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_activate_sync(hdl_USB); + if (rc) { + IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("activate sync failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_activate(hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("activate no block failed"); + return -EFAULT; + } + + msleep(2000); + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 2) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_deferred_deactivate(hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to deffered deactivate client 2 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail"); + return -EFAULT; + } + + for (i = 0; i < 50; i++) { + IPA_UT_DBG("Loop iteration #%d\n", i); + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); 
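+ /* The pending deferred deactivation of WLAN must not have taken
+  * effect yet, so the clock vote is expected to stay at 2 (USB and
+  * WLAN) across all 50 activate/deferred-deactivate cycles.
+  */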
+ if (vote != 2) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_activate(hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to undo deactivate for client 2"); + IPA_UT_ERR(" - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("undo deactivate failed"); + return -EFAULT; + } + + rc = ipa_pm_deferred_deactivate(hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to deffered deactivate client"); + IPA_UT_ERR(" - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail"); + return -EFAULT; + } + } + + msleep(2000); + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + rc = clean_up(2, hdl_USB, hdl_WLAN); + return rc; + +} + + +/*test 8*/ +static int ipa_pm_ut_set_perf_profile(void *priv) +{ + int rc = 0; + int hdl_USB, hdl_WLAN, vote, idx; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000} + }; + + struct ipa_pm_register_params USB_params = { + .name = "USB", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + struct ipa_pm_register_params WLAN_params = { + .name = "WLAN", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + rc = ipa_pm_register(&USB_params, &hdl_USB); + if (rc) { + IPA_UT_ERR("fail to register client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_USB, 1200); + if (rc) { + IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_register(&WLAN_params, &hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to register client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_WLAN, 800); + if (rc) { + IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_activate_sync(hdl_USB); + if (rc) { + IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("activate sync failed"); + return -EFAULT; + } + + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 1) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + rc = ipa_pm_activate(hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("activate no block failed"); + return -EFAULT; + } + + msleep(2000); + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 2) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 2) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = ipa_pm_set_throughput(hdl_WLAN, 1200); + if (rc) { + IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if 
(idx != 3) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + rc = clean_up(2, hdl_USB, hdl_WLAN); + return rc; +} + +/*test 9*/ +static int ipa_pm_ut_group_tput(void *priv) +{ + int rc = 0; + int hdl_USB, hdl_WLAN, hdl_MODEM, vote, idx; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000} + }; + + struct ipa_pm_register_params USB_params = { + .name = "USB", + .group = IPA_PM_GROUP_APPS, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + struct ipa_pm_register_params WLAN_params = { + .name = "WLAN", + .group = IPA_PM_GROUP_APPS, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + struct ipa_pm_register_params MODEM_params = { + .name = "MODEM", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + rc = ipa_pm_register(&USB_params, &hdl_USB); + if (rc) { + IPA_UT_ERR("fail to register client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_register(&WLAN_params, &hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to register client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_USB, 500); + if (rc) { + IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_WLAN, 800); + if (rc) { + IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_activate_sync(hdl_USB); + if (rc) { + IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("activate sync failed"); + return -EFAULT; + } + + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 1) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + rc = ipa_pm_activate(hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("activate no block failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 2) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + msleep(2000); + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 1) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + rc = ipa_pm_register(&MODEM_params, &hdl_MODEM); + if (rc) { + IPA_UT_ERR("fail to register client 3 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_MODEM, 1000); + if (rc) { + IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl_MODEM); + if (rc) { + IPA_UT_ERR("fail to activate no block for client 3 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("activate no block failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 3) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock 
vote"); + return -EINVAL; + } + + msleep(2000); + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 2) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + rc = ipa_pm_deactivate_sync(hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("deactivate failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 2) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 2) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM); + return rc; + +} + +/*test 10*/ +static int ipa_pm_ut_skip_clk_vote_tput(void *priv) +{ + int rc = 0; + int hdl_USB, hdl_WLAN, hdl_MODEM, vote, idx; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000} + }; + + struct ipa_pm_register_params USB_params = { + .name = "USB", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + struct ipa_pm_register_params WLAN_params = { + .name = "WLAN", + .group = IPA_PM_GROUP_MODEM, + .skip_clk_vote = 1, + .callback = ipa_pm_call_back, + }; + + struct ipa_pm_register_params MODEM_params = { + .name = "MODEM", + .group = IPA_PM_GROUP_MODEM, + .skip_clk_vote = 1, + .callback = ipa_pm_call_back, + }; + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + rc = ipa_pm_register(&USB_params, &hdl_USB); + if (rc) { + IPA_UT_ERR("fail to register client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_register(&WLAN_params, &hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to register client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_USB, 1200); + if (rc) { + IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_WLAN, 800); + if (rc) { + IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_activate_sync(hdl_USB); + if (rc) { + IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("activate sync failed"); + return -EFAULT; + } + + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 1) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + rc = ipa_pm_activate(hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("activate no block failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + msleep(2000); + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 2) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + rc = ipa_pm_register(&MODEM_params, &hdl_MODEM); + if 
(rc) { + IPA_UT_ERR("fail to register client 3 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_MODEM, 2000); + if (rc) { + IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl_MODEM); + if (rc) { + IPA_UT_ERR("fail to activate no block for client 3 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("activate no block failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 1) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + msleep(2000); + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 3) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + + rc = ipa_pm_deactivate_sync(hdl_USB); + if (rc) { + IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("deactivate failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 0) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM); + return rc; +} + +/* Test 11 */ +static int ipa_pm_ut_simple_exception(void *priv) +{ + int rc = 0; + int hdl_USB, hdl_WLAN, hdl_MODEM, vote, idx; + + struct ipa_pm_exception exceptions = { + .usecase = "USB", + .threshold = {1000, 1800}, + }; + + struct ipa_pm_init_params init_params = { + .threshold_size = 2, + .default_threshold = {600, 1000}, + .exception_size = 1, + .exceptions[0] = exceptions, + }; + + struct ipa_pm_register_params USB_params = { + .name = "USB", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + struct ipa_pm_register_params WLAN_params = { + .name = "WLAN", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + struct ipa_pm_register_params MODEM_params = { + .name = "MODEM", + .group = IPA_PM_GROUP_DEFAULT, + .skip_clk_vote = 0, + .callback = ipa_pm_call_back, + }; + + rc = ipa_pm_init(&init_params); + if (rc) { + IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init params"); + return -EFAULT; + } + + rc = ipa_pm_register(&USB_params, &hdl_USB); + if (rc) { + IPA_UT_ERR("fail to register client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_register(&WLAN_params, &hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to register client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_USB, 1200); + if (rc) { + IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_WLAN, 2000); + if (rc) { + IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_activate_sync(hdl_USB); + if (rc) { + IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("activate sync failed"); + return -EFAULT; + } + + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 1) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return 
-EINVAL; + } + + rc = ipa_pm_activate(hdl_WLAN); + if (rc) { + IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("activate no block failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 2) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + msleep(2000); + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 2) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + rc = ipa_pm_register(&MODEM_params, &hdl_MODEM); + if (rc) { + IPA_UT_ERR("fail to register client 3 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to register"); + return -EFAULT; + } + + rc = ipa_pm_set_throughput(hdl_MODEM, 800); + if (rc) { + IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to set perf profile"); + return -EFAULT; + } + + rc = ipa_pm_activate(hdl_MODEM); + if (rc) { + IPA_UT_ERR("fail to activate no block for client 3 - rc = %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("activate no block failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 3) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + msleep(2000); + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 3) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + rc = ipa_pm_deactivate_sync(hdl_USB); + if (rc) { + IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("deactivate failed"); + return -EFAULT; + } + + vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt); + if (vote != 2) { + IPA_UT_ERR("clock vote is at %d\n", vote); + IPA_UT_TEST_FAIL_REPORT("wrong clock vote"); + return -EINVAL; + } + + idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx; + if (idx != 2) { + IPA_UT_ERR("clock plan is at %d\n", idx); + IPA_UT_TEST_FAIL_REPORT("wrong clock plan"); + return -EINVAL; + } + + rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM); + return rc; +} + +/* Suite definition block */ +IPA_UT_DEFINE_SUITE_START(pm, "PM for IPA", + ipa_pm_ut_setup, ipa_pm_ut_teardown) +{ + IPA_UT_ADD_TEST(single_registration, + "Single Registration/Basic Functions", + ipa_pm_ut_single_registration, + true, IPA_HW_v4_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(double_register_activate, + "double register/activate", + ipa_pm_ut_double_register_activate, + true, IPA_HW_v4_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(deferred_deactivate, + "Deferred_deactivate", + ipa_pm_ut_deferred_deactivate, + true, IPA_HW_v4_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(two_clients_activate, + "Activate two clients", + ipa_pm_ut_two_clients_activate, + true, IPA_HW_v4_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(deactivate_all_deferred, + "Deactivate all deferred", + ipa_pm_ut_deactivate_all_deferred, + true, IPA_HW_v4_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(deactivate_after_activate, + "Deactivate after activate", + ipa_pm_ut_deactivate_after_activate, + true, IPA_HW_v4_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(atomic_activate, + "Atomic activate", + ipa_pm_ut_atomic_activate, + true, IPA_HW_v4_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(deactivate_loop, + "Deactivate Loop", + ipa_pm_ut_deactivate_loop, + true, IPA_HW_v4_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(set_perf_profile, + "Set perf profile", + ipa_pm_ut_set_perf_profile, + true, IPA_HW_v4_0, IPA_HW_MAX), + 
IPA_UT_ADD_TEST(group_tput, + "Group throughputs", + ipa_pm_ut_group_tput, + true, IPA_HW_v4_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(skip_clk_vote_tput, + "Skip clock vote and tput", + ipa_pm_ut_skip_clk_vote_tput, + true, IPA_HW_v4_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(simple_exception, + "throughput while passing simple exception", + ipa_pm_ut_simple_exception, + true, IPA_HW_v4_0, IPA_HW_MAX), +} IPA_UT_DEFINE_SUITE_END(pm); diff --git a/drivers/platform/msm/ipa/test/ipa_test_dma.c b/drivers/platform/msm/ipa/test/ipa_test_dma.c new file mode 100644 index 000000000000..5eae9fe832eb --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_test_dma.c @@ -0,0 +1,1136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + +#include +#include "../ipa_v3/ipa_i.h" +#include "ipa_ut_framework.h" + +#define IPA_TEST_DMA_WQ_NAME_BUFF_SZ 64 +#define IPA_TEST_DMA_MT_TEST_NUM_WQ 200 +#define IPA_TEST_DMA_MEMCPY_BUFF_SIZE 16384 +#define IPA_TEST_DMA_MAX_PKT_SIZE 0xFF00 +#define IPA_DMA_TEST_LOOP_NUM 1000 +#define IPA_DMA_TEST_INT_LOOP_NUM 50 +#define IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM 128 +#define IPA_DMA_RUN_TEST_UNIT_IN_LOOP(test_unit, iters, rc, args...) \ + do { \ + int __i; \ + for (__i = 0; __i < iters; __i++) { \ + IPA_UT_LOG(#test_unit " START iter %d\n", __i); \ + rc = test_unit(args); \ + if (!rc) \ + continue; \ + IPA_UT_LOG(#test_unit " failed %d\n", rc); \ + break; \ + } \ + } while (0) + +/** + * struct ipa_test_dma_async_user_data - user_data structure for async memcpy + * @src_mem: source memory buffer + * @dest_mem: destination memory buffer + * @call_serial_number: Id of the caller + * @copy_done: Completion object + */ +struct ipa_test_dma_async_user_data { + struct ipa_mem_buffer src_mem; + struct ipa_mem_buffer dest_mem; + int call_serial_number; + struct completion copy_done; +}; + +/** + * ipa_test_dma_setup() - Suite setup function + */ +static int ipa_test_dma_setup(void **ppriv) +{ + int rc; + + IPA_UT_DBG("Start Setup\n"); + + if (!ipa3_ctx) { + IPA_UT_ERR("No IPA ctx\n"); + return -EINVAL; + } + + rc = ipa_dma_init(); + if (rc) + IPA_UT_ERR("Fail to init ipa_dma - return code %d\n", rc); + else + IPA_UT_DBG("ipa_dma_init() Completed successfully!\n"); + + *ppriv = NULL; + + return rc; +} + +/** + * ipa_test_dma_teardown() - Suite teardown function + */ +static int ipa_test_dma_teardown(void *priv) +{ + IPA_UT_DBG("Start Teardown\n"); + ipa_dma_destroy(); + return 0; +} + +static int ipa_test_dma_alloc_buffs(struct ipa_mem_buffer *src, + struct ipa_mem_buffer *dest, + int size) +{ + int i; + static int val = 1; + int rc; + + val++; + src->size = size; + src->base = dma_alloc_coherent(ipa3_ctx->pdev, src->size, + &src->phys_base, GFP_KERNEL); + if (!src->base) { + IPA_UT_LOG("fail to alloc dma mem %d bytes\n", size); + IPA_UT_TEST_FAIL_REPORT("fail to alloc dma mem"); + return -ENOMEM; + } + + dest->size = size; + dest->base = dma_zalloc_coherent(ipa3_ctx->pdev, dest->size, + &dest->phys_base, GFP_KERNEL); + if (!dest->base) { + IPA_UT_LOG("fail to alloc dma mem %d bytes\n", size); + IPA_UT_TEST_FAIL_REPORT("fail to alloc dma mem"); + rc = -ENOMEM; + goto fail_alloc_dest; + } + + for (i = 0; i < src->size; i++) + memset(src->base + i, (val + i) & 0xFF, 1); + rc = memcmp(dest->base, src->base, dest->size); + if (rc == 0) { + IPA_UT_LOG("dest & src buffers are equal\n"); + IPA_UT_TEST_FAIL_REPORT("dest & src buffers are equal"); + rc = -EFAULT; + goto fail_buf_cmp; + } + + return 0; + +fail_buf_cmp: + 
dma_free_coherent(ipa3_ctx->pdev, dest->size, dest->base, + dest->phys_base); +fail_alloc_dest: + dma_free_coherent(ipa3_ctx->pdev, src->size, src->base, + src->phys_base); + return rc; +} + +static void ipa_test_dma_destroy_buffs(struct ipa_mem_buffer *src, + struct ipa_mem_buffer *dest) +{ + dma_free_coherent(ipa3_ctx->pdev, src->size, src->base, + src->phys_base); + dma_free_coherent(ipa3_ctx->pdev, dest->size, dest->base, + dest->phys_base); +} + +/** + * ipa_test_dma_memcpy_sync() - memcpy in sync mode + * + * @size: buffer size + * @expect_fail: test expects the memcpy to fail + * + * To be run during tests + * 1. Alloc src and dst buffers + * 2. sync memcpy src to dst via dma + * 3. compare src and dts if memcpy succeeded as expected + */ +static int ipa_test_dma_memcpy_sync(int size, bool expect_fail) +{ + int rc = 0; + int i; + struct ipa_mem_buffer src_mem; + struct ipa_mem_buffer dest_mem; + u8 *src; + u8 *dest; + + rc = ipa_test_dma_alloc_buffs(&src_mem, &dest_mem, size); + if (rc) { + IPA_UT_LOG("fail to alloc buffers\n"); + IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers"); + return rc; + } + + rc = ipa_dma_sync_memcpy(dest_mem.phys_base, src_mem.phys_base, size); + if (!expect_fail && rc) { + IPA_UT_LOG("fail to sync memcpy - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + goto free_buffs; + } + if (expect_fail && !rc) { + IPA_UT_LOG("sync memcpy succeeded while expected to fail\n"); + IPA_UT_TEST_FAIL_REPORT( + "sync memcpy succeeded while expected to fail"); + rc = -EFAULT; + goto free_buffs; + } + + if (!rc) { + /* if memcpy succeeded, compare the buffers */ + rc = memcmp(dest_mem.base, src_mem.base, size); + if (rc) { + IPA_UT_LOG("BAD memcpy - buffs are not equals\n"); + IPA_UT_TEST_FAIL_REPORT( + "BAD memcpy - buffs are not equals"); + src = src_mem.base; + dest = dest_mem.base; + for (i = 0; i < size; i++) { + if (*(src + i) != *(dest + i)) { + IPA_UT_LOG("byte: %d 0x%x != 0x%x\n", + i, *(src + i), *(dest + i)); + } + } + } + } else { + /* if memcpy failed as expected, update the rc */ + rc = 0; + } + +free_buffs: + ipa_test_dma_destroy_buffs(&src_mem, &dest_mem); + return rc; +} + +static void ipa_test_dma_async_memcpy_cb(void *comp_obj) +{ + struct completion *xfer_done; + + if (!comp_obj) { + IPA_UT_ERR("Invalid Input\n"); + return; + } + xfer_done = (struct completion *)comp_obj; + complete(xfer_done); +} + +static void ipa_test_dma_async_memcpy_cb_user_data(void *user_param) +{ + int rc; + int i; + u8 *src; + u8 *dest; + struct ipa_test_dma_async_user_data *udata = + (struct ipa_test_dma_async_user_data *)user_param; + + if (!udata) { + IPA_UT_ERR("Invalid user param\n"); + return; + } + + rc = memcmp(udata->dest_mem.base, udata->src_mem.base, + udata->src_mem.size); + if (rc) { + IPA_UT_LOG("BAD memcpy - buffs are not equal sn=%d\n", + udata->call_serial_number); + IPA_UT_TEST_FAIL_REPORT( + "BAD memcpy - buffs are not equal"); + src = udata->src_mem.base; + dest = udata->dest_mem.base; + for (i = 0; i < udata->src_mem.size; i++) { + if (*(src + i) != *(dest + i)) { + IPA_UT_ERR("byte: %d 0x%x != 0x%x\n", i, + *(src + i), *(dest + i)); + } + } + return; + } + + IPA_UT_LOG("Notify on async memcopy sn=%d\n", + udata->call_serial_number); + complete(&(udata->copy_done)); +} + +/** + * ipa_test_dma_memcpy_async() - memcpy in async mode + * + * @size: buffer size + * @expect_fail: test expected the memcpy to fail + * + * To be run during tests + * 1. Alloc src and dst buffers + * 2. async memcpy src to dst via dma and wait for completion + * 3. 
compare src and dts if memcpy succeeded as expected + */ +static int ipa_test_dma_memcpy_async(int size, bool expect_fail) +{ + int rc = 0; + int i; + struct ipa_mem_buffer src_mem; + struct ipa_mem_buffer dest_mem; + u8 *src; + u8 *dest; + struct completion xfer_done; + + rc = ipa_test_dma_alloc_buffs(&src_mem, &dest_mem, size); + if (rc) { + IPA_UT_LOG("fail to alloc buffers\n"); + IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers"); + return rc; + } + + init_completion(&xfer_done); + rc = ipa_dma_async_memcpy(dest_mem.phys_base, src_mem.phys_base, size, + ipa_test_dma_async_memcpy_cb, &xfer_done); + if (!expect_fail && rc) { + IPA_UT_LOG("fail to initiate async memcpy - rc=%d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("async memcpy initiate failed"); + goto free_buffs; + } + if (expect_fail && !rc) { + IPA_UT_LOG("async memcpy succeeded while expected to fail\n"); + IPA_UT_TEST_FAIL_REPORT( + "async memcpy succeeded while expected to fail"); + rc = -EFAULT; + goto free_buffs; + } + + if (!rc) { + /* if memcpy succeeded, compare the buffers */ + wait_for_completion(&xfer_done); + rc = memcmp(dest_mem.base, src_mem.base, size); + if (rc) { + IPA_UT_LOG("BAD memcpy - buffs are not equals\n"); + IPA_UT_TEST_FAIL_REPORT( + "BAD memcpy - buffs are not equals"); + src = src_mem.base; + dest = dest_mem.base; + for (i = 0; i < size; i++) { + if (*(src + i) != *(dest + i)) { + IPA_UT_LOG("byte: %d 0x%x != 0x%x\n", + i, *(src + i), *(dest + i)); + } + } + } + } else { + /* if memcpy failed as expected, update the rc */ + rc = 0; + } + +free_buffs: + ipa_test_dma_destroy_buffs(&src_mem, &dest_mem); + return rc; +} + +/** + * ipa_test_dma_sync_async_memcpy() - memcpy in sync and then async mode + * + * @size: buffer size + * + * To be run during tests + * 1. several sync memcopy in row + * 2. several async memcopy - + * back-to-back (next async try initiated after prev is completed) + */ +static int ipa_test_dma_sync_async_memcpy(int size) +{ + int rc; + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_sync, + IPA_DMA_TEST_INT_LOOP_NUM, rc, size, false); + if (rc) { + IPA_UT_LOG("sync memcopy fail rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcopy fail"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_async, + IPA_DMA_TEST_INT_LOOP_NUM, rc, size, false); + if (rc) { + IPA_UT_LOG("async memcopy fail rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("async memcopy fail"); + return rc; + } + + return 0; +} + +/** + * TEST: test enable/disable dma + * 1. enable dma + * 2. disable dma + */ +static int ipa_test_dma_enable_disable(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: test init/enable/disable/destroy dma + * 1. init dma + * 2. enable dma + * 3. disable dma + * 4. 
destroy dma + */ +static int ipa_test_dma_init_enbl_disable_destroy(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_init(); + if (rc) { + IPA_UT_LOG("DMA Init failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail init dma"); + return rc; + } + + rc = ipa_dma_enable(); + if (rc) { + ipa_dma_destroy(); + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + ipa_dma_destroy(); + + return 0; +} + +/** + * TEST: test enablex2/disablex2 dma + * 1. enable dma + * 2. enable dma + * 3. disable dma + * 4. disable dma + */ +static int ipa_test_dma_enblx2_disablex2(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + ipa_dma_destroy(); + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_dma_enable(); + if (rc) { + ipa_dma_destroy(); + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: memcpy before dma enable + * + * 1. sync memcpy - should fail + * 2. async memcpy - should fail + */ +static int ipa_test_dma_memcpy_before_enable(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, true); + if (rc) { + IPA_UT_LOG("sync memcpy succeeded unexpectedly rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy succeeded unexpectedly"); + return rc; + } + + rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, true); + if (rc) { + IPA_UT_LOG("async memcpy succeeded unexpectedly rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy succeeded unexpectedly"); + return rc; + } + + return 0; +} + +/** + * TEST: Sync memory copy + * + * 1. dma enable + * 2. sync memcpy + * 3. dma disable + */ +static int ipa_test_dma_sync_memcpy(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Small sync memory copy + * + * 1. dma enable + * 2. small sync memcpy + * 3. small sync memcpy + * 4. 
dma disable + */ +static int ipa_test_dma_sync_memcpy_small(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_sync(4, false); + if (rc) { + IPA_UT_LOG("sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_test_dma_memcpy_sync(7, false); + if (rc) { + IPA_UT_LOG("sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Async memory copy + * + * 1. dma enable + * 2. async memcpy + * 3. dma disable + */ +static int ipa_test_dma_async_memcpy(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("async memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Small async memory copy + * + * 1. dma enable + * 2. async memcpy + * 3. async memcpy + * 4. dma disable + */ +static int ipa_test_dma_async_memcpy_small(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_async(4, false); + if (rc) { + IPA_UT_LOG("async memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_test_dma_memcpy_async(7, false); + if (rc) { + IPA_UT_LOG("async memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Iteration of sync memory copy + * + * 1. dma enable + * 2. sync memcpy in loop - in row + * 3. dma disable + */ +static int ipa_test_dma_sync_memcpy_in_loop(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_sync, + IPA_DMA_TEST_LOOP_NUM, rc, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("Iterations of sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("Iterations of sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Iteration of async memory copy + * + * 1. dma enable + * 2. 
async memcpy in loop - back-to-back + * next async copy is initiated once previous one completed + * 3. dma disable + */ +static int ipa_test_dma_async_memcpy_in_loop(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_async, + IPA_DMA_TEST_LOOP_NUM, rc, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("Iterations of async memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("Iterations of async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Iteration of interleaved sync and async memory copy + * + * 1. dma enable + * 2. sync and async memcpy in loop - interleaved + * 3. dma disable + */ +static int ipa_test_dma_interleaved_sync_async_memcpy_in_loop(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_sync_async_memcpy, + IPA_DMA_TEST_INT_LOOP_NUM, rc, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE); + if (rc) { + IPA_UT_LOG( + "Iterations of interleaved sync async memcpy failed rc=%d\n" + , rc); + IPA_UT_TEST_FAIL_REPORT( + "Iterations of interleaved sync async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +static atomic_t ipa_test_dma_mt_test_pass; + +struct one_memcpy_work { + struct work_struct work_s; + int size; +}; + +static void ipa_test_dma_wrapper_test_one_sync(struct work_struct *work) +{ + int rc; + struct one_memcpy_work *data = + container_of(work, struct one_memcpy_work, work_s); + + rc = ipa_test_dma_memcpy_sync(data->size, false); + if (rc) { + IPA_UT_LOG("fail sync memcpy from thread rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail sync memcpy from thread"); + return; + } + atomic_inc(&ipa_test_dma_mt_test_pass); +} + +static void ipa_test_dma_wrapper_test_one_async(struct work_struct *work) +{ + int rc; + struct one_memcpy_work *data = + container_of(work, struct one_memcpy_work, work_s); + + rc = ipa_test_dma_memcpy_async(data->size, false); + if (rc) { + IPA_UT_LOG("fail async memcpy from thread rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail async memcpy from thread"); + return; + } + atomic_inc(&ipa_test_dma_mt_test_pass); +} + +/** + * TEST: Multiple threads running sync and sync mem copy + * + * 1. dma enable + * 2. In-loop + * 2.1 create wq for sync memcpy + * 2.2 create wq for async memcpy + * 2.3 queue sync memcpy work + * 2.4 queue async memcoy work + * 3. In-loop + * 3.1 flush and destroy wq sync + * 3.2 flush and destroy wq async + * 3. 
dma disable + */ +static int ipa_test_dma_mt_sync_async(void *priv) +{ + int rc; + int i; + static struct workqueue_struct *wq_sync[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + static struct workqueue_struct *wq_async[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + static struct one_memcpy_work async[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + static struct one_memcpy_work sync[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + char buff[IPA_TEST_DMA_WQ_NAME_BUFF_SZ]; + + memset(wq_sync, 0, sizeof(wq_sync)); + memset(wq_async, 0, sizeof(wq_async)); + memset(async, 0, sizeof(async)); + memset(sync, 0, sizeof(sync)); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + atomic_set(&ipa_test_dma_mt_test_pass, 0); + for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) { + snprintf(buff, sizeof(buff), "ipa_test_dmaSwq%d", i); + wq_sync[i] = create_singlethread_workqueue(buff); + if (!wq_sync[i]) { + IPA_UT_ERR("failed to create sync wq#%d\n", i); + rc = -EFAULT; + goto fail_create_wq; + } + snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipa_test_dmaAwq%d", i); + wq_async[i] = create_singlethread_workqueue(buff); + if (!wq_async[i]) { + IPA_UT_ERR("failed to create async wq#%d\n", i); + rc = -EFAULT; + goto fail_create_wq; + } + + if (i % 2) { + sync[i].size = IPA_TEST_DMA_MEMCPY_BUFF_SIZE; + async[i].size = IPA_TEST_DMA_MEMCPY_BUFF_SIZE; + } else { + sync[i].size = 4; + async[i].size = 4; + } + INIT_WORK(&sync[i].work_s, ipa_test_dma_wrapper_test_one_sync); + queue_work(wq_sync[i], &sync[i].work_s); + INIT_WORK(&async[i].work_s, + ipa_test_dma_wrapper_test_one_async); + queue_work(wq_async[i], &async[i].work_s); + } + + for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) { + flush_workqueue(wq_sync[i]); + destroy_workqueue(wq_sync[i]); + flush_workqueue(wq_async[i]); + destroy_workqueue(wq_async[i]); + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + if ((2 * IPA_TEST_DMA_MT_TEST_NUM_WQ) != + atomic_read(&ipa_test_dma_mt_test_pass)) { + IPA_UT_LOG( + "Multi-threaded sync/async memcopy failed passed=%d\n" + , atomic_read(&ipa_test_dma_mt_test_pass)); + IPA_UT_TEST_FAIL_REPORT( + "Multi-threaded sync/async memcopy failed"); + return -EFAULT; + } + + return 0; + +fail_create_wq: + (void)ipa_dma_disable(); + for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) { + if (wq_sync[i]) + destroy_workqueue(wq_sync[i]); + if (wq_async[i]) + destroy_workqueue(wq_async[i]); + } + + return rc; +} + +/** + * TEST: Several parallel async memory copy iterations + * + * 1. create several user_data structures - one per iteration + * 2. allocate buffs. Give slice for each iteration + * 3. iterations of async mem copy + * 4. wait for all to complete + 5. 
dma disable + */ +static int ipa_test_dma_parallel_async_memcpy_in_loop(void *priv) +{ + int rc; + struct ipa_test_dma_async_user_data *udata; + struct ipa_mem_buffer all_src_mem; + struct ipa_mem_buffer all_dest_mem; + int i; + bool is_fail = false; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + udata = kzalloc(IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM * + sizeof(struct ipa_test_dma_async_user_data), GFP_KERNEL); + if (!udata) { + IPA_UT_ERR("fail allocate user_data array\n"); + (void)ipa_dma_disable(); + return -ENOMEM; + } + + rc = ipa_test_dma_alloc_buffs(&all_src_mem, &all_dest_mem, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE); + if (rc) { + IPA_UT_LOG("fail to alloc buffers\n"); + IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers"); + kfree(udata); + (void)ipa_dma_disable(); + return rc; + } + + for (i = 0 ; i < IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM ; i++) { + udata[i].src_mem.size = + IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM; + udata[i].src_mem.base = all_src_mem.base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + udata[i].src_mem.phys_base = all_src_mem.phys_base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + + udata[i].dest_mem.size = + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + udata[i].dest_mem.base = all_dest_mem.base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + udata[i].dest_mem.phys_base = all_dest_mem.phys_base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + + udata[i].call_serial_number = i + 1; + init_completion(&(udata[i].copy_done)); + rc = ipa_dma_async_memcpy(udata[i].dest_mem.phys_base, + udata[i].src_mem.phys_base, + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM), + ipa_test_dma_async_memcpy_cb_user_data, &udata[i]); + if (rc) { + IPA_UT_LOG("async memcpy initiation fail i=%d rc=%d\n", + i, rc); + is_fail = true; + } + } + + for (i = 0; i < IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM ; i++) + wait_for_completion(&udata[i].copy_done); + + ipa_test_dma_destroy_buffs(&all_src_mem, &all_dest_mem); + kfree(udata); + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + if (is_fail) { + IPA_UT_LOG("async memcopy failed\n"); + IPA_UT_TEST_FAIL_REPORT("async memcopy failed"); + return -EFAULT; + } + + return 0; +} + +/** + * TEST: Sync memory copy + * + * 1. dma enable + * 2. sync memcpy with max packet size + * 3. 
dma disable + */ +static int ipa_test_dma_sync_memcpy_max_pkt_size(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MAX_PKT_SIZE, false); + if (rc) { + IPA_UT_LOG("sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/* Suite definition block */ +IPA_UT_DEFINE_SUITE_START(dma, "DMA for GSI", + ipa_test_dma_setup, ipa_test_dma_teardown) +{ + IPA_UT_ADD_TEST(init_enable_disable_destroy, + "Init->Enable->Disable->Destroy", + ipa_test_dma_enable_disable, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(initx2_enable_disable_destroyx2, + "Initx2->Enable->Disable->Destroyx2", + ipa_test_dma_init_enbl_disable_destroy, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(init_enablex2_disablex2_destroy, + "Init->Enablex2->Disablex2->Destroy", + ipa_test_dma_enblx2_disablex2, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(memcpy_before_enable, + "Call memcpy before dma enable and expect it to fail", + ipa_test_dma_memcpy_before_enable, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(sync_memcpy, + "Sync memory copy", + ipa_test_dma_sync_memcpy, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(sync_memcpy_small, + "Small Sync memory copy", + ipa_test_dma_sync_memcpy_small, + true, IPA_HW_v3_5, IPA_HW_MAX), + IPA_UT_ADD_TEST(async_memcpy, + "Async memory copy", + ipa_test_dma_async_memcpy, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(async_memcpy_small, + "Small async memory copy", + ipa_test_dma_async_memcpy_small, + true, IPA_HW_v3_5, IPA_HW_MAX), + IPA_UT_ADD_TEST(sync_memcpy_in_loop, + "Several sync memory copy iterations", + ipa_test_dma_sync_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(async_memcpy_in_loop, + "Several async memory copy iterations", + ipa_test_dma_async_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(interleaved_sync_async_memcpy_in_loop, + "Several interleaved sync and async memory copy iterations", + ipa_test_dma_interleaved_sync_async_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(multi_threaded_multiple_sync_async_memcpy, + "Several multi-threaded sync and async memory copy iterations", + ipa_test_dma_mt_sync_async, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(parallel_async_memcpy_in_loop, + "Several parallel async memory copy iterations", + ipa_test_dma_parallel_async_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(sync_memcpy_max_pkt_size, + "Sync memory copy with max packet size", + ipa_test_dma_sync_memcpy_max_pkt_size, + true, IPA_HW_v3_0, IPA_HW_MAX), +} IPA_UT_DEFINE_SUITE_END(dma); diff --git a/drivers/platform/msm/ipa/test/ipa_test_example.c b/drivers/platform/msm/ipa/test/ipa_test_example.c new file mode 100644 index 000000000000..ebbe666e7c8a --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_test_example.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + +#include "ipa_ut_framework.h" + +/** + * Example IPA Unit-test suite + * To be a reference for writing new suites and tests. 
+ * This suite is also used as unit-test for the testing framework itself. + * Structure: + * 1- Define the setup and teardown functions + * Not Mandatory. Null may be used as well + * 2- For each test, define its Run() function + * 3- Use IPA_UT_DEFINE_SUITE_START() to start defining the suite + * 4- use IPA_UT_ADD_TEST() for adding tests within + * the suite definition block + * 5- IPA_UT_DEFINE_SUITE_END() close the suite definition + */ + +static int ipa_test_example_dummy; + +static int ipa_test_example_suite_setup(void **ppriv) +{ + IPA_UT_DBG("Start Setup - set 0x1234F\n"); + + ipa_test_example_dummy = 0x1234F; + *ppriv = (void *)&ipa_test_example_dummy; + + return 0; +} + +static int ipa_test_example_teardown(void *priv) +{ + IPA_UT_DBG("Start Teardown\n"); + IPA_UT_DBG("priv=0x%pK - value=0x%x\n", priv, *((int *)priv)); + + return 0; +} + +static int ipa_test_example_test1(void *priv) +{ + IPA_UT_LOG("priv=0x%pK - value=0x%x\n", priv, *((int *)priv)); + ipa_test_example_dummy++; + + return 0; +} + +static int ipa_test_example_test2(void *priv) +{ + IPA_UT_LOG("priv=0x%pK - value=0x%x\n", priv, *((int *)priv)); + ipa_test_example_dummy++; + + return 0; +} + +static int ipa_test_example_test3(void *priv) +{ + IPA_UT_LOG("priv=0x%pK - value=0x%x\n", priv, *((int *)priv)); + ipa_test_example_dummy++; + + return 0; +} + +static int ipa_test_example_test4(void *priv) +{ + IPA_UT_LOG("priv=0x%pK - value=0x%x\n", priv, *((int *)priv)); + ipa_test_example_dummy++; + + IPA_UT_TEST_FAIL_REPORT("failed on test"); + + return -EFAULT; +} + +/* Suite definition block */ +IPA_UT_DEFINE_SUITE_START(example, "Example suite", + ipa_test_example_suite_setup, ipa_test_example_teardown) +{ + IPA_UT_ADD_TEST(test1, "This is test number 1", + ipa_test_example_test1, false, IPA_HW_v1_0, IPA_HW_MAX), + + IPA_UT_ADD_TEST(test2, "This is test number 2", + ipa_test_example_test2, false, IPA_HW_v1_0, IPA_HW_MAX), + + IPA_UT_ADD_TEST(test3, "This is test number 3", + ipa_test_example_test3, false, IPA_HW_v1_1, IPA_HW_v2_6), + + IPA_UT_ADD_TEST(test4, "This is test number 4", + ipa_test_example_test4, false, IPA_HW_v1_1, IPA_HW_MAX), + +} IPA_UT_DEFINE_SUITE_END(example); diff --git a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c new file mode 100644 index 000000000000..b5603e18169b --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include "ipa_ut_framework.h" +#include + +struct ipa_test_hw_stats_ctx { + u32 odu_prod_hdl; + u32 odu_cons_hdl; + u32 rt4_usb; + u32 rt6_usb; + u32 rt4_odu_cons; + u32 rt6_odu_cons; + atomic_t odu_pending; +}; + +static struct ipa_test_hw_stats_ctx *ctx; + +static int ipa_test_hw_stats_suite_setup(void **ppriv) +{ + IPA_UT_DBG("Start Setup\n"); + + if (!ctx) + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + + return 0; +} + +static int ipa_test_hw_stats_suite_teardown(void *priv) +{ + IPA_UT_DBG("Start Teardown\n"); + + return 0; +} + +static void odu_prod_notify(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + + switch (evt) { + case IPA_RECEIVE: + dev_kfree_skb_any(skb); + break; + case IPA_WRITE_DONE: + atomic_dec(&ctx->odu_pending); + dev_kfree_skb_any(skb); + break; + default: + IPA_UT_ERR("unexpected evt %d\n", evt); + } +} +static void odu_cons_notify(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + int ret; + + switch (evt) { + case IPA_RECEIVE: + if (atomic_read(&ctx->odu_pending) >= 64) + msleep(20); + atomic_inc(&ctx->odu_pending); + skb_put(skb, 100); + ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, NULL); + while (ret) { + msleep(100); + ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, NULL); + } + break; + case IPA_WRITE_DONE: + dev_kfree_skb_any(skb); + break; + default: + IPA_UT_ERR("unexpected evt %d\n", evt); + } +} + +static int ipa_test_hw_stats_configure(void *priv) +{ + struct ipa_sys_connect_params odu_prod_params; + struct ipa_sys_connect_params odu_emb_cons_params; + int res; + + /* first connect all additional pipe */ + memset(&odu_prod_params, 0, sizeof(odu_prod_params)); + memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params)); + + odu_prod_params.client = IPA_CLIENT_ODU_PROD; + odu_prod_params.desc_fifo_sz = 0x1000; + odu_prod_params.priv = NULL; + odu_prod_params.notify = odu_prod_notify; + res = ipa_setup_sys_pipe(&odu_prod_params, + &ctx->odu_prod_hdl); + if (res) { + IPA_UT_ERR("fail to setup sys pipe ODU_PROD %d\n", res); + return res; + } + + odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS; + odu_emb_cons_params.desc_fifo_sz = 0x1000; + odu_emb_cons_params.priv = NULL; + odu_emb_cons_params.notify = odu_cons_notify; + res = ipa_setup_sys_pipe(&odu_emb_cons_params, + &ctx->odu_cons_hdl); + if (res) { + IPA_UT_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res); + ipa_teardown_sys_pipe(ctx->odu_prod_hdl); + return res; + } + + IPA_UT_INFO("Configured. 
Please connect USB RNDIS now\n"); + + return 0; +} + +static int ipa_test_hw_stats_add_FnR(void *priv) +{ + struct ipa_ioc_add_rt_rule *rt_rule; + struct ipa_ioc_add_flt_rule *flt_rule; + struct ipa_ioc_get_rt_tbl rt_lookup; + int ret; + + rt_rule = kzalloc(sizeof(*rt_rule) + 1 * sizeof(struct ipa_rt_rule_add), + GFP_KERNEL); + if (!rt_rule) { + IPA_UT_DBG("no mem\n"); + return -ENOMEM; + } + + flt_rule = kzalloc(sizeof(*flt_rule) + + 1 * sizeof(struct ipa_flt_rule_add), GFP_KERNEL); + if (!flt_rule) { + IPA_UT_DBG("no mem\n"); + ret = -ENOMEM; + goto free_rt; + } + + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + rt_lookup.ip = rt_rule->ip; + strlcpy(rt_rule->rt_tbl_name, "V4_RT_TO_USB_CONS", + IPA_RESOURCE_NAME_MAX); + strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX); + rt_rule->num_rules = 1; + rt_rule->rules[0].rule.dst = IPA_CLIENT_USB_CONS; + rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + rt_rule->rules[0].rule.attrib.dst_port = 5002; + rt_rule->rules[0].rule.hashable = true; + if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + if (ipa_get_rt_tbl(&rt_lookup)) { + IPA_UT_ERR("failed to query V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + ctx->rt4_usb = rt_lookup.hdl; + + memset(rt_rule, 0, sizeof(*rt_rule)); + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v6; + rt_lookup.ip = rt_rule->ip; + strlcpy(rt_rule->rt_tbl_name, "V6_RT_TO_USB_CONS", + IPA_RESOURCE_NAME_MAX); + strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX); + rt_rule->num_rules = 1; + rt_rule->rules[0].rule.dst = IPA_CLIENT_USB_CONS; + rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + rt_rule->rules[0].rule.attrib.dst_port = 5002; + rt_rule->rules[0].rule.hashable = true; + if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + if (ipa_get_rt_tbl(&rt_lookup)) { + IPA_UT_ERR("failed to query V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + ctx->rt6_usb = rt_lookup.hdl; + + memset(rt_rule, 0, sizeof(*rt_rule)); + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + rt_lookup.ip = rt_rule->ip; + strlcpy(rt_rule->rt_tbl_name, "V4_RT_TO_ODU_CONS", + IPA_RESOURCE_NAME_MAX); + strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX); + rt_rule->num_rules = 1; + rt_rule->rules[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS; + rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + rt_rule->rules[0].rule.attrib.dst_port = 5002; + rt_rule->rules[0].rule.hashable = true; + if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + if (ipa_get_rt_tbl(&rt_lookup)) { + IPA_UT_ERR("failed to query V4 rules\n"); + return -EFAULT; + } + ctx->rt4_odu_cons = rt_lookup.hdl; + + memset(rt_rule, 0, sizeof(*rt_rule)); + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v6; + rt_lookup.ip = rt_rule->ip; + strlcpy(rt_rule->rt_tbl_name, "V6_RT_TO_ODU_CONS", + IPA_RESOURCE_NAME_MAX); + strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX); + rt_rule->num_rules = 1; + rt_rule->rules[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS; + rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + rt_rule->rules[0].rule.attrib.dst_port = 5002; + rt_rule->rules[0].rule.hashable = true; + if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret 
= -EFAULT; + goto free_flt; + } + if (ipa_get_rt_tbl(&rt_lookup)) { + IPA_UT_ERR("failed to query V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + ctx->rt6_odu_cons = rt_lookup.hdl; + + flt_rule->commit = 1; + flt_rule->ip = IPA_IP_v4; + flt_rule->ep = IPA_CLIENT_USB_PROD; + flt_rule->num_rules = 1; + flt_rule->rules[0].at_rear = 1; + flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING; + flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + flt_rule->rules[0].rule.attrib.dst_port = 5002; + flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt4_odu_cons; + flt_rule->rules[0].rule.hashable = 1; + if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + + memset(flt_rule, 0, sizeof(*flt_rule)); + flt_rule->commit = 1; + flt_rule->ip = IPA_IP_v6; + flt_rule->ep = IPA_CLIENT_USB_PROD; + flt_rule->num_rules = 1; + flt_rule->rules[0].at_rear = 1; + flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING; + flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + flt_rule->rules[0].rule.attrib.dst_port = 5002; + flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt6_odu_cons; + flt_rule->rules[0].rule.hashable = 1; + if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V6 rules\n"); + ret = -EFAULT; + goto free_flt; + } + + memset(flt_rule, 0, sizeof(*flt_rule)); + flt_rule->commit = 1; + flt_rule->ip = IPA_IP_v4; + flt_rule->ep = IPA_CLIENT_ODU_PROD; + flt_rule->num_rules = 1; + flt_rule->rules[0].at_rear = 1; + flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING; + flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + flt_rule->rules[0].rule.attrib.dst_port = 5002; + flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt4_usb; + flt_rule->rules[0].rule.hashable = 1; + if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + + memset(flt_rule, 0, sizeof(*flt_rule)); + flt_rule->commit = 1; + flt_rule->ip = IPA_IP_v6; + flt_rule->ep = IPA_CLIENT_ODU_PROD; + flt_rule->num_rules = 1; + flt_rule->rules[0].at_rear = 1; + flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING; + flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + flt_rule->rules[0].rule.attrib.dst_port = 5002; + flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt6_usb; + flt_rule->rules[0].rule.hashable = 1; + if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V6 rules\n"); + ret = -EFAULT; + goto free_flt; + } + + IPA_UT_INFO( + "Rules added. Please start data transfer on ports 5001/5002\n"); + ret = 0; +free_flt: + kfree(flt_rule); +free_rt: + kfree(rt_rule); + return ret; + +} + +/* Suite definition block */ +IPA_UT_DEFINE_SUITE_START(hw_stats, "HW stats test", + ipa_test_hw_stats_suite_setup, ipa_test_hw_stats_suite_teardown) +{ + IPA_UT_ADD_TEST(configure, "Configure the setup", + ipa_test_hw_stats_configure, false, IPA_HW_v4_0, IPA_HW_MAX), + + IPA_UT_ADD_TEST(add_rules, "Add FLT and RT rules", + ipa_test_hw_stats_add_FnR, false, IPA_HW_v4_0, IPA_HW_MAX), + +} IPA_UT_DEFINE_SUITE_END(hw_stats); diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c new file mode 100644 index 000000000000..337472dceac2 --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c @@ -0,0 +1,3326 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include "../ipa_v3/ipa_i.h" +#include "../../gsi/gsi.h" +#include "../../gsi/gsi_reg.h" +#include "ipa_ut_framework.h" + +#define IPA_MHI_TEST_NUM_CHANNELS 8 +#define IPA_MHI_TEST_NUM_EVENT_RINGS 8 +#define IPA_MHI_TEST_FIRST_CHANNEL_ID 100 +#define IPA_MHI_TEST_FIRST_EVENT_RING_ID 100 +#define IPA_MHI_TEST_LAST_CHANNEL_ID \ + (IPA_MHI_TEST_FIRST_CHANNEL_ID + IPA_MHI_TEST_NUM_CHANNELS - 1) +#define IPA_MHI_TEST_LAST_EVENT_RING_ID \ + (IPA_MHI_TEST_FIRST_EVENT_RING_ID + IPA_MHI_TEST_NUM_EVENT_RINGS - 1) +#define IPA_MHI_TEST_MAX_DATA_BUF_SIZE 1500 +#define IPA_MHI_TEST_SEQ_TYPE_DMA 0x00000000 + +#define IPA_MHI_TEST_LOOP_NUM 5 +#define IPA_MHI_RUN_TEST_UNIT_IN_LOOP(test_unit, rc, args...) \ + do { \ + int __i; \ + for (__i = 0; __i < IPA_MHI_TEST_LOOP_NUM; __i++) { \ + IPA_UT_LOG(#test_unit " START iter %d\n", __i); \ + rc = test_unit(args); \ + if (!rc) \ + continue; \ + IPA_UT_LOG(#test_unit " failed %d\n", rc); \ + break; \ + } \ + } while (0) + +/** + * check for MSI interrupt for one or both channels: + * OUT channel MSI my be missed as it + * will be overwritten by the IN channel MSI + */ +#define IPA_MHI_TEST_CHECK_MSI_INTR(__both, __timeout) \ + do { \ + int i; \ + for (i = 0; i < 20; i++) { \ + if (*((u32 *)test_mhi_ctx->msi.base) == \ + (0x10000000 | \ + (IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1))) { \ + __timeout = false; \ + break; \ + } \ + if (__both && (*((u32 *)test_mhi_ctx->msi.base) == \ + (0x10000000 | \ + (IPA_MHI_TEST_FIRST_EVENT_RING_ID)))) { \ + /* sleep to be sure IN MSI is generated */ \ + msleep(20); \ + __timeout = false; \ + break; \ + } \ + msleep(20); \ + } \ + } while (0) + +static DECLARE_COMPLETION(mhi_test_ready_comp); +static DECLARE_COMPLETION(mhi_test_wakeup_comp); + +/** + * enum ipa_mhi_ring_elements_type - MHI ring elements types. + */ +enum ipa_mhi_ring_elements_type { + IPA_MHI_RING_ELEMENT_NO_OP = 1, + IPA_MHI_RING_ELEMENT_TRANSFER = 2 +}; + +/** + * enum ipa_mhi_channel_direction - MHI channel directions + */ +enum ipa_mhi_channel_direction { + IPA_MHI_OUT_CHAHNNEL = 1, + IPA_MHI_IN_CHAHNNEL = 2, +}; + +/** + * struct ipa_mhi_channel_context_array - MHI Channel context array entry + * + * mapping is taken from MHI spec + */ +struct ipa_mhi_channel_context_array { + u32 chstate:8; /*0-7*/ + u32 brsmode:2; /*8-9*/ + u32 pollcfg:6; /*10-15*/ + u32 reserved:16; /*16-31*/ + u32 chtype; /*channel type (inbound/outbound)*/ + u32 erindex; /*event ring index*/ + u64 rbase; /*ring base address in the host addr spc*/ + u64 rlen; /*ring length in bytes*/ + u64 rp; /*read pointer in the host system addr spc*/ + u64 wp; /*write pointer in the host system addr spc*/ +} __packed; + +/** + * struct ipa_mhi_event_context_array - MGI event ring context array entry + * + * mapping is taken from MHI spec + */ +struct ipa_mhi_event_context_array { + u16 intmodc; + u16 intmodt;/* Interrupt moderation timer (in microseconds) */ + u32 ertype; + u32 msivec; /* MSI vector for interrupt (MSI data)*/ + u64 rbase; /* ring base address in host address space*/ + u64 rlen; /* ring length in bytes*/ + u64 rp; /* read pointer in the host system address space*/ + u64 wp; /* write pointer in the host system address space*/ +} __packed; + +/** + * + * struct ipa_mhi_mmio_register_set - MHI configuration registers, + * control registers, status registers, pointers to doorbell arrays, + * pointers to channel and event context arrays. + * + * The structure is defined in mhi spec (register names are taken from there). 
+ * Only values accessed by HWP or test are documented + */ +struct ipa_mhi_mmio_register_set { + u32 mhireglen; + u32 reserved_08_04; + u32 mhiver; + u32 reserved_10_0c; + struct mhicfg { + u8 nch; + u8 reserved_15_8; + u8 ner; + u8 reserved_31_23; + } __packed mhicfg; + + u32 reserved_18_14; + u32 chdboff; + u32 reserved_20_1C; + u32 erdboff; + u32 reserved_28_24; + u32 bhioff; + u32 reserved_30_2C; + u32 debugoff; + u32 reserved_38_34; + + struct mhictrl { + u32 rs : 1; + u32 reset : 1; + u32 reserved_7_2 : 6; + u32 mhistate : 8; + u32 reserved_31_16 : 16; + } __packed mhictrl; + + u64 reserved_40_3c; + u32 reserved_44_40; + + struct mhistatus { + u32 ready : 1; + u32 reserved_3_2 : 1; + u32 syserr : 1; + u32 reserved_7_3 : 5; + u32 mhistate : 8; + u32 reserved_31_16 : 16; + } __packed mhistatus; + + /** + * Register is not accessed by HWP. + * In test register carries the handle for + * the buffer of channel context array + */ + u32 reserved_50_4c; + + u32 mhierror; + + /** + * Register is not accessed by HWP. + * In test register carries the handle for + * the buffer of event ring context array + */ + u32 reserved_58_54; + + /** + * 64-bit pointer to the channel context array in the host memory space + * host sets the pointer to the channel context array during + * initialization. + */ + u64 ccabap; + /** + * 64-bit pointer to the event context array in the host memory space + * host sets the pointer to the event context array during + * initialization + */ + u64 ecabap; + /** + * Register is not accessed by HWP. + * In test register carries the pointer of virtual address + * for the buffer of channel context array + */ + u64 crcbap; + /** + * Register is not accessed by HWP. + * In test register carries the pointer of virtual address + * for the buffer of event ring context array + */ + u64 crdb; + + u64 reserved_80_78; + + struct mhiaddr { + /** + * Base address (64-bit) of the memory region in + * the host address space where the MHI control + * data structures are allocated by the host, + * including channel context array, event context array, + * and rings. + * The device uses this information to set up its internal + * address translation tables. + * value must be aligned to 4 Kbytes. + */ + u64 mhicrtlbase; + /** + * Upper limit address (64-bit) of the memory region in + * the host address space where the MHI control + * data structures are allocated by the host. + * The device uses this information to setup its internal + * address translation tables. + * The most significant 32 bits of MHICTRLBASE and + * MHICTRLLIMIT registers must be equal. + */ + u64 mhictrllimit; + u64 reserved_18_10; + /** + * Base address (64-bit) of the memory region in + * the host address space where the MHI data buffers + * are allocated by the host. + * The device uses this information to setup its + * internal address translation tables. + * value must be aligned to 4 Kbytes. + */ + u64 mhidatabase; + /** + * Upper limit address (64-bit) of the memory region in + * the host address space where the MHI data buffers + * are allocated by the host. + * The device uses this information to setup its + * internal address translation tables. + * The most significant 32 bits of MHIDATABASE and + * MHIDATALIMIT registers must be equal. 
+ */ + u64 mhidatalimit; + u64 reserved_30_28; + } __packed mhiaddr; + +} __packed; + +/** + * struct ipa_mhi_event_ring_element - MHI Event ring element + * + * mapping is taken from MHI spec + */ +struct ipa_mhi_event_ring_element { + /** + * pointer to ring element that generated event in + * the host system address space + */ + u64 ptr; + union { + struct { + u32 len : 24; + u32 code : 8; + } __packed bits; + u32 dword; + } __packed dword_8; + u16 reserved; + u8 type; + u8 chid; +} __packed; + +/** + * struct ipa_mhi_transfer_ring_element - MHI Transfer ring element + * + * mapping is taken from MHI spec + */ +struct ipa_mhi_transfer_ring_element { + u64 ptr; /*pointer to buffer in the host system address space*/ + u16 len; /*transaction length in bytes*/ + u16 reserved0; + union { + struct { + u16 chain : 1; + u16 reserved_7_1 : 7; + u16 ieob : 1; + u16 ieot : 1; + u16 bei : 1; + u16 reserved_15_11 : 5; + } __packed bits; + u16 word; + } __packed word_C; + u8 type; + u8 reserved1; +} __packed; + +/** + * struct ipa_test_mhi_context - MHI test context + */ +struct ipa_test_mhi_context { + void __iomem *gsi_mmio; + struct ipa_mem_buffer msi; + struct ipa_mem_buffer ch_ctx_array; + struct ipa_mem_buffer ev_ctx_array; + struct ipa_mem_buffer mmio_buf; + struct ipa_mem_buffer xfer_ring_bufs[IPA_MHI_TEST_NUM_CHANNELS]; + struct ipa_mem_buffer ev_ring_bufs[IPA_MHI_TEST_NUM_EVENT_RINGS]; + struct ipa_mem_buffer in_buffer; + struct ipa_mem_buffer out_buffer; + u32 prod_hdl; + u32 cons_hdl; + u32 test_prod_hdl; + phys_addr_t transport_phys_addr; + unsigned long transport_size; +}; + +static struct ipa_test_mhi_context *test_mhi_ctx; + +static void ipa_mhi_test_cb(void *priv, + enum ipa_mhi_event_type event, unsigned long data) +{ + IPA_UT_DBG("Entry\n"); + + if (event == IPA_MHI_EVENT_DATA_AVAILABLE) + complete_all(&mhi_test_wakeup_comp); + else if (event == IPA_MHI_EVENT_READY) + complete_all(&mhi_test_ready_comp); + else + WARN_ON(1); +} + +static void ipa_test_mhi_free_mmio_space(void) +{ + IPA_UT_DBG("Entry\n"); + + if (!test_mhi_ctx) + return; + + dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->mmio_buf.size, + test_mhi_ctx->mmio_buf.base, + test_mhi_ctx->mmio_buf.phys_base); + + dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->ev_ctx_array.size, + test_mhi_ctx->ev_ctx_array.base, + test_mhi_ctx->ev_ctx_array.phys_base); + + dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->ch_ctx_array.size, + test_mhi_ctx->ch_ctx_array.base, + test_mhi_ctx->ch_ctx_array.phys_base); + + dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->msi.size, + test_mhi_ctx->msi.base, test_mhi_ctx->msi.phys_base); +} + +static int ipa_test_mhi_alloc_mmio_space(void) +{ + int rc = 0; + struct ipa_mem_buffer *msi; + struct ipa_mem_buffer *ch_ctx_array; + struct ipa_mem_buffer *ev_ctx_array; + struct ipa_mem_buffer *mmio_buf; + struct ipa_mhi_mmio_register_set *p_mmio; + + IPA_UT_DBG("Entry\n"); + + msi = &test_mhi_ctx->msi; + ch_ctx_array = &test_mhi_ctx->ch_ctx_array; + ev_ctx_array = &test_mhi_ctx->ev_ctx_array; + mmio_buf = &test_mhi_ctx->mmio_buf; + + /* Allocate MSI */ + msi->size = 4; + msi->base = dma_alloc_coherent(ipa3_ctx->pdev, msi->size, + &msi->phys_base, GFP_KERNEL); + if (!msi->base) { + IPA_UT_ERR("no mem for msi\n"); + return -ENOMEM; + } + + IPA_UT_DBG("msi: base 0x%pK phys_addr 0x%pad size %d\n", + msi->base, &msi->phys_base, msi->size); + + /* allocate buffer for channel context */ + ch_ctx_array->size = sizeof(struct ipa_mhi_channel_context_array) * + IPA_MHI_TEST_NUM_CHANNELS; + ch_ctx_array->base = 
dma_alloc_coherent(ipa3_ctx->pdev,
+		ch_ctx_array->size, &ch_ctx_array->phys_base, GFP_KERNEL);
+	if (!ch_ctx_array->base) {
+		IPA_UT_ERR("no mem for ch ctx array\n");
+		rc = -ENOMEM;
+		goto fail_free_msi;
+	}
+	IPA_UT_DBG("channel ctx array: base 0x%pK phys_addr %pad size %d\n",
+		ch_ctx_array->base, &ch_ctx_array->phys_base,
+		ch_ctx_array->size);
+
+	/* allocate buffer for event context */
+	ev_ctx_array->size = sizeof(struct ipa_mhi_event_context_array) *
+		IPA_MHI_TEST_NUM_EVENT_RINGS;
+	ev_ctx_array->base = dma_alloc_coherent(ipa3_ctx->pdev,
+		ev_ctx_array->size, &ev_ctx_array->phys_base, GFP_KERNEL);
+	if (!ev_ctx_array->base) {
+		IPA_UT_ERR("no mem for ev ctx array\n");
+		rc = -ENOMEM;
+		goto fail_free_ch_ctx_arr;
+	}
+	IPA_UT_DBG("event ctx array: base 0x%pK phys_addr %pad size %d\n",
+		ev_ctx_array->base, &ev_ctx_array->phys_base,
+		ev_ctx_array->size);
+
+	/* allocate buffer for mmio */
+	mmio_buf->size = sizeof(struct ipa_mhi_mmio_register_set);
+	mmio_buf->base = dma_alloc_coherent(ipa3_ctx->pdev, mmio_buf->size,
+		&mmio_buf->phys_base, GFP_KERNEL);
+	if (!mmio_buf->base) {
+		IPA_UT_ERR("no mem for mmio buf\n");
+		rc = -ENOMEM;
+		goto fail_free_ev_ctx_arr;
+	}
+	IPA_UT_DBG("mmio buffer: base 0x%pK phys_addr %pad size %d\n",
+		mmio_buf->base, &mmio_buf->phys_base, mmio_buf->size);
+
+	/* initialize table */
+	p_mmio = (struct ipa_mhi_mmio_register_set *)mmio_buf->base;
+
+	/**
+	 * 64-bit pointer to the channel context array in the host memory space;
+	 * Host sets the pointer to the channel context array
+	 * during initialization.
+	 */
+	p_mmio->ccabap = (u32)ch_ctx_array->phys_base -
+		(IPA_MHI_TEST_FIRST_CHANNEL_ID *
+		sizeof(struct ipa_mhi_channel_context_array));
+	IPA_UT_DBG("pMmio->ccabap 0x%llx\n", p_mmio->ccabap);
+
+	/**
+	 * 64-bit pointer to the event context array in the host memory space;
+	 * Host sets the pointer to the event context array
+	 * during initialization
+	 */
+	p_mmio->ecabap = (u32)ev_ctx_array->phys_base -
+		(IPA_MHI_TEST_FIRST_EVENT_RING_ID *
+		sizeof(struct ipa_mhi_event_context_array));
+	IPA_UT_DBG("pMmio->ecabap 0x%llx\n", p_mmio->ecabap);
+
+	/**
+	 * Register is not accessed by HWP.
+	 * In test register carries the pointer of
+	 * virtual address for the buffer of channel context array
+	 */
+	p_mmio->crcbap = (unsigned long)ch_ctx_array->base;
+
+	/**
+	 * Register is not accessed by HWP.
+	 * In test register carries the pointer of
+	 * virtual address for the buffer of event ring context array
+	 */
+	p_mmio->crdb = (unsigned long)ev_ctx_array->base;
+
+	/* test is running only on device.
no need to translate addresses */ + p_mmio->mhiaddr.mhicrtlbase = 0x04; + p_mmio->mhiaddr.mhictrllimit = 0xFFFFFFFF; + p_mmio->mhiaddr.mhidatabase = 0x04; + p_mmio->mhiaddr.mhidatalimit = 0xFFFFFFFF; + + return rc; + +fail_free_ev_ctx_arr: + dma_free_coherent(ipa3_ctx->pdev, ev_ctx_array->size, + ev_ctx_array->base, ev_ctx_array->phys_base); + ev_ctx_array->base = NULL; +fail_free_ch_ctx_arr: + dma_free_coherent(ipa3_ctx->pdev, ch_ctx_array->size, + ch_ctx_array->base, ch_ctx_array->phys_base); + ch_ctx_array->base = NULL; +fail_free_msi: + dma_free_coherent(ipa3_ctx->pdev, msi->size, msi->base, + msi->phys_base); + msi->base = NULL; + return rc; +} + +static void ipa_mhi_test_destroy_channel_context( + struct ipa_mem_buffer transfer_ring_bufs[], + struct ipa_mem_buffer event_ring_bufs[], + u8 channel_id, + u8 event_ring_id) +{ + u32 ev_ring_idx; + u32 ch_idx; + + IPA_UT_DBG("Entry\n"); + + if ((channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) || + (channel_id > IPA_MHI_TEST_LAST_CHANNEL_ID)) { + IPA_UT_ERR("channal_id invalid %d\n", channel_id); + return; + } + + if ((event_ring_id < IPA_MHI_TEST_FIRST_EVENT_RING_ID) || + (event_ring_id > IPA_MHI_TEST_LAST_EVENT_RING_ID)) { + IPA_UT_ERR("event_ring_id invalid %d\n", event_ring_id); + return; + } + + ch_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID; + ev_ring_idx = event_ring_id - IPA_MHI_TEST_FIRST_EVENT_RING_ID; + + if (transfer_ring_bufs[ch_idx].base) { + dma_free_coherent(ipa3_ctx->pdev, + transfer_ring_bufs[ch_idx].size, + transfer_ring_bufs[ch_idx].base, + transfer_ring_bufs[ch_idx].phys_base); + transfer_ring_bufs[ch_idx].base = NULL; + } + + if (event_ring_bufs[ev_ring_idx].base) { + dma_free_coherent(ipa3_ctx->pdev, + event_ring_bufs[ev_ring_idx].size, + event_ring_bufs[ev_ring_idx].base, + event_ring_bufs[ev_ring_idx].phys_base); + event_ring_bufs[ev_ring_idx].base = NULL; + } +} + +static int ipa_mhi_test_config_channel_context( + struct ipa_mem_buffer *mmio, + struct ipa_mem_buffer transfer_ring_bufs[], + struct ipa_mem_buffer event_ring_bufs[], + u8 channel_id, + u8 event_ring_id, + u16 transfer_ring_size, + u16 event_ring_size, + u8 ch_type) +{ + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_channels; + struct ipa_mhi_event_context_array *p_events; + u32 ev_ring_idx; + u32 ch_idx; + + IPA_UT_DBG("Entry\n"); + + if ((channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) || + (channel_id > IPA_MHI_TEST_LAST_CHANNEL_ID)) { + IPA_UT_DBG("channal_id invalid %d\n", channel_id); + return -EFAULT; + } + + if ((event_ring_id < IPA_MHI_TEST_FIRST_EVENT_RING_ID) || + (event_ring_id > IPA_MHI_TEST_LAST_EVENT_RING_ID)) { + IPA_UT_DBG("event_ring_id invalid %d\n", event_ring_id); + return -EFAULT; + } + + p_mmio = (struct ipa_mhi_mmio_register_set *)mmio->base; + p_channels = + (struct ipa_mhi_channel_context_array *) + ((unsigned long)p_mmio->crcbap); + p_events = (struct ipa_mhi_event_context_array *) + ((unsigned long)p_mmio->crdb); + + IPA_UT_DBG("p_mmio: %pK p_channels: %pK p_events: %pK\n", + p_mmio, p_channels, p_events); + + ch_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID; + ev_ring_idx = event_ring_id - IPA_MHI_TEST_FIRST_EVENT_RING_ID; + + IPA_UT_DBG("ch_idx: %u ev_ring_idx: %u\n", ch_idx, ev_ring_idx); + if (transfer_ring_bufs[ch_idx].base) { + IPA_UT_ERR("ChannelId %d is already allocated\n", channel_id); + return -EFAULT; + } + + /* allocate and init event ring if needed */ + if (!event_ring_bufs[ev_ring_idx].base) { + IPA_UT_LOG("Configuring event ring...\n"); + event_ring_bufs[ev_ring_idx].size 
= + event_ring_size * + sizeof(struct ipa_mhi_event_ring_element); + event_ring_bufs[ev_ring_idx].base = + dma_alloc_coherent(ipa3_ctx->pdev, + event_ring_bufs[ev_ring_idx].size, + &event_ring_bufs[ev_ring_idx].phys_base, + GFP_KERNEL); + if (!event_ring_bufs[ev_ring_idx].base) { + IPA_UT_ERR("no mem for ev ring buf\n"); + return -ENOMEM; + } + p_events[ev_ring_idx].intmodc = 1; + p_events[ev_ring_idx].intmodt = 0; + p_events[ev_ring_idx].msivec = event_ring_id; + p_events[ev_ring_idx].rbase = + (u32)event_ring_bufs[ev_ring_idx].phys_base; + p_events[ev_ring_idx].rlen = + event_ring_bufs[ev_ring_idx].size; + p_events[ev_ring_idx].rp = + (u32)event_ring_bufs[ev_ring_idx].phys_base; + p_events[ev_ring_idx].wp = + (u32)event_ring_bufs[ev_ring_idx].phys_base + + event_ring_bufs[ev_ring_idx].size - 16; + } else { + IPA_UT_LOG("Skip configuring event ring - already done\n"); + } + + transfer_ring_bufs[ch_idx].size = + transfer_ring_size * + sizeof(struct ipa_mhi_transfer_ring_element); + transfer_ring_bufs[ch_idx].base = + dma_alloc_coherent(ipa3_ctx->pdev, + transfer_ring_bufs[ch_idx].size, + &transfer_ring_bufs[ch_idx].phys_base, + GFP_KERNEL); + if (!transfer_ring_bufs[ch_idx].base) { + IPA_UT_ERR("no mem for xfer ring buf\n"); + dma_free_coherent(ipa3_ctx->pdev, + event_ring_bufs[ev_ring_idx].size, + event_ring_bufs[ev_ring_idx].base, + event_ring_bufs[ev_ring_idx].phys_base); + event_ring_bufs[ev_ring_idx].base = NULL; + return -ENOMEM; + } + + p_channels[ch_idx].erindex = event_ring_id; + p_channels[ch_idx].rbase = (u32)transfer_ring_bufs[ch_idx].phys_base; + p_channels[ch_idx].rlen = transfer_ring_bufs[ch_idx].size; + p_channels[ch_idx].rp = (u32)transfer_ring_bufs[ch_idx].phys_base; + p_channels[ch_idx].wp = (u32)transfer_ring_bufs[ch_idx].phys_base; + p_channels[ch_idx].chtype = ch_type; + p_channels[ch_idx].brsmode = IPA_MHI_BURST_MODE_DEFAULT; + p_channels[ch_idx].pollcfg = 0; + + return 0; +} + +static void ipa_mhi_test_destroy_data_structures(void) +{ + IPA_UT_DBG("Entry\n"); + + /* Destroy OUT data buffer */ + if (test_mhi_ctx->out_buffer.base) { + dma_free_coherent(ipa3_ctx->pdev, + test_mhi_ctx->out_buffer.size, + test_mhi_ctx->out_buffer.base, + test_mhi_ctx->out_buffer.phys_base); + test_mhi_ctx->out_buffer.base = NULL; + } + + /* Destroy IN data buffer */ + if (test_mhi_ctx->in_buffer.base) { + dma_free_coherent(ipa3_ctx->pdev, + test_mhi_ctx->in_buffer.size, + test_mhi_ctx->in_buffer.base, + test_mhi_ctx->in_buffer.phys_base); + test_mhi_ctx->in_buffer.base = NULL; + } + + /* Destroy IN channel ctx */ + ipa_mhi_test_destroy_channel_context( + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1); + + /* Destroy OUT channel ctx */ + ipa_mhi_test_destroy_channel_context( + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + IPA_MHI_TEST_FIRST_EVENT_RING_ID); +} + +static int ipa_mhi_test_setup_data_structures(void) +{ + int rc = 0; + + IPA_UT_DBG("Entry\n"); + + /* Config OUT Channel Context */ + rc = ipa_mhi_test_config_channel_context( + &test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + IPA_MHI_TEST_FIRST_EVENT_RING_ID, + 0x100, + 0x80, + IPA_MHI_OUT_CHAHNNEL); + if (rc) { + IPA_UT_ERR("Fail to config OUT ch ctx - err %d", rc); + return rc; + } + + /* Config IN Channel Context */ + rc = ipa_mhi_test_config_channel_context( + &test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, 
+ test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1, + 0x100, + 0x80, + IPA_MHI_IN_CHAHNNEL); + if (rc) { + IPA_UT_ERR("Fail to config IN ch ctx - err %d", rc); + goto fail_destroy_out_ch_ctx; + } + + /* allocate IN data buffer */ + test_mhi_ctx->in_buffer.size = IPA_MHI_TEST_MAX_DATA_BUF_SIZE; + test_mhi_ctx->in_buffer.base = dma_alloc_coherent( + ipa3_ctx->pdev, test_mhi_ctx->in_buffer.size, + &test_mhi_ctx->in_buffer.phys_base, GFP_KERNEL); + if (!test_mhi_ctx->in_buffer.base) { + IPA_UT_ERR("no mem for In data buffer\n"); + rc = -ENOMEM; + goto fail_destroy_in_ch_ctx; + } + memset(test_mhi_ctx->in_buffer.base, 0, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + + /* allocate OUT data buffer */ + test_mhi_ctx->out_buffer.size = IPA_MHI_TEST_MAX_DATA_BUF_SIZE; + test_mhi_ctx->out_buffer.base = dma_alloc_coherent( + ipa3_ctx->pdev, test_mhi_ctx->out_buffer.size, + &test_mhi_ctx->out_buffer.phys_base, GFP_KERNEL); + if (!test_mhi_ctx->out_buffer.base) { + IPA_UT_ERR("no mem for Out data buffer\n"); + rc = -EFAULT; + goto fail_destroy_in_data_buf; + } + memset(test_mhi_ctx->out_buffer.base, 0, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + + return 0; + +fail_destroy_in_data_buf: + dma_free_coherent(ipa3_ctx->pdev, + test_mhi_ctx->in_buffer.size, + test_mhi_ctx->in_buffer.base, + test_mhi_ctx->in_buffer.phys_base); + test_mhi_ctx->in_buffer.base = NULL; +fail_destroy_in_ch_ctx: + ipa_mhi_test_destroy_channel_context( + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1); +fail_destroy_out_ch_ctx: + ipa_mhi_test_destroy_channel_context( + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + IPA_MHI_TEST_FIRST_EVENT_RING_ID); + return 0; +} + +/** + * ipa_test_mhi_suite_setup() - Suite setup function + */ +static int ipa_test_mhi_suite_setup(void **ppriv) +{ + int rc = 0; + struct ipa_sys_connect_params sys_in; + + IPA_UT_DBG("Start Setup\n"); + + if (!ipa3_ctx) { + IPA_UT_ERR("No IPA ctx\n"); + return -EINVAL; + } + + test_mhi_ctx = kzalloc(sizeof(struct ipa_test_mhi_context), + GFP_KERNEL); + if (!test_mhi_ctx) { + IPA_UT_ERR("failed allocated ctx\n"); + return -ENOMEM; + } + + rc = ipa3_get_transport_info(&test_mhi_ctx->transport_phys_addr, + &test_mhi_ctx->transport_size); + if (rc != 0) { + IPA_UT_ERR("ipa3_get_transport_info() failed\n"); + rc = -EFAULT; + goto fail_free_ctx; + } + + test_mhi_ctx->gsi_mmio = + ioremap_nocache(test_mhi_ctx->transport_phys_addr, + test_mhi_ctx->transport_size); + if (!test_mhi_ctx->gsi_mmio) { + IPA_UT_ERR("failed to remap GSI HW size=%lu\n", + test_mhi_ctx->transport_size); + rc = -EFAULT; + goto fail_free_ctx; + } + + rc = ipa_test_mhi_alloc_mmio_space(); + if (rc) { + IPA_UT_ERR("failed to alloc mmio space"); + goto fail_iounmap; + } + + rc = ipa_mhi_test_setup_data_structures(); + if (rc) { + IPA_UT_ERR("failed to setup data structures"); + goto fail_free_mmio_spc; + } + + /* connect PROD pipe for remote wakeup */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_TEST_PROD; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS; + if (ipa_setup_sys_pipe(&sys_in, &test_mhi_ctx->test_prod_hdl)) { + IPA_UT_ERR("setup sys pipe failed.\n"); + goto fail_destroy_data_structures; + } + + *ppriv = test_mhi_ctx; + return 0; + +fail_destroy_data_structures: + 
ipa_mhi_test_destroy_data_structures(); +fail_free_mmio_spc: + ipa_test_mhi_free_mmio_space(); +fail_iounmap: + iounmap(test_mhi_ctx->gsi_mmio); +fail_free_ctx: + kfree(test_mhi_ctx); + test_mhi_ctx = NULL; + return rc; +} + +/** + * ipa_test_mhi_suite_teardown() - Suite teardown function + */ +static int ipa_test_mhi_suite_teardown(void *priv) +{ + IPA_UT_DBG("Start Teardown\n"); + + if (!test_mhi_ctx) + return 0; + + ipa_teardown_sys_pipe(test_mhi_ctx->test_prod_hdl); + ipa_mhi_test_destroy_data_structures(); + ipa_test_mhi_free_mmio_space(); + iounmap(test_mhi_ctx->gsi_mmio); + kfree(test_mhi_ctx); + test_mhi_ctx = NULL; + + return 0; +} + +/** + * ipa_mhi_test_initialize_driver() - MHI init and possibly start and connect + * + * To be run during tests + * 1. MHI init (Ready state) + * 2. Conditional MHI start and connect (M0 state) + */ +static int ipa_mhi_test_initialize_driver(bool skip_start_and_conn) +{ + int rc = 0; + struct ipa_mhi_init_params init_params; + struct ipa_mhi_start_params start_params; + struct ipa_mhi_connect_params prod_params; + struct ipa_mhi_connect_params cons_params; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + u64 phys_addr; + + IPA_UT_LOG("Entry\n"); + + p_mmio = test_mhi_ctx->mmio_buf.base; + + /* start IPA MHI */ + memset(&init_params, 0, sizeof(init_params)); + init_params.msi.addr_low = test_mhi_ctx->msi.phys_base; + init_params.msi.data = 0x10000000; + init_params.msi.mask = ~0x10000000; + /* MMIO not needed for GSI */ + init_params.first_ch_idx = IPA_MHI_TEST_FIRST_CHANNEL_ID; + init_params.first_er_idx = IPA_MHI_TEST_FIRST_EVENT_RING_ID; + init_params.assert_bit40 = false; + init_params.notify = ipa_mhi_test_cb; + init_params.priv = NULL; + init_params.test_mode = true; + + rc = ipa_mhi_init(&init_params); + if (rc) { + IPA_UT_LOG("ipa_mhi_init failed %d\n", rc); + return rc; + } + + IPA_UT_LOG("Wait async ready event\n"); + if (wait_for_completion_timeout(&mhi_test_ready_comp, 10 * HZ) == 0) { + IPA_UT_LOG("timeout waiting for READY event"); + IPA_UT_TEST_FAIL_REPORT("failed waiting for state ready"); + return -ETIME; + } + + if (!skip_start_and_conn) { + memset(&start_params, 0, sizeof(start_params)); + start_params.channel_context_array_addr = p_mmio->ccabap; + start_params.event_context_array_addr = p_mmio->ecabap; + + IPA_UT_LOG("BEFORE mhi_start\n"); + rc = ipa_mhi_start(&start_params); + if (rc) { + IPA_UT_LOG("mhi_start failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail start mhi"); + return rc; + } + IPA_UT_LOG("AFTER mhi_start\n"); + + phys_addr = p_mmio->ccabap + (IPA_MHI_TEST_FIRST_CHANNEL_ID * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + IPA_UT_LOG("ch: %d base: 0x%pK phys_addr 0x%llx chstate: %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + p_ch_ctx_array, phys_addr, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + + memset(&prod_params, 0, sizeof(prod_params)); + prod_params.sys.client = IPA_CLIENT_MHI_PROD; + prod_params.sys.ipa_ep_cfg.mode.mode = IPA_DMA; + prod_params.sys.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS; + prod_params.sys.ipa_ep_cfg.seq.seq_type = + IPA_MHI_TEST_SEQ_TYPE_DMA; + prod_params.sys.ipa_ep_cfg.seq.set_dynamic = true; + prod_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID; + IPA_UT_LOG("BEFORE connect_pipe (PROD): client:%d ch_id:%u\n", + prod_params.sys.client, prod_params.channel_id); + rc = ipa_mhi_connect_pipe(&prod_params, + 
&test_mhi_ctx->prod_hdl); + if (rc) { + IPA_UT_LOG("mhi_connect_pipe failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail connect PROD pipe"); + return rc; + } + + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("MHI_PROD: chstate is not RUN chstate:%s\n", + ipa_mhi_get_state_str( + p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not run"); + return -EFAULT; + } + + phys_addr = p_mmio->ccabap + + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + IPA_UT_LOG("ch: %d base: 0x%pK phys_addr 0x%llx chstate: %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + p_ch_ctx_array, phys_addr, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + + memset(&cons_params, 0, sizeof(cons_params)); + cons_params.sys.client = IPA_CLIENT_MHI_CONS; + cons_params.sys.skip_ep_cfg = true; + cons_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID + 1; + IPA_UT_LOG("BEFORE connect_pipe (CONS): client:%d ch_id:%u\n", + cons_params.sys.client, cons_params.channel_id); + rc = ipa_mhi_connect_pipe(&cons_params, + &test_mhi_ctx->cons_hdl); + if (rc) { + IPA_UT_LOG("mhi_connect_pipe failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail connect CONS pipe"); + return rc; + } + + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("MHI_CONS: chstate is not RUN chstate:%s\n", + ipa_mhi_get_state_str( + p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not run"); + return -EFAULT; + } + } + + return 0; +} + +/** + * To be run during test + * 1. MHI destroy + * 2. re-configure the channels + */ +static int ipa_mhi_test_destroy(struct ipa_test_mhi_context *ctx) +{ + struct ipa_mhi_mmio_register_set *p_mmio; + u64 phys_addr; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + int rc; + + IPA_UT_LOG("Entry\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("Input err invalid ctx\n"); + return -EINVAL; + } + + p_mmio = ctx->mmio_buf.base; + + phys_addr = p_mmio->ccabap + + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = ctx->ch_ctx_array.base + + (phys_addr - ctx->ch_ctx_array.phys_base); + IPA_UT_LOG("channel id %d (CONS): chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + + phys_addr = p_mmio->ccabap + + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = ctx->ch_ctx_array.base + + (phys_addr - ctx->ch_ctx_array.phys_base); + IPA_UT_LOG("channel id %d (PROD): chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + + IPA_UT_LOG("MHI Destroy\n"); + ipa_mhi_destroy(); + IPA_UT_LOG("Post MHI Destroy\n"); + + ctx->prod_hdl = 0; + ctx->cons_hdl = 0; + + dma_free_coherent(ipa3_ctx->pdev, ctx->xfer_ring_bufs[1].size, + ctx->xfer_ring_bufs[1].base, ctx->xfer_ring_bufs[1].phys_base); + ctx->xfer_ring_bufs[1].base = NULL; + + IPA_UT_LOG("config channel context for channel %d (MHI CONS)\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1); + rc = ipa_mhi_test_config_channel_context( + &ctx->mmio_buf, + ctx->xfer_ring_bufs, + ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1, + 0x100, + 0x80, + IPA_MHI_IN_CHAHNNEL); + if (rc) { + IPA_UT_LOG("config channel context failed %d, channel %d\n", + rc, IPA_MHI_TEST_FIRST_CHANNEL_ID + 1); + IPA_UT_TEST_FAIL_REPORT("fail config CONS 
channel ctx"); + return -EFAULT; + } + + dma_free_coherent(ipa3_ctx->pdev, ctx->xfer_ring_bufs[0].size, + ctx->xfer_ring_bufs[0].base, ctx->xfer_ring_bufs[0].phys_base); + ctx->xfer_ring_bufs[0].base = NULL; + + IPA_UT_LOG("config channel context for channel %d (MHI PROD)\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID); + rc = ipa_mhi_test_config_channel_context( + &ctx->mmio_buf, + ctx->xfer_ring_bufs, + ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + IPA_MHI_TEST_FIRST_EVENT_RING_ID, + 0x100, + 0x80, + IPA_MHI_OUT_CHAHNNEL); + if (rc) { + IPA_UT_LOG("config channel context failed %d, channel %d\n", + rc, IPA_MHI_TEST_FIRST_CHANNEL_ID); + IPA_UT_TEST_FAIL_REPORT("fail config PROD channel ctx"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * 1. Destroy + * 2. Initialize (to Ready or M0 states) + */ +static int ipa_mhi_test_reset(struct ipa_test_mhi_context *ctx, + bool skip_start_and_conn) +{ + int rc; + + IPA_UT_LOG("Entry\n"); + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("destroy fail"); + return rc; + } + + rc = ipa_mhi_test_initialize_driver(skip_start_and_conn); + if (rc) { + IPA_UT_LOG("driver init failed skip_start_and_con=%d rc=%d\n", + skip_start_and_conn, rc); + IPA_UT_TEST_FAIL_REPORT("init fail"); + return rc; + } + + return 0; +} + +/** + * To be run during test + * 1. disconnect cons channel + * 2. config cons channel + * 3. disconnect prod channel + * 4. config prod channel + * 5. connect prod + * 6. connect cons + */ +static int ipa_mhi_test_channel_reset(void) +{ + int rc; + struct ipa_mhi_connect_params prod_params; + struct ipa_mhi_connect_params cons_params; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + u64 phys_addr; + + p_mmio = test_mhi_ctx->mmio_buf.base; + + IPA_UT_LOG("Before pipe disconnect (CONS) client hdl=%u=\n", + test_mhi_ctx->cons_hdl); + rc = ipa_mhi_disconnect_pipe(test_mhi_ctx->cons_hdl); + if (rc) { + IPA_UT_LOG("disconnect_pipe failed (CONS) %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("CONS pipe disconnect fail"); + return -EFAULT; + } + test_mhi_ctx->cons_hdl = 0; + + phys_addr = p_mmio->ccabap + + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_DISABLE) { + IPA_UT_LOG("chstate is not disabled! 
ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not disabled"); + return -EFAULT; + } + + dma_free_coherent(ipa3_ctx->pdev, + test_mhi_ctx->xfer_ring_bufs[1].size, + test_mhi_ctx->xfer_ring_bufs[1].base, + test_mhi_ctx->xfer_ring_bufs[1].phys_base); + test_mhi_ctx->xfer_ring_bufs[1].base = NULL; + rc = ipa_mhi_test_config_channel_context( + &test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1, + 0x100, + 0x80, + IPA_MHI_IN_CHAHNNEL); + if (rc) { + IPA_UT_LOG("config_channel_context IN failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail config CONS channel context"); + return -EFAULT; + } + IPA_UT_LOG("Before pipe disconnect (CONS) client hdl=%u=\n", + test_mhi_ctx->prod_hdl); + rc = ipa_mhi_disconnect_pipe(test_mhi_ctx->prod_hdl); + if (rc) { + IPA_UT_LOG("disconnect_pipe failed (PROD) %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("PROD pipe disconnect fail"); + return -EFAULT; + } + test_mhi_ctx->prod_hdl = 0; + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_DISABLE) { + IPA_UT_LOG("chstate is not disabled! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not disabled"); + return -EFAULT; + } + + dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->xfer_ring_bufs[0].size, + test_mhi_ctx->xfer_ring_bufs[0].base, + test_mhi_ctx->xfer_ring_bufs[0].phys_base); + test_mhi_ctx->xfer_ring_bufs[0].base = NULL; + rc = ipa_mhi_test_config_channel_context( + &test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + IPA_MHI_TEST_FIRST_EVENT_RING_ID, + 0x100, + 0x80, + IPA_MHI_OUT_CHAHNNEL); + if (rc) { + IPA_UT_LOG("config_channel_context OUT failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not disabled"); + return -EFAULT; + } + + memset(&prod_params, 0, sizeof(prod_params)); + prod_params.sys.client = IPA_CLIENT_MHI_PROD; + prod_params.sys.ipa_ep_cfg.mode.mode = IPA_DMA; + prod_params.sys.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS; + prod_params.sys.ipa_ep_cfg.seq.seq_type = IPA_MHI_TEST_SEQ_TYPE_DMA; + prod_params.sys.ipa_ep_cfg.seq.set_dynamic = true; + prod_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID; + IPA_UT_LOG("BEFORE connect PROD\n"); + rc = ipa_mhi_connect_pipe(&prod_params, &test_mhi_ctx->prod_hdl); + if (rc) { + IPA_UT_LOG("connect_pipe failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail connect PROD pipe"); + return rc; + } + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not run! 
ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not run"); + return -EFAULT; + } + + memset(&cons_params, 0, sizeof(cons_params)); + cons_params.sys.client = IPA_CLIENT_MHI_CONS; + cons_params.sys.skip_ep_cfg = true; + cons_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID + 1; + IPA_UT_LOG("BEFORE connect CONS\n"); + rc = ipa_mhi_connect_pipe(&cons_params, &test_mhi_ctx->cons_hdl); + if (rc) { + IPA_UT_LOG("ipa_mhi_connect_pipe failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail connect CONS pipe"); + return rc; + } + + phys_addr = p_mmio->ccabap + + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not run! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not run"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * Send data + */ +static int ipa_mhi_test_q_transfer_re(struct ipa_mem_buffer *mmio, + struct ipa_mem_buffer xfer_ring_bufs[], + struct ipa_mem_buffer ev_ring_bufs[], + u8 channel_id, + struct ipa_mem_buffer buf_array[], + int buf_array_size, + bool ieob, + bool ieot, + bool bei, + bool trigger_db) +{ + struct ipa_mhi_transfer_ring_element *curr_re; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_channels; + struct ipa_mhi_event_context_array *p_events; + u32 channel_idx; + u32 event_ring_index; + u32 wp_ofst; + u32 rp_ofst; + u32 next_wp_ofst; + int i; + u32 num_of_ed_to_queue; + u32 avail_ev; + + IPA_UT_LOG("Entry\n"); + + p_mmio = (struct ipa_mhi_mmio_register_set *)mmio->base; + p_channels = (struct ipa_mhi_channel_context_array *) + ((unsigned long)p_mmio->crcbap); + p_events = (struct ipa_mhi_event_context_array *) + ((unsigned long)p_mmio->crdb); + + if (ieob) + num_of_ed_to_queue = buf_array_size; + else + num_of_ed_to_queue = ieot ? 
1 : 0; + + if (channel_id >= + (IPA_MHI_TEST_FIRST_CHANNEL_ID + IPA_MHI_TEST_NUM_CHANNELS) || + channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) { + IPA_UT_LOG("Invalid Channel ID %d\n", channel_id); + return -EFAULT; + } + + channel_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID; + + if (!xfer_ring_bufs[channel_idx].base) { + IPA_UT_LOG("Channel is not allocated\n"); + return -EFAULT; + } + if (p_channels[channel_idx].brsmode == IPA_MHI_BURST_MODE_DEFAULT || + p_channels[channel_idx].brsmode == IPA_MHI_BURST_MODE_ENABLE) + num_of_ed_to_queue += 1; /* for OOB/DB mode event */ + + /* First queue EDs */ + event_ring_index = p_channels[channel_idx].erindex - + IPA_MHI_TEST_FIRST_EVENT_RING_ID; + + wp_ofst = (u32)(p_events[event_ring_index].wp - + p_events[event_ring_index].rbase); + rp_ofst = (u32)(p_events[event_ring_index].rp - + p_events[event_ring_index].rbase); + + if (p_events[event_ring_index].rlen & 0xFFFFFFFF00000000) { + IPA_UT_LOG("invalid ev rlen %llu\n", + p_events[event_ring_index].rlen); + return -EFAULT; + } + + if (wp_ofst > rp_ofst) { + avail_ev = (wp_ofst - rp_ofst) / + sizeof(struct ipa_mhi_event_ring_element); + } else { + avail_ev = (u32)p_events[event_ring_index].rlen - + (rp_ofst - wp_ofst); + avail_ev /= sizeof(struct ipa_mhi_event_ring_element); + } + + IPA_UT_LOG("wp_ofst=0x%x rp_ofst=0x%x rlen=%llu avail_ev=%u\n", + wp_ofst, rp_ofst, p_events[event_ring_index].rlen, avail_ev); + + if (num_of_ed_to_queue > ((u32)p_events[event_ring_index].rlen / + sizeof(struct ipa_mhi_event_ring_element))) { + IPA_UT_LOG("event ring too small for %u credits\n", + num_of_ed_to_queue); + return -EFAULT; + } + + if (num_of_ed_to_queue > avail_ev) { + IPA_UT_LOG("Need to add event credits (needed=%u)\n", + num_of_ed_to_queue - avail_ev); + + next_wp_ofst = (wp_ofst + (num_of_ed_to_queue - avail_ev) * + sizeof(struct ipa_mhi_event_ring_element)) % + (u32)p_events[event_ring_index].rlen; + + /* set next WP */ + p_events[event_ring_index].wp = + (u32)p_events[event_ring_index].rbase + next_wp_ofst; + + /* write value to event ring doorbell */ + IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n", + p_events[event_ring_index].wp, + &(test_mhi_ctx->transport_phys_addr), + GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS( + event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0)); + iowrite32(p_events[event_ring_index].wp, + test_mhi_ctx->gsi_mmio + + GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS( + event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0)); + } + + for (i = 0; i < buf_array_size; i++) { + /* calculate virtual pointer for current WP and RP */ + wp_ofst = (u32)(p_channels[channel_idx].wp - + p_channels[channel_idx].rbase); + rp_ofst = (u32)(p_channels[channel_idx].rp - + p_channels[channel_idx].rbase); + (void)rp_ofst; + curr_re = (struct ipa_mhi_transfer_ring_element *) + ((unsigned long)xfer_ring_bufs[channel_idx].base + + wp_ofst); + if (p_channels[channel_idx].rlen & 0xFFFFFFFF00000000) { + IPA_UT_LOG("invalid ch rlen %llu\n", + p_channels[channel_idx].rlen); + return -EFAULT; + } + next_wp_ofst = (wp_ofst + + sizeof(struct ipa_mhi_transfer_ring_element)) % + (u32)p_channels[channel_idx].rlen; + + /* write current RE */ + curr_re->type = IPA_MHI_RING_ELEMENT_TRANSFER; + curr_re->len = (u16)buf_array[i].size; + curr_re->ptr = (u32)buf_array[i].phys_base; + curr_re->word_C.bits.bei = bei; + curr_re->word_C.bits.ieob = ieob; + curr_re->word_C.bits.ieot = ieot; + + /* set next WP */ + p_channels[channel_idx].wp = + p_channels[channel_idx].rbase + next_wp_ofst; + + if (i == (buf_array_size - 1)) { + /* last buffer 
*/ + curr_re->word_C.bits.chain = 0; + if (trigger_db) { + IPA_UT_LOG( + "DB to channel 0x%llx: base %pa ofst 0x%x\n" + , p_channels[channel_idx].wp + , &(test_mhi_ctx->transport_phys_addr) + , GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS( + channel_idx, 0)); + iowrite32(p_channels[channel_idx].wp, + test_mhi_ctx->gsi_mmio + + GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS( + channel_idx, 0)); + } + } else { + curr_re->word_C.bits.chain = 1; + } + } + + return 0; +} + +/** + * To be run during test + * Send data in loopback (from In to OUT) and compare + */ +static int ipa_mhi_test_loopback_data_transfer(void) +{ + struct ipa_mem_buffer *p_mmio; + int i; + int rc; + static int val; + bool timeout = true; + + IPA_UT_LOG("Entry\n"); + + p_mmio = &test_mhi_ctx->mmio_buf; + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + val++; + + memset(test_mhi_ctx->in_buffer.base, 0, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) + memset(test_mhi_ctx->out_buffer.base + i, (val + i) & 0xFF, 1); + + /* queue RE for IN side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(p_mmio, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + + if (rc) { + IPA_UT_LOG("q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re"); + return rc; + } + + /* queue REs for OUT side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(p_mmio, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + &test_mhi_ctx->out_buffer, + 1, + true, + true, + false, + true); + + if (rc) { + IPA_UT_LOG("q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail OUT q xfer re"); + return rc; + } + + IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout); + if (timeout) { + IPA_UT_LOG("transfer timeout. MSI = 0x%x\n", + *((u32 *)test_mhi_ctx->msi.base)); + IPA_UT_TEST_FAIL_REPORT("xfter timeout"); + return -EFAULT; + } + + /* compare the two buffers */ + if (memcmp(test_mhi_ctx->in_buffer.base, test_mhi_ctx->out_buffer.base, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) { + IPA_UT_LOG("buffer are not equal\n"); + IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * Do suspend and check channel states to be suspend if should success + */ +static int ipa_mhi_test_suspend(bool force, bool should_success) +{ + int rc; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + u64 phys_addr; + + IPA_UT_LOG("Entry\n"); + + rc = ipa_mhi_suspend(force); + if (should_success && rc != 0) { + IPA_UT_LOG("ipa_mhi_suspend failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend failed"); + return -EFAULT; + } + + if (!should_success && rc != -EAGAIN) { + IPA_UT_LOG("ipa_mhi_suspend did not return -EAGAIN fail %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("suspend succeeded unexpectedly"); + return -EFAULT; + } + + p_mmio = test_mhi_ctx->mmio_buf.base; + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (should_success) { + if (p_ch_ctx_array->chstate != + IPA_HW_MHI_CHANNEL_STATE_SUSPEND) { + IPA_UT_LOG("chstate is not suspend! 
ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not suspend"); + return -EFAULT; + } + if (!force && p_ch_ctx_array->rp != p_ch_ctx_array->wp) { + IPA_UT_LOG("rp not updated ch %d rp 0x%llx wp 0x%llx\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + p_ch_ctx_array->rp, p_ch_ctx_array->wp); + IPA_UT_TEST_FAIL_REPORT("rp was not updated"); + return -EFAULT; + } + } else { + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not running! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not run"); + return -EFAULT; + } + } + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (should_success) { + if (p_ch_ctx_array->chstate != + IPA_HW_MHI_CHANNEL_STATE_SUSPEND) { + IPA_UT_LOG("chstate is not suspend! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not suspend"); + return -EFAULT; + } + if (!force && p_ch_ctx_array->rp != p_ch_ctx_array->wp) { + IPA_UT_LOG("rp not updated ch %d rp 0x%llx wp 0x%llx\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + p_ch_ctx_array->rp, p_ch_ctx_array->wp); + IPA_UT_TEST_FAIL_REPORT("rp was not updated"); + return -EFAULT; + } + } else { + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not running! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not run"); + return -EFAULT; + } + } + + return 0; +} + +/** + * To be run during test + * Do resume and check channel state to be running + */ +static int ipa_test_mhi_resume(void) +{ + int rc; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + u64 phys_addr; + + rc = ipa_mhi_resume(); + if (rc) { + IPA_UT_LOG("resume failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("resume failed"); + return -EFAULT; + } + + p_mmio = test_mhi_ctx->mmio_buf.base; + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not running! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not run"); + return -EFAULT; + } + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not running! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not run"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * 1. suspend + * 2. queue RE for IN and OUT and send data + * 3. should get MSI timeout due to suspend + * 4. resume + * 5. should get the MSIs now + * 6. 
comapre the IN and OUT buffers + */ +static int ipa_mhi_test_suspend_resume(void) +{ + int rc; + int i; + bool timeout = true; + + IPA_UT_LOG("Entry\n"); + + IPA_UT_LOG("BEFORE suspend\n"); + rc = ipa_mhi_test_suspend(false, true); + if (rc) { + IPA_UT_LOG("suspend failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend failed"); + return rc; + } + IPA_UT_LOG("AFTER suspend\n"); + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + memset(test_mhi_ctx->in_buffer.base, 0, IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) + memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1); + + /* queue RE for IN side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re"); + return rc; + } + + /* queue REs for OUT side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + &test_mhi_ctx->out_buffer, + 1, + true, + true, + false, + true); + + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail OUT q xfer re"); + return rc; + } + + IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout); + if (!timeout) { + IPA_UT_LOG("Error: transfer success on suspend\n"); + IPA_UT_TEST_FAIL_REPORT("xfer suceeded unexpectedly"); + return -EFAULT; + } + + IPA_UT_LOG("BEFORE resume\n"); + rc = ipa_test_mhi_resume(); + if (rc) { + IPA_UT_LOG("ipa_mhi_resume failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("resume fail"); + return rc; + } + IPA_UT_LOG("AFTER resume\n"); + + IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout); + if (timeout) { + IPA_UT_LOG("Error: transfer timeout\n"); + IPA_UT_TEST_FAIL_REPORT("xfer timeout"); + return -EFAULT; + } + + /* compare the two buffers */ + if (memcmp(test_mhi_ctx->in_buffer.base, + test_mhi_ctx->out_buffer.base, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) { + IPA_UT_LOG("Error: buffers are not equal\n"); + IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * 1. enable aggregation + * 2. queue IN RE (ring element) + * 3. allocate skb with data + * 4. 
send it (this will create open aggr frame) + */ +static int ipa_mhi_test_create_aggr_open_frame(void) +{ + struct ipa_ep_cfg_aggr ep_aggr; + struct sk_buff *skb; + int rc; + int i; + u32 aggr_state_active; + + IPA_UT_LOG("Entry\n"); + + memset(&ep_aggr, 0, sizeof(ep_aggr)); + ep_aggr.aggr_en = IPA_ENABLE_AGGR; + ep_aggr.aggr = IPA_GENERIC; + ep_aggr.aggr_pkt_limit = 2; + + rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr); + if (rc) { + IPA_UT_LOG("failed to configure aggr"); + IPA_UT_TEST_FAIL_REPORT("failed to configure aggr"); + return rc; + } + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + /* queue RE for IN side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re"); + return rc; + } + + skb = dev_alloc_skb(IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + if (!skb) { + IPA_UT_LOG("non mem for skb\n"); + IPA_UT_TEST_FAIL_REPORT("fail alloc skb"); + return -ENOMEM; + } + skb_put(skb, IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) { + memset(skb->data + i, i & 0xFF, 1); + memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1); + } + + rc = ipa_tx_dp(IPA_CLIENT_TEST_PROD, skb, NULL); + if (rc) { + IPA_UT_LOG("ipa_tx_dp failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail"); + return rc; + } + + msleep(20); + + aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + IPA_UT_LOG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active); + if (aggr_state_active == 0) { + IPA_UT_LOG("No aggregation frame open!\n"); + IPA_UT_TEST_FAIL_REPORT("No aggregation frame open"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * 1. create open aggr by sending data + * 2. suspend - if force it should succeed, otherwize it fails + * 3. if force - wait for wakeup event - it should arrive + * 4. if force - resume + * 5. force close the aggr. + * 6. wait for MSI - it should arrive + * 7. compare IN and OUT buffers + * 8. disable aggr. + */ +static int ipa_mhi_test_suspend_aggr_open(bool force) +{ + int rc; + struct ipa_ep_cfg_aggr ep_aggr; + bool timeout = true; + + IPA_UT_LOG("Entry\n"); + + rc = ipa_mhi_test_create_aggr_open_frame(); + if (rc) { + IPA_UT_LOG("failed create open aggr\n"); + IPA_UT_TEST_FAIL_REPORT("fail create open aggr"); + return rc; + } + + if (force) + reinit_completion(&mhi_test_wakeup_comp); + + IPA_UT_LOG("BEFORE suspend\n"); + /** + * if suspend force, then suspend should succeed. + * otherwize it should fail due to open aggr. 
+ */ + rc = ipa_mhi_test_suspend(force, force); + if (rc) { + IPA_UT_LOG("suspend failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend fail"); + return rc; + } + IPA_UT_LOG("AFTER suspend\n"); + + if (force) { + if (!wait_for_completion_timeout(&mhi_test_wakeup_comp, HZ)) { + IPA_UT_LOG("timeout waiting for wakeup event\n"); + IPA_UT_TEST_FAIL_REPORT("timeout waitinf wakeup event"); + return -ETIME; + } + + IPA_UT_LOG("BEFORE resume\n"); + rc = ipa_test_mhi_resume(); + if (rc) { + IPA_UT_LOG("resume failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("resume failed"); + return rc; + } + IPA_UT_LOG("AFTER resume\n"); + } + + ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << test_mhi_ctx->cons_hdl)); + + IPA_MHI_TEST_CHECK_MSI_INTR(false, timeout); + if (timeout) { + IPA_UT_LOG("fail: transfer not completed\n"); + IPA_UT_TEST_FAIL_REPORT("timeout on transferring data"); + return -EFAULT; + } + + /* compare the two buffers */ + if (memcmp(test_mhi_ctx->in_buffer.base, + test_mhi_ctx->out_buffer.base, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) { + IPA_UT_LOG("fail: buffer are not equal\n"); + IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer"); + return -EFAULT; + } + + memset(&ep_aggr, 0, sizeof(ep_aggr)); + rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr); + if (rc) { + IPA_UT_LOG("failed to configure aggr"); + IPA_UT_TEST_FAIL_REPORT("fail to disable aggr"); + return rc; + } + + return 0; +} + +/** + * To be run during test + * 1. suspend + * 2. queue IN RE (ring element) + * 3. allocate skb with data + * 4. send it (this will create open aggr frame) + * 5. wait for wakeup event - it should arrive + * 6. resume + * 7. wait for MSI - it should arrive + * 8. compare IN and OUT buffers + */ +static int ipa_mhi_test_suspend_host_wakeup(void) +{ + int rc; + int i; + bool timeout = true; + struct sk_buff *skb; + + reinit_completion(&mhi_test_wakeup_comp); + + IPA_UT_LOG("BEFORE suspend\n"); + rc = ipa_mhi_test_suspend(false, true); + if (rc) { + IPA_UT_LOG("suspend failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend fail"); + return rc; + } + IPA_UT_LOG("AFTER suspend\n"); + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + memset(test_mhi_ctx->in_buffer.base, 0, IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + /* queue RE for IN side and trigger doorbell*/ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re"); + return rc; + } + + skb = dev_alloc_skb(IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + if (!skb) { + IPA_UT_LOG("non mem for skb\n"); + IPA_UT_TEST_FAIL_REPORT("no mem for skb"); + return -ENOMEM; + } + skb_put(skb, IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) { + memset(skb->data + i, i & 0xFF, 1); + memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1); + } + + rc = ipa_tx_dp(IPA_CLIENT_TEST_PROD, skb, NULL); + if (rc) { + IPA_UT_LOG("ipa_tx_dp failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail"); + return rc; + } + + if (wait_for_completion_timeout(&mhi_test_wakeup_comp, + msecs_to_jiffies(3500)) == 0) { + IPA_UT_LOG("timeout waiting for wakeup event\n"); + IPA_UT_TEST_FAIL_REPORT("timeout waiting for wakeup event"); + return -ETIME; + } + + IPA_UT_LOG("BEFORE resume\n"); + rc = ipa_test_mhi_resume(); + if (rc) 
{ + IPA_UT_LOG("resume failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("resume fail"); + return rc; + } + IPA_UT_LOG("AFTER resume\n"); + + /* check for MSI interrupt one channels */ + IPA_MHI_TEST_CHECK_MSI_INTR(false, timeout); + if (timeout) { + IPA_UT_LOG("fail: transfer timeout\n"); + IPA_UT_TEST_FAIL_REPORT("timeout on xfer"); + return -EFAULT; + } + + /* compare the two buffers */ + if (memcmp(test_mhi_ctx->in_buffer.base, + test_mhi_ctx->out_buffer.base, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) { + IPA_UT_LOG("fail: buffer are not equal\n"); + IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * 1. queue OUT RE/buffer + * 2. wait for MSI on OUT + * 3. Do 1. and 2. till got MSI wait timeout (ch full / holb) + */ +static int ipa_mhi_test_create_full_channel(int *submitted_packets) +{ + int i; + bool timeout = true; + int rc; + + if (!submitted_packets) { + IPA_UT_LOG("Input error\n"); + return -EINVAL; + } + + *submitted_packets = 0; + + for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) + memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1); + + do { + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + IPA_UT_LOG("submitting OUT buffer\n"); + timeout = true; + /* queue REs for OUT side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + &test_mhi_ctx->out_buffer, + 1, + true, + true, + false, + true); + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("fail OUT q re"); + return rc; + } + (*submitted_packets)++; + + IPA_UT_LOG("waiting for MSI\n"); + for (i = 0; i < 10; i++) { + if (*((u32 *)test_mhi_ctx->msi.base) == + (0x10000000 | + (IPA_MHI_TEST_FIRST_EVENT_RING_ID))) { + IPA_UT_LOG("got MSI\n"); + timeout = false; + break; + } + msleep(20); + } + } while (!timeout); + + return 0; +} + +/** + * To be run during test + * 1. queue OUT RE/buffer + * 2. wait for MSI on OUT + * 3. Do 1. and 2. till got MSI wait timeout (ch full) + * 4. suspend - it should fail with -EAGAIN - M1 is rejected + * 5. foreach submitted pkt, do the next steps + * 6. queue IN RE/buffer + * 7. wait for MSI + * 8. compare IN and OUT buffers + */ +static int ipa_mhi_test_suspend_full_channel(bool force) +{ + int rc; + bool timeout; + int submitted_packets = 0; + + rc = ipa_mhi_test_create_full_channel(&submitted_packets); + if (rc) { + IPA_UT_LOG("fail create full channel\n"); + IPA_UT_TEST_FAIL_REPORT("fail create full channel"); + return rc; + } + + IPA_UT_LOG("BEFORE suspend\n"); + rc = ipa_mhi_test_suspend(force, false); + if (rc) { + IPA_UT_LOG("ipa_mhi_suspend did not returned -EAGAIN. 
rc %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("test suspend fail"); + return -EFAULT; + } + IPA_UT_LOG("AFTER suspend\n"); + + while (submitted_packets) { + memset(test_mhi_ctx->in_buffer.base, 0, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + timeout = true; + /* queue RE for IN side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q re"); + return rc; + } + + IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout); + if (timeout) { + IPA_UT_LOG("transfer failed - timeout\n"); + IPA_UT_TEST_FAIL_REPORT("timeout on xfer"); + return -EFAULT; + } + + /* compare the two buffers */ + if (memcmp(test_mhi_ctx->in_buffer.base, + test_mhi_ctx->out_buffer.base, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) { + IPA_UT_LOG("buffer are not equal\n"); + IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer"); + return -EFAULT; + } + + submitted_packets--; + } + + return 0; +} + +/** + * To be called from test + * 1. suspend + * 2. reset to M0 state + */ +static int ipa_mhi_test_suspend_and_reset(struct ipa_test_mhi_context *ctx) +{ + int rc; + + IPA_UT_LOG("BEFORE suspend\n"); + rc = ipa_mhi_test_suspend(false, true); + if (rc) { + IPA_UT_LOG("suspend failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend fail"); + return rc; + } + IPA_UT_LOG("AFTER suspend\n"); + + rc = ipa_mhi_test_reset(ctx, false); + if (rc) { + IPA_UT_LOG("reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("reset fail"); + return rc; + } + + return 0; +} + +/** + * To be run during test + * 1. manualy update wp + * 2. suspend - should succeed + * 3. restore wp value + */ +static int ipa_mhi_test_suspend_wp_update(void) +{ + int rc; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + u64 old_wp; + u64 phys_addr; + + /* simulate a write by updating the wp */ + p_mmio = test_mhi_ctx->mmio_buf.base; + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + old_wp = p_ch_ctx_array->wp; + p_ch_ctx_array->wp += 16; + + IPA_UT_LOG("BEFORE suspend\n"); + rc = ipa_mhi_test_suspend(false, false); + if (rc) { + IPA_UT_LOG("suspend failed rc %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend fail"); + p_ch_ctx_array->wp = old_wp; + return rc; + } + IPA_UT_LOG("AFTER suspend\n"); + + p_ch_ctx_array->wp = old_wp; + + return 0; +} + +/** + * To be run during test + * 1. create open aggr by sending data + * 2. channel reset (disconnect/connet) + * 3. validate no aggr. open after reset + * 4. disable aggr. 
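+ *
+ * (Illustrative note, per the code below: step 3 is verified by reading
+ *  the IPA_STATE_AGGR_ACTIVE register and expecting 0, and step 4 is done
+ *  by passing a zeroed struct ipa_ep_cfg_aggr to ipa3_cfg_ep_aggr() on the
+ *  consumer pipe handle.)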
+ */ +static int ipa_mhi_test_channel_reset_aggr_open(void) +{ + int rc; + u32 aggr_state_active; + struct ipa_ep_cfg_aggr ep_aggr; + + IPA_UT_LOG("Entry\n"); + + rc = ipa_mhi_test_create_aggr_open_frame(); + if (rc) { + IPA_UT_LOG("failed create open aggr rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail creare open aggr frame"); + return rc; + } + + rc = ipa_mhi_test_channel_reset(); + if (rc) { + IPA_UT_LOG("channel reset failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("channel reset fail"); + return rc; + } + + aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + IPADBG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active); + if (aggr_state_active != 0) { + IPA_UT_LOG("aggregation frame open after reset!\n"); + IPA_UT_LOG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active); + IPA_UT_TEST_FAIL_REPORT("open aggr after reset"); + return -EFAULT; + } + + memset(&ep_aggr, 0, sizeof(ep_aggr)); + rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr); + if (rc) { + IPA_UT_LOG("failed to configure aggr"); + IPA_UT_TEST_FAIL_REPORT("fail to disable aggr"); + return rc; + } + + return rc; +} + +/** + * To be run during test + * 1. queue OUT RE/buffer + * 2. wait for MSI on OUT + * 3. Do 1. and 2. till got MSI wait timeout (ch full) + * 4. channel reset + * disconnect and reconnect the prod and cons + * 5. queue IN RE/buffer and ring DB + * 6. wait for MSI - should get timeout as channels were reset + * 7. reset again + */ +static int ipa_mhi_test_channel_reset_ipa_holb(void) +{ + int rc; + int submitted_packets = 0; + bool timeout; + + IPA_UT_LOG("Entry\n"); + + rc = ipa_mhi_test_create_full_channel(&submitted_packets); + if (rc) { + IPA_UT_LOG("fail create full channel rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail create full channel"); + return rc; + } + + rc = ipa_mhi_test_channel_reset(); + if (rc) { + IPA_UT_LOG("channel reset failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("channel reset fail"); + return rc; + } + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + timeout = true; + /* queue RE for IN side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q re"); + return rc; + } + submitted_packets--; + + IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout); + if (!timeout) { + IPA_UT_LOG("transfer succeed although we had reset\n"); + IPA_UT_TEST_FAIL_REPORT("xfer succeed although we had reset"); + return -EFAULT; + } + + rc = ipa_mhi_test_channel_reset(); + if (rc) { + IPA_UT_LOG("channel reset failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("channel reset fail"); + return rc; + } + + return rc; +} + + +/** + * TEST: mhi reset in READY state + * 1. init to ready state (without start and connect) + * 2. reset (destroy and re-init) + * 2. 
destroy + */ +static int ipa_mhi_test_reset_ready_state(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(true); + if (rc) { + IPA_UT_LOG("init to Ready state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init to ready state"); + return rc; + } + + rc = ipa_mhi_test_reset(ctx, true); + if (rc) { + IPA_UT_LOG("reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi reset in M0 state + * 1. init to M0 state (with start and connect) + * 2. reset (destroy and re-init) + * 2. destroy + */ +static int ipa_mhi_test_reset_m0_state(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT + ("fail to init to M0 state (w/ start and connect)"); + return rc; + } + + rc = ipa_mhi_test_reset(ctx, false); + if (rc) { + IPA_UT_LOG("reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi in-loop reset in M0 state + * 1. init to M0 state (with start and connect) + * 2. reset (destroy and re-init) in-loop + * 3. destroy + */ +static int ipa_mhi_test_inloop_reset_m0_state(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT + ("fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_reset, rc, ctx, false); + if (rc) { + IPA_UT_LOG("in-loop reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT( + "reset (destroy/re-init) in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data with reset + * 1. init to M0 state (with start and connect) + * 2. reset (destroy and re-init) + * 3. loopback data + * 4. reset (destroy and re-init) + * 5. loopback data again + * 6. 
destroy + */ +static int ipa_mhi_test_loopback_data_with_reset(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + rc = ipa_mhi_test_reset(ctx, false); + if (rc) { + IPA_UT_LOG("reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_reset(ctx, false); + if (rc) { + IPA_UT_LOG("reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi reset in suspend state + * 1. init to M0 state (with start and connect) + * 2. suspend + * 3. reset (destroy and re-init) + * 4. destroy + */ +static int ipa_mhi_test_reset_on_suspend(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return -EFAULT; + } + + rc = ipa_mhi_test_suspend_and_reset(ctx); + if (rc) { + IPA_UT_LOG("suspend and reset failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend and then reset failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return -EFAULT; + } + + return 0; +} + +/** + * TEST: mhi in-loop reset in suspend state + * 1. init to M0 state (with start and connect) + * 2. suspend + * 3. reset (destroy and re-init) + * 4. Do 2 and 3 in loop + * 3. destroy + */ +static int ipa_mhi_test_inloop_reset_on_suspend(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_and_reset, rc, ctx); + if (rc) { + IPA_UT_LOG("in-loop reset in suspend failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("fail to in-loop reset while suspend"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data with reset + * 1. 
init to M0 state (with start and connect) + * 2. suspend + * 3. reset (destroy and re-init) + * 4. loopback data + * 5. suspend + * 5. reset (destroy and re-init) + * 6. destroy + */ +static int ipa_mhi_test_loopback_data_with_reset_on_suspend(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + rc = ipa_mhi_test_suspend_and_reset(ctx); + if (rc) { + IPA_UT_LOG("suspend and reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("fail to suspend and then reset"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_suspend_and_reset(ctx); + if (rc) { + IPA_UT_LOG("suspend and reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("fail to suspend and then reset"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop suspend/resume + * 1. init to M0 state (with start and connect) + * 2. in loop suspend/resume + * 3. loopback data + * 4. destroy + */ +static int ipa_mhi_test_in_loop_suspend_resume(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_resume, rc); + if (rc) { + IPA_UT_LOG("suspend resume failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("in loop suspend/resume failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop suspend/resume with aggr open + * 1. init to M0 state (with start and connect) + * 2. in loop suspend/resume with open aggr. + * 3. loopback data + * 4. 
destroy + */ +static int ipa_mhi_test_in_loop_suspend_resume_aggr_open(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_aggr_open, + rc, false); + if (rc) { + IPA_UT_LOG("suspend resume with aggr open failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop suspend/resume with open aggr failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop force suspend/resume with aggr open + * 1. init to M0 state (with start and connect) + * 2. in loop force suspend/resume with open aggr. + * 3. loopback data + * 4. destroy + */ +static int ipa_mhi_test_in_loop_force_suspend_resume_aggr_open(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_aggr_open, + rc, true); + if (rc) { + IPA_UT_LOG("force suspend resume with aggr open failed rc=%d", + rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop force suspend/resume with open aggr failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop suspend/host wakeup resume + * 1. init to M0 state (with start and connect) + * 2. in loop suspend/resume with host wakeup + * 3. loopback data + * 4. 
destroy + */ +static int ipa_mhi_test_in_loop_suspend_host_wakeup(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_host_wakeup, rc); + if (rc) { + IPA_UT_LOG("suspend host wakeup resume failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop suspend/resume with hsot wakeup failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop rejected suspend as full channel + * 1. init to M0 state (with start and connect) + * 2. in loop rejrected suspend + * 3. loopback data + * 4. destroy + */ +static int ipa_mhi_test_in_loop_reject_suspend_full_channel(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_full_channel, + rc, false); + if (rc) { + IPA_UT_LOG("full channel rejected suspend failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop rejected suspend due to full channel failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop rejected force suspend as full channel + * 1. init to M0 state (with start and connect) + * 2. in loop force rejected suspend + * 3. loopback data + * 4. 
destroy + */ +static int ipa_mhi_test_in_loop_reject_force_suspend_full_channel(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_full_channel, + rc, true); + if (rc) { + IPA_UT_LOG("full channel rejected force suspend failed rc=%d", + rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop force rejected suspend as full ch failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop suspend after wp manual update + * 1. init to M0 state (with start and connect) + * 2. in loop suspend after wp update + * 3. loopback data + * 4. destroy + */ +static int ipa_mhi_test_in_loop_suspend_resume_wp_update(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_wp_update, rc); + if (rc) { + IPA_UT_LOG("suspend after wp update failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop suspend after wp update failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop channel reset (disconnect/connect) + * 1. init to M0 state (with start and connect) + * 2. in loop channel reset (disconnect/connect) + * 3. loopback data + * 4. 
destroy + */ +static int ipa_mhi_test_in_loop_channel_reset(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset, rc); + if (rc) { + IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d", + rc); + IPA_UT_TEST_FAIL_REPORT("in loop channel reset failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop channel reset (disconnect/connect) + * 1. init to M0 state (with start and connect) + * 2. in loop channel reset (disconnect/connect) with open aggr + * 3. loopback data + * 4. destroy + */ +static int ipa_mhi_test_in_loop_channel_reset_aggr_open(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset_aggr_open, rc); + if (rc) { + IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d", + rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop channel reset with open aggr failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop channel reset (disconnect/connect) + * 1. init to M0 state (with start and connect) + * 2. in loop channel reset (disconnect/connect) with channel in HOLB + * 3. loopback data + * 4. 
destroy + */ +static int ipa_mhi_test_in_loop_channel_reset_ipa_holb(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset_ipa_holb, rc); + if (rc) { + IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d", + rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop channel reset with channel HOLB failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/* Suite definition block */ +IPA_UT_DEFINE_SUITE_START(mhi, "MHI for GSI", + ipa_test_mhi_suite_setup, ipa_test_mhi_suite_teardown) +{ + IPA_UT_ADD_TEST(reset_ready_state, + "reset test in Ready state", + ipa_mhi_test_reset_ready_state, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(reset_m0_state, + "reset test in M0 state", + ipa_mhi_test_reset_m0_state, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(inloop_reset_m0_state, + "several reset iterations in M0 state", + ipa_mhi_test_inloop_reset_m0_state, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(loopback_data_with_reset_on_m0, + "reset before and after loopback data in M0 state", + ipa_mhi_test_loopback_data_with_reset, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(reset_on_suspend, + "reset test in suspend state", + ipa_mhi_test_reset_on_suspend, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(inloop_reset_on_suspend, + "several reset iterations in suspend state", + ipa_mhi_test_inloop_reset_on_suspend, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(loopback_data_with_reset_on_suspend, + "reset before and after loopback data in suspend state", + ipa_mhi_test_loopback_data_with_reset_on_suspend, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(suspend_resume, + "several suspend/resume iterations", + ipa_mhi_test_in_loop_suspend_resume, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(suspend_resume_with_open_aggr, + "several suspend/resume iterations with open aggregation frame", + ipa_mhi_test_in_loop_suspend_resume_aggr_open, + true, IPA_HW_v3_0, IPA_HW_v3_5_1), + IPA_UT_ADD_TEST(force_suspend_resume_with_open_aggr, + "several force suspend/resume iterations with open aggregation frame", + ipa_mhi_test_in_loop_force_suspend_resume_aggr_open, + true, IPA_HW_v3_0, IPA_HW_v3_5_1), + IPA_UT_ADD_TEST(suspend_resume_with_host_wakeup, + "several suspend and host wakeup resume iterations", + ipa_mhi_test_in_loop_suspend_host_wakeup, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(reject_suspend_channel_full, + "several rejected suspend iterations due to full channel", + ipa_mhi_test_in_loop_reject_suspend_full_channel, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(reject_force_suspend_channel_full, + "several rejected force suspend iterations due to full channel", + ipa_mhi_test_in_loop_reject_force_suspend_full_channel, + true, IPA_HW_v3_0, IPA_HW_MAX), + 
IPA_UT_ADD_TEST(suspend_resume_manual_wp_update,
+		"several suspend/resume iterations after simulating a write via manual wp update",
+		ipa_mhi_test_in_loop_suspend_resume_wp_update,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(channel_reset,
+		"several channel reset (disconnect/connect) iterations",
+		ipa_mhi_test_in_loop_channel_reset,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(channel_reset_aggr_open,
+		"several channel reset (disconnect/connect) iterations with open aggregation frame",
+		ipa_mhi_test_in_loop_channel_reset_aggr_open,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(channel_reset_ipa_holb,
+		"several channel reset (disconnect/connect) iterations with channel in HOLB state",
+		ipa_mhi_test_in_loop_channel_reset_ipa_holb,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+} IPA_UT_DEFINE_SUITE_END(mhi);
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
new file mode 100644
index 000000000000..9e10ce6320db
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
@@ -0,0 +1,1115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include "../ipa_v3/ipa_i.h"
+#include "ipa_ut_framework.h"
+#include "ipa_ut_suite_list.h"
+#include "ipa_ut_i.h"
+
+
+#define IPA_UT_DEBUG_WRITE_BUF_SIZE 256
+#define IPA_UT_DEBUG_READ_BUF_SIZE 1024
+
+#define IPA_UT_READ_WRITE_DBG_FILE_MODE 0664
+
+/**
+ * struct ipa_ut_context - I/S context
+ * @inited: Framework was initialized (IPA became ready and the enable file
+ *          was created)
+ * @enabled: All tests and suite debugfs files are created
+ * @lock: Lock for mutual exclusion
+ * @ipa_dbgfs_root: IPA root debugfs folder
+ * @test_dbgfs_root: UT root debugfs folder. Sub-folder of IPA root
+ * @test_dbgfs_suites: Suites root debugfs folder.
Sub-folder of UT root + * @wq: workqueue struct for write operations + */ +struct ipa_ut_context { + bool inited; + bool enabled; + struct mutex lock; + struct dentry *ipa_dbgfs_root; + struct dentry *test_dbgfs_root; + struct dentry *test_dbgfs_suites; + struct workqueue_struct *wq; +}; + +/** + * struct ipa_ut_dbgfs_test_write_work_ctx - work_queue context + * @dbgfs_Work: work_struct for the write_work + * @meta_type: See enum ipa_ut_meta_test_type + * @user_data: user data usually used to point to suite or test object + */ +struct ipa_ut_dbgfs_test_write_work_ctx { + struct work_struct dbgfs_work; + long meta_type; + void *user_data; +}; + +static ssize_t ipa_ut_dbgfs_enable_read(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos); +static ssize_t ipa_ut_dbgfs_enable_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos); +static ssize_t ipa_ut_dbgfs_test_read(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos); +static ssize_t ipa_ut_dbgfs_test_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos); +static int ipa_ut_dbgfs_all_test_open(struct inode *inode, + struct file *filp); +static int ipa_ut_dbgfs_regression_test_open(struct inode *inode, + struct file *filp); +static ssize_t ipa_ut_dbgfs_meta_test_read(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos); +static ssize_t ipa_ut_dbgfs_meta_test_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos); + + +static const struct file_operations ipa_ut_dbgfs_enable_fops = { + .read = ipa_ut_dbgfs_enable_read, + .write = ipa_ut_dbgfs_enable_write, +}; +static const struct file_operations ipa_ut_dbgfs_test_fops = { + .read = ipa_ut_dbgfs_test_read, + .write = ipa_ut_dbgfs_test_write, +}; +static const struct file_operations ipa_ut_dbgfs_all_test_fops = { + .open = ipa_ut_dbgfs_all_test_open, + .read = ipa_ut_dbgfs_meta_test_read, + .write = ipa_ut_dbgfs_meta_test_write, +}; +static const struct file_operations ipa_ut_dbgfs_regression_test_fops = { + .open = ipa_ut_dbgfs_regression_test_open, + .read = ipa_ut_dbgfs_meta_test_read, + .write = ipa_ut_dbgfs_meta_test_write, +}; + +static struct ipa_ut_context *ipa_ut_ctx; +char *_IPA_UT_TEST_LOG_BUF_NAME; +struct ipa_ut_tst_fail_report + _IPA_UT_TEST_FAIL_REPORT_DATA[_IPA_UT_TEST_FAIL_REPORT_SIZE]; +u32 _IPA_UT_TEST_FAIL_REPORT_IDX; + +/** + * ipa_ut_print_log_buf() - Dump given buffer via kernel error mechanism + * @buf: Buffer to print + * + * Tokenize the string according to new-line and then print + * + * Note: Assumes lock acquired + */ +static void ipa_ut_print_log_buf(char *buf) +{ + char *token; + + if (!buf) { + IPA_UT_ERR("Input error - no buf\n"); + return; + } + + for (token = strsep(&buf, "\n"); token; token = strsep(&buf, "\n")) + pr_err("%s\n", token); +} + +/** + * ipa_ut_dump_fail_report_stack() - dump the report info stack via kernel err + * + * Note: Assumes lock acquired + */ +static void ipa_ut_dump_fail_report_stack(void) +{ + int i; + + IPA_UT_DBG("Entry\n"); + + if (_IPA_UT_TEST_FAIL_REPORT_IDX == 0) { + IPA_UT_DBG("no report info\n"); + return; + } + + for (i = 0 ; i < _IPA_UT_TEST_FAIL_REPORT_IDX; i++) { + if (i == 0) + pr_err("***** FAIL INFO STACK *****:\n"); + else + pr_err("Called From:\n"); + + pr_err("\tFILE = %s\n\tFUNC = %s()\n\tLINE = %d\n", + _IPA_UT_TEST_FAIL_REPORT_DATA[i].file, + _IPA_UT_TEST_FAIL_REPORT_DATA[i].func, + _IPA_UT_TEST_FAIL_REPORT_DATA[i].line); + pr_err("\t%s\n", _IPA_UT_TEST_FAIL_REPORT_DATA[i].info); + } +} + +/** + * 
ipa_ut_show_suite_exec_summary() - Show tests run summary + * @suite: suite to print its running summary + * + * Print list of succeeded tests, failed tests and skipped tests + * + * Note: Assumes lock acquired + */ +static void ipa_ut_show_suite_exec_summary(const struct ipa_ut_suite *suite) +{ + int i; + + IPA_UT_DBG("Entry\n"); + + ipa_assert_on(!suite); + + pr_info("\n\n"); + pr_info("\t Suite '%s' summary\n", suite->meta_data->name); + pr_info("===========================\n"); + pr_info("Successful tests\n"); + pr_info("----------------\n"); + for (i = 0 ; i < suite->tests_cnt ; i++) { + if (suite->tests[i].res != IPA_UT_TEST_RES_SUCCESS) + continue; + pr_info("\t%s\n", suite->tests[i].name); + } + pr_info("\nFailed tests\n"); + pr_info("------------\n"); + for (i = 0 ; i < suite->tests_cnt ; i++) { + if (suite->tests[i].res != IPA_UT_TEST_RES_FAIL) + continue; + pr_info("\t%s\n", suite->tests[i].name); + } + pr_info("\nSkipped tests\n"); + pr_info("-------------\n"); + for (i = 0 ; i < suite->tests_cnt ; i++) { + if (suite->tests[i].res != IPA_UT_TEST_RES_SKIP) + continue; + pr_info("\t%s\n", suite->tests[i].name); + } + pr_info("\n"); +} + +/** + * ipa_ut_dbgfs_meta_test_write_work_func() - Debugfs write func for a + * for a meta test + * @params: work struct containing write fops and completion object + * + * Used to run all/regression tests in a suite + * Create log buffer that the test can use to store ongoing logs + * IPA clocks need to be voted. + * Run setup() once before running the tests and teardown() once after + * If no such call-backs then ignore it; if failed then fail the suite + * Print tests progress during running + * Test log and fail report will be showed only if the test failed. + * Finally show Summary of the suite tests running + * + * Note: If test supported IPA H/W version mismatch, skip it + * If a test lack run function, skip it + * If test doesn't belong to regression and it is regression run, skip it + * Note: Running mode: Do not stop running on failure + * + * Return: Negative in failure, given characters amount in success + */ +static void ipa_ut_dbgfs_meta_test_write_work_func(struct work_struct *work) +{ + struct ipa_ut_dbgfs_test_write_work_ctx *write_work_ctx; + struct ipa_ut_suite *suite; + int i; + enum ipa_hw_type ipa_ver; + int rc = 0; + long meta_type; + bool tst_fail = false; + + write_work_ctx = container_of(work, struct + ipa_ut_dbgfs_test_write_work_ctx, dbgfs_work); + + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + suite = (struct ipa_ut_suite *)(write_work_ctx->user_data); + ipa_assert_on(!suite); + meta_type = write_work_ctx->meta_type; + IPA_UT_DBG("Meta test type %ld\n", meta_type); + + _IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE, + GFP_KERNEL); + if (!_IPA_UT_TEST_LOG_BUF_NAME) { + IPA_UT_ERR("failed to allocate %d bytes\n", + _IPA_UT_TEST_LOG_BUF_SIZE); + rc = -ENOMEM; + goto unlock_mutex; + } + + if (!suite->tests_cnt || !suite->tests) { + pr_info("No tests for suite '%s'\n", suite->meta_data->name); + goto free_mem; + } + + ipa_ver = ipa_get_hw_type(); + + IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT"); + + if (suite->meta_data->setup) { + pr_info("*** Suite '%s': Run setup ***\n", + suite->meta_data->name); + rc = suite->meta_data->setup(&suite->meta_data->priv); + if (rc) { + IPA_UT_ERR("Setup failed for suite %s\n", + suite->meta_data->name); + rc = -EFAULT; + goto release_clock; + } + } else { + pr_info("*** Suite '%s': No Setup ***\n", + suite->meta_data->name); + } + + pr_info("*** Suite '%s': Run 
%s tests ***\n\n", + suite->meta_data->name, + meta_type == IPA_UT_META_TEST_REGRESSION ? "regression" : "all" + ); + for (i = 0 ; i < suite->tests_cnt ; i++) { + if (meta_type == IPA_UT_META_TEST_REGRESSION && + !suite->tests[i].run_in_regression) { + pr_info( + "*** Test '%s': Skip - Not in regression ***\n\n" + , suite->tests[i].name); + suite->tests[i].res = IPA_UT_TEST_RES_SKIP; + continue; + } + if (suite->tests[i].min_ipa_hw_ver > ipa_ver || + suite->tests[i].max_ipa_hw_ver < ipa_ver) { + pr_info( + "*** Test '%s': Skip - IPA VER mismatch ***\n\n" + , suite->tests[i].name); + suite->tests[i].res = IPA_UT_TEST_RES_SKIP; + continue; + } + if (!suite->tests[i].run) { + pr_info( + "*** Test '%s': Skip - No Run function ***\n\n" + , suite->tests[i].name); + suite->tests[i].res = IPA_UT_TEST_RES_SKIP; + continue; + } + + _IPA_UT_TEST_LOG_BUF_NAME[0] = '\0'; + _IPA_UT_TEST_FAIL_REPORT_IDX = 0; + pr_info("*** Test '%s': Running... ***\n", + suite->tests[i].name); + rc = suite->tests[i].run(suite->meta_data->priv); + if (rc) { + tst_fail = true; + suite->tests[i].res = IPA_UT_TEST_RES_FAIL; + ipa_ut_print_log_buf(_IPA_UT_TEST_LOG_BUF_NAME); + } else { + suite->tests[i].res = IPA_UT_TEST_RES_SUCCESS; + } + + pr_info(">>>>>>**** TEST '%s': %s ****<<<<<<\n", + suite->tests[i].name, tst_fail ? "FAIL" : "SUCCESS"); + + if (tst_fail) + ipa_ut_dump_fail_report_stack(); + + pr_info("\n"); + } + + if (suite->meta_data->teardown) { + pr_info("*** Suite '%s': Run Teardown ***\n", + suite->meta_data->name); + rc = suite->meta_data->teardown(suite->meta_data->priv); + if (rc) { + IPA_UT_ERR("Teardown failed for suite %s\n", + suite->meta_data->name); + rc = -EFAULT; + goto release_clock; + } + } else { + pr_info("*** Suite '%s': No Teardown ***\n", + suite->meta_data->name); + } + + ipa_ut_show_suite_exec_summary(suite); + +release_clock: + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT"); +free_mem: + kfree(_IPA_UT_TEST_LOG_BUF_NAME); + _IPA_UT_TEST_LOG_BUF_NAME = NULL; +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + kfree(write_work_ctx); +} + +/* + * ipa_ut_dbgfs_meta_test_write() - Debugfs write func for a meta test + * @params: write fops + * + * Run all tests in a suite using a work queue so it does not race with + * debugfs_remove_recursive + * + * Return: Negative if failure. Amount of characters written if success. + */ +static ssize_t ipa_ut_dbgfs_meta_test_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct ipa_ut_dbgfs_test_write_work_ctx *write_work_ctx; + + write_work_ctx = kzalloc(sizeof(*write_work_ctx), GFP_KERNEL); + if (!write_work_ctx) { + IPA_UT_ERR("kzalloc err.\n"); + return -ENOMEM; + } + + write_work_ctx->user_data = file->f_inode->i_private; + write_work_ctx->meta_type = (long)(file->private_data); + + INIT_WORK(&write_work_ctx->dbgfs_work, + ipa_ut_dbgfs_meta_test_write_work_func); + + queue_work(ipa_ut_ctx->wq, &write_work_ctx->dbgfs_work); + + return count; +} + +/** + * ipa_ut_dbgfs_meta_test_read() - Debugfs read func for a meta test + * @params: read fops + * + * Meta test, is a test that describes other test or bunch of tests. + * for example, the 'all' test. Running this test will run all + * the tests in the suite. + * + * Show information regard the suite. E.g. 
name and description + * If regression - List the regression tests names + * + * Return: Amount of characters written to user space buffer + */ +static ssize_t ipa_ut_dbgfs_meta_test_read(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + char *buf; + struct ipa_ut_suite *suite; + int nbytes; + ssize_t cnt; + long meta_type; + int i; + + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + suite = file->f_inode->i_private; + ipa_assert_on(!suite); + meta_type = (long)(file->private_data); + IPA_UT_DBG("Meta test type %ld\n", meta_type); + + buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE, GFP_KERNEL); + if (!buf) { + IPA_UT_ERR("failed to allocate %d bytes\n", + IPA_UT_DEBUG_READ_BUF_SIZE); + cnt = 0; + goto unlock_mutex; + } + + if (meta_type == IPA_UT_META_TEST_ALL) { + nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE, + "\tMeta-test running all the tests in the suite:\n" + "\tSuite Name: %s\n" + "\tDescription: %s\n" + "\tNumber of test in suite: %zu\n", + suite->meta_data->name, + suite->meta_data->desc ?: "", + suite->tests_cnt); + } else { + nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE, + "\tMeta-test running regression tests in the suite:\n" + "\tSuite Name: %s\n" + "\tDescription: %s\n" + "\tRegression tests:\n", + suite->meta_data->name, + suite->meta_data->desc ?: ""); + for (i = 0 ; i < suite->tests_cnt ; i++) { + if (!suite->tests[i].run_in_regression) + continue; + nbytes += scnprintf(buf + nbytes, + IPA_UT_DEBUG_READ_BUF_SIZE - nbytes, + "\t\t%s\n", suite->tests[i].name); + } + } + + cnt = simple_read_from_buffer(ubuf, count, ppos, buf, nbytes); + kfree(buf); + +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + return cnt; +} + +/** + * ipa_ut_dbgfs_regression_test_open() - Debugfs open function for + * 'regression' tests + * @params: open fops + * + * Mark "Regression tests" for meta-tests later operations. + * + * Return: Zero (always success). + */ +static int ipa_ut_dbgfs_regression_test_open(struct inode *inode, + struct file *filp) +{ + IPA_UT_DBG("Entry\n"); + + filp->private_data = (void *)(IPA_UT_META_TEST_REGRESSION); + + return 0; +} + +/** + * ipa_ut_dbgfs_all_test_open() - Debugfs open function for 'all' tests + * @params: open fops + * + * Mark "All tests" for meta-tests later operations. + * + * Return: Zero (always success). + */ +static int ipa_ut_dbgfs_all_test_open(struct inode *inode, + struct file *filp) +{ + IPA_UT_DBG("Entry\n"); + + filp->private_data = (void *)(IPA_UT_META_TEST_ALL); + + return 0; +} + +/** + * ipa_ut_dbgfs_test_write() - Debugfs write function for a test + * @params: write fops + * + * Used to run a test. + * Create log buffer that the test can use to store ongoing logs + * IPA clocks need to be voted. + * Run setup() before the test and teardown() after the tests. + * If no such call-backs then ignore it; if failed then fail the test + * If all succeeds, no printing to user + * If failed, test logs and failure report will be printed to user + * + * Note: Test must has run function and it's supported IPA H/W version + * must be matching. Otherwise test will fail. 
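+ *
+ * Example (illustrative sketch only; my_test_run and the checked condition
+ * are hypothetical) of the run() callback a test provides to the framework:
+ *
+ *	static int my_test_run(void *priv)
+ *	{
+ *		IPA_UT_LOG("starting\n");
+ *		if (!priv) {
+ *			IPA_UT_LOG("no suite priv\n");
+ *			IPA_UT_TEST_FAIL_REPORT("missing suite private data");
+ *			return -EFAULT;
+ *		}
+ *		return 0;
+ *	}
+ *
+ * The test is triggered from user space by writing any value to its debugfs
+ * file, e.g. "echo 1 > <ipa debugfs root>/test/suites/<suite>/<test>" (the
+ * exact path depends on the debugfs mount point).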
+ * + * Return: Negative in failure, given characters amount in success + */ +static void ipa_ut_dbgfs_test_write_work_func(struct work_struct *work) +{ + struct ipa_ut_dbgfs_test_write_work_ctx *write_work_ctx; + struct ipa_ut_test *test; + struct ipa_ut_suite *suite; + bool tst_fail = false; + int rc = 0; + enum ipa_hw_type ipa_ver; + + write_work_ctx = container_of(work, struct + ipa_ut_dbgfs_test_write_work_ctx, dbgfs_work); + + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + test = (struct ipa_ut_test *)(write_work_ctx->user_data); + ipa_assert_on(!test); + + _IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE, + GFP_KERNEL); + if (!_IPA_UT_TEST_LOG_BUF_NAME) { + IPA_UT_ERR("failed to allocate %d bytes\n", + _IPA_UT_TEST_LOG_BUF_SIZE); + rc = -ENOMEM; + goto unlock_mutex; + } + + if (!test->run) { + IPA_UT_ERR("*** Test %s - No run func ***\n", + test->name); + rc = -EFAULT; + goto free_mem; + } + + ipa_ver = ipa_get_hw_type(); + if (test->min_ipa_hw_ver > ipa_ver || + test->max_ipa_hw_ver < ipa_ver) { + IPA_UT_ERR("Cannot run test %s on IPA HW Ver %s\n", + test->name, ipa_get_version_string(ipa_ver)); + rc = -EFAULT; + goto free_mem; + } + + suite = test->suite; + if (!suite || !suite->meta_data) { + IPA_UT_ERR("test %s with invalid suite\n", test->name); + rc = -EINVAL; + goto free_mem; + } + + IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT"); + + if (suite->meta_data->setup) { + IPA_UT_DBG("*** Suite '%s': Run setup ***\n", + suite->meta_data->name); + rc = suite->meta_data->setup(&suite->meta_data->priv); + if (rc) { + IPA_UT_ERR("Setup failed for suite %s\n", + suite->meta_data->name); + rc = -EFAULT; + goto release_clock; + } + } else { + IPA_UT_DBG("*** Suite '%s': No Setup ***\n", + suite->meta_data->name); + } + + IPA_UT_DBG("*** Test '%s': Running... ***\n", test->name); + _IPA_UT_TEST_FAIL_REPORT_IDX = 0; + rc = test->run(suite->meta_data->priv); + if (rc) + tst_fail = true; + IPA_UT_DBG("*** Test %s - ***\n", tst_fail ? 
"FAIL" : "SUCCESS"); + if (tst_fail) { + pr_info("=================>>>>>>>>>>>\n"); + ipa_ut_print_log_buf(_IPA_UT_TEST_LOG_BUF_NAME); + pr_info("**** TEST %s FAILED ****\n", test->name); + ipa_ut_dump_fail_report_stack(); + pr_info("<<<<<<<<<<<=================\n"); + } + + if (suite->meta_data->teardown) { + IPA_UT_DBG("*** Suite '%s': Run Teardown ***\n", + suite->meta_data->name); + rc = suite->meta_data->teardown(suite->meta_data->priv); + if (rc) { + IPA_UT_ERR("Teardown failed for suite %s\n", + suite->meta_data->name); + rc = -EFAULT; + goto release_clock; + } + } else { + IPA_UT_DBG("*** Suite '%s': No Teardown ***\n", + suite->meta_data->name); + } + +release_clock: + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT"); +free_mem: + kfree(_IPA_UT_TEST_LOG_BUF_NAME); + _IPA_UT_TEST_LOG_BUF_NAME = NULL; +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + kfree(write_work_ctx); +} + +static ssize_t ipa_ut_dbgfs_test_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct ipa_ut_dbgfs_test_write_work_ctx *write_work_ctx; + + write_work_ctx = kzalloc(sizeof(*write_work_ctx), GFP_KERNEL); + if (!write_work_ctx) { + IPA_UT_ERR("kzalloc err.\n"); + return -ENOMEM; + } + + write_work_ctx->user_data = file->f_inode->i_private; + write_work_ctx->meta_type = (long)(file->private_data); + + INIT_WORK(&write_work_ctx->dbgfs_work, + ipa_ut_dbgfs_test_write_work_func); + + queue_work(ipa_ut_ctx->wq, &write_work_ctx->dbgfs_work); + + return count; +} +/** + * ipa_ut_dbgfs_test_read() - Debugfs read function for a test + * @params: read fops + * + * print information regard the test. E.g. name and description + * + * Return: Amount of characters written to user space buffer + */ +static ssize_t ipa_ut_dbgfs_test_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + char *buf; + struct ipa_ut_test *test; + int nbytes; + ssize_t cnt; + + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + test = file->f_inode->i_private; + ipa_assert_on(!test); + + buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE, GFP_KERNEL); + if (!buf) { + IPA_UT_ERR("failed to allocate %d bytes\n", + IPA_UT_DEBUG_READ_BUF_SIZE); + cnt = 0; + goto unlock_mutex; + } + + nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE, + "\t Test Name: %s\n" + "\t Description: %s\n" + "\t Suite Name: %s\n" + "\t Run In Regression: %s\n" + "\t Supported IPA versions: [%s -> %s]\n", + test->name, test->desc ?: "", test->suite->meta_data->name, + test->run_in_regression ? "Yes" : "No", + ipa_get_version_string(test->min_ipa_hw_ver), + test->max_ipa_hw_ver == IPA_HW_MAX ? "MAX" : + ipa_get_version_string(test->max_ipa_hw_ver)); + + if (nbytes > count) + IPA_UT_ERR("User buf too small - return partial info\n"); + + cnt = simple_read_from_buffer(ubuf, count, ppos, buf, nbytes); + kfree(buf); + +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + return cnt; +} + +/** + * ipa_ut_framework_load_suites() - Load tests and expose them to user space + * + * Creates debugfs folder for each suite and then file for each test in it. + * Create debugfs "all" file for each suite for meta-test to run all tests. 
+ * + * Note: Assumes lock acquired + * + * Return: Zero in success, otherwise in failure + */ +int ipa_ut_framework_load_suites(void) +{ + int suite_idx; + int tst_idx; + struct ipa_ut_suite *suite; + struct dentry *s_dent; + struct dentry *f_dent; + + IPA_UT_DBG("Entry\n"); + + for (suite_idx = IPA_UT_SUITE_FIRST_INDEX; + suite_idx < IPA_UT_SUITES_COUNT; suite_idx++) { + suite = IPA_UT_GET_SUITE(suite_idx); + + if (!suite->meta_data->name) { + IPA_UT_ERR("No suite name\n"); + return -EFAULT; + } + + s_dent = debugfs_create_dir(suite->meta_data->name, + ipa_ut_ctx->test_dbgfs_suites); + + if (!s_dent || IS_ERR(s_dent)) { + IPA_UT_ERR("fail create dbg entry - suite %s\n", + suite->meta_data->name); + return -EFAULT; + } + + for (tst_idx = 0; tst_idx < suite->tests_cnt ; tst_idx++) { + if (!suite->tests[tst_idx].name) { + IPA_UT_ERR("No test name on suite %s\n", + suite->meta_data->name); + return -EFAULT; + } + f_dent = debugfs_create_file( + suite->tests[tst_idx].name, + IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent, + &suite->tests[tst_idx], + &ipa_ut_dbgfs_test_fops); + if (!f_dent || IS_ERR(f_dent)) { + IPA_UT_ERR("fail create dbg entry - tst %s\n", + suite->tests[tst_idx].name); + return -EFAULT; + } + } + + /* entry for meta-test all to run all tests in suites */ + f_dent = debugfs_create_file(_IPA_UT_RUN_ALL_TEST_NAME, + IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent, + suite, &ipa_ut_dbgfs_all_test_fops); + if (!f_dent || IS_ERR(f_dent)) { + IPA_UT_ERR("fail to create dbg entry - %s\n", + _IPA_UT_RUN_ALL_TEST_NAME); + return -EFAULT; + } + + /* + * entry for meta-test regression to run all regression + * tests in suites + */ + f_dent = debugfs_create_file(_IPA_UT_RUN_REGRESSION_TEST_NAME, + IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent, + suite, &ipa_ut_dbgfs_regression_test_fops); + if (!f_dent || IS_ERR(f_dent)) { + IPA_UT_ERR("fail to create dbg entry - %s\n", + _IPA_UT_RUN_ALL_TEST_NAME); + return -EFAULT; + } + } + + return 0; +} + +/** + * ipa_ut_framework_enable() - Enable the framework + * + * Creates the tests and suites debugfs entries and load them. + * This will expose the tests to user space. + * + * Return: Zero in success, otherwise in failure + */ +static int ipa_ut_framework_enable(void) +{ + int ret = 0; + + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + + if (ipa_ut_ctx->enabled) { + IPA_UT_ERR("Already enabled\n"); + goto unlock_mutex; + } + + ipa_ut_ctx->test_dbgfs_suites = debugfs_create_dir("suites", + ipa_ut_ctx->test_dbgfs_root); + if (!ipa_ut_ctx->test_dbgfs_suites || + IS_ERR(ipa_ut_ctx->test_dbgfs_suites)) { + IPA_UT_ERR("failed to create suites debugfs dir\n"); + ret = -EFAULT; + goto unlock_mutex; + } + + if (ipa_ut_framework_load_suites()) { + IPA_UT_ERR("failed to load the suites into debugfs\n"); + ret = -EFAULT; + goto fail_clean_suites_dbgfs; + } + + ipa_ut_ctx->enabled = true; + goto unlock_mutex; + +fail_clean_suites_dbgfs: + debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_suites); +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + return ret; +} + +/** + * ipa_ut_framework_disable() - Disable the framework + * + * Remove the tests and suites debugfs exposure. 
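+ *
+ * Note (per the implementation below): only the "suites" debugfs sub-tree
+ * is removed; the "enable" file stays, so the framework can be re-enabled
+ * later by writing a non-zero value to it.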
+ * + * Return: Zero in success, otherwise in failure + */ +static int ipa_ut_framework_disable(void) +{ + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + + if (!ipa_ut_ctx->enabled) { + IPA_UT_ERR("Already disabled\n"); + goto unlock_mutex; + } + + debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_suites); + + ipa_ut_ctx->enabled = false; + +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + return 0; +} + +/** + * ipa_ut_dbgfs_enable_write() - Debugfs enable file write fops + * @params: write fops + * + * Input should be number. If 0, then disable. Otherwise enable. + * + * Return: if failed then negative value, if succeeds, amount of given chars + */ +static ssize_t ipa_ut_dbgfs_enable_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + char lcl_buf[IPA_UT_DEBUG_WRITE_BUF_SIZE]; + s8 option = 0; + int ret; + + IPA_UT_DBG("Entry\n"); + + if (sizeof(lcl_buf) < count + 1) { + IPA_UT_ERR("No enough space\n"); + return -E2BIG; + } + + if (copy_from_user(lcl_buf, buf, count)) { + IPA_UT_ERR("fail to copy buf from user space\n"); + return -EFAULT; + } + + lcl_buf[count] = '\0'; + if (kstrtos8(lcl_buf, 0, &option)) { + IPA_UT_ERR("fail convert str to s8\n"); + return -EINVAL; + } + + if (option == 0) + ret = ipa_ut_framework_disable(); + else + ret = ipa_ut_framework_enable(); + + return ret ?: count; +} + +/** + * ipa_ut_dbgfs_enable_read() - Debugfs enable file read fops + * @params: read fops + * + * To show to user space if the I/S is enabled or disabled. + * + * Return: amount of characters returned to user space + */ +static ssize_t ipa_ut_dbgfs_enable_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + const char *status; + + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + status = ipa_ut_ctx->enabled ? + "Enabled - Write 0 to disable\n" : + "Disabled - Write 1 to enable\n"; + mutex_unlock(&ipa_ut_ctx->lock); + return simple_read_from_buffer(ubuf, count, ppos, + status, strlen(status)); +} + +/** + * ipa_ut_framework_init() - Unit-tests framework initialization + * + * Complete tests initialization: Each tests needs to point to it's + * corresponing suite. + * Creates the framework debugfs root directory under IPA directory. + * Create enable debugfs file - to enable/disable the framework. 
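+ *
+ * Once initialized, the framework is driven from user space via debugfs.
+ * Illustrative usage (assumes debugfs is mounted at /sys/kernel/debug and
+ * the IPA root directory is named "ipa"; the "all" and "regression"
+ * meta-test file names come from _IPA_UT_RUN_ALL_TEST_NAME and
+ * _IPA_UT_RUN_REGRESSION_TEST_NAME):
+ *
+ *	echo 1 > /sys/kernel/debug/ipa/test/enable
+ *	echo 1 > /sys/kernel/debug/ipa/test/suites/<suite>/<test>
+ *	echo 1 > /sys/kernel/debug/ipa/test/suites/<suite>/all
+ *	echo 0 > /sys/kernel/debug/ipa/test/enable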
+ * + * Return: Zero in success, otherwise in failure + */ +static int ipa_ut_framework_init(void) +{ + struct dentry *dfile_enable; + int ret; + int suite_idx; + int test_idx; + struct ipa_ut_suite *suite; + + IPA_UT_DBG("Entry\n"); + + ipa_assert_on(!ipa_ut_ctx); + + ipa_ut_ctx->ipa_dbgfs_root = ipa_debugfs_get_root(); + if (!ipa_ut_ctx->ipa_dbgfs_root) { + IPA_UT_ERR("No IPA debugfs root entry\n"); + return -EFAULT; + } + + mutex_lock(&ipa_ut_ctx->lock); + + /* tests needs to point to their corresponding suites structures */ + for (suite_idx = IPA_UT_SUITE_FIRST_INDEX; + suite_idx < IPA_UT_SUITES_COUNT; suite_idx++) { + suite = IPA_UT_GET_SUITE(suite_idx); + ipa_assert_on(!suite); + if (!suite->tests) { + IPA_UT_DBG("No tests for suite %s\n", + suite->meta_data->name); + continue; + } + for (test_idx = 0; test_idx < suite->tests_cnt; test_idx++) { + suite->tests[test_idx].suite = suite; + IPA_UT_DBG("Updating test %s info for suite %s\n", + suite->tests[test_idx].name, + suite->meta_data->name); + } + } + + ipa_ut_ctx->wq = create_singlethread_workqueue("ipa_ut_dbgfs"); + if (!ipa_ut_ctx->wq) { + IPA_UT_ERR("create workqueue failed\n"); + ret = -ENOMEM; + goto unlock_mutex; + } + + ipa_ut_ctx->test_dbgfs_root = debugfs_create_dir("test", + ipa_ut_ctx->ipa_dbgfs_root); + if (!ipa_ut_ctx->test_dbgfs_root || + IS_ERR(ipa_ut_ctx->test_dbgfs_root)) { + IPA_UT_ERR("failed to create test debugfs dir\n"); + ret = -EFAULT; + destroy_workqueue(ipa_ut_ctx->wq); + goto unlock_mutex; + } + + dfile_enable = debugfs_create_file("enable", + IPA_UT_READ_WRITE_DBG_FILE_MODE, + ipa_ut_ctx->test_dbgfs_root, 0, &ipa_ut_dbgfs_enable_fops); + if (!dfile_enable || IS_ERR(dfile_enable)) { + IPA_UT_ERR("failed to create enable debugfs file\n"); + ret = -EFAULT; + destroy_workqueue(ipa_ut_ctx->wq); + goto fail_clean_dbgfs; + } + + _IPA_UT_TEST_FAIL_REPORT_IDX = 0; + ipa_ut_ctx->inited = true; + IPA_UT_DBG("Done\n"); + ret = 0; + goto unlock_mutex; + +fail_clean_dbgfs: + debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_root); +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + return ret; +} + +/** + * ipa_ut_framework_destroy() - Destroy the UT framework info + * + * Disable it if enabled. + * Remove the debugfs entries using the root entry + */ +static void ipa_ut_framework_destroy(void) +{ + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + destroy_workqueue(ipa_ut_ctx->wq); + if (ipa_ut_ctx->enabled) + ipa_ut_framework_disable(); + if (ipa_ut_ctx->inited) + debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_root); + mutex_unlock(&ipa_ut_ctx->lock); +} + +/** + * ipa_ut_ipa_ready_cb() - IPA ready CB + * + * Once IPA is ready starting initializing the unit-test framework + */ +static void ipa_ut_ipa_ready_cb(void *user_data) +{ + IPA_UT_DBG("Entry\n"); + (void)ipa_ut_framework_init(); +} + +/** + * ipa_ut_module_init() - Module init + * + * Create the framework context, wait for IPA driver readiness + * and Initialize it. + * If IPA driver already ready, continue initialization immediately. 
+ * if not, wait for IPA ready notification by IPA driver context + */ +int __init ipa_ut_module_init(void) +{ + int ret = 0; + bool init_framewok = true; + + IPA_UT_INFO("Loading IPA test module...\n"); + + ipa_ut_ctx = kzalloc(sizeof(struct ipa_ut_context), GFP_KERNEL); + if (!ipa_ut_ctx) { + IPA_UT_ERR("Failed to allocate ctx\n"); + return -ENOMEM; + } + mutex_init(&ipa_ut_ctx->lock); + + if (!ipa_is_ready()) { + init_framewok = false; + + IPA_UT_DBG("IPA driver not ready, registering callback\n"); + + ret = ipa_register_ipa_ready_cb(ipa_ut_ipa_ready_cb, NULL); + + /* + * If the call to ipa_register_ipa_ready_cb() above + * returns 0, this means that we've succeeded in + * queuing up a future call to ipa_ut_framework_init() + * and that the call to it will be made once the IPA + * becomes ready. If this is the case, the call to + * ipa_ut_framework_init() below need not be made. + * + * If the call to ipa_register_ipa_ready_cb() above + * returns -EEXIST, it means that during the call to + * ipa_register_ipa_ready_cb(), the IPA has become + * ready, and hence, no indirect call to + * ipa_ut_framework_init() will be made, so we need to + * call it ourselves below. + * + * If the call to ipa_register_ipa_ready_cb() above + * return something other than 0 or -EEXIST, that's a + * hard error. + */ + if (ret == -EEXIST) { + init_framewok = true; + } else { + if (ret) { + IPA_UT_ERR("IPA CB reg failed - %d\n", ret); + kfree(ipa_ut_ctx); + ipa_ut_ctx = NULL; + } + return ret; + } + } + + if (init_framewok) { + ret = ipa_ut_framework_init(); + if (ret) { + IPA_UT_ERR("framework init failed\n"); + kfree(ipa_ut_ctx); + ipa_ut_ctx = NULL; + } + } + + return ret; +} + +/** + * ipa_ut_module_exit() - Module exit function + * + * Destroys the Framework and removes its context + */ +void ipa_ut_module_exit(void) +{ + IPA_UT_DBG("Entry\n"); + + if (!ipa_ut_ctx) + return; + + ipa_ut_framework_destroy(); + kfree(ipa_ut_ctx); + ipa_ut_ctx = NULL; +} + +#if !defined(CONFIG_IPA_EMULATION) /* On real UE, we have a module */ +module_init(ipa_ut_module_init); +module_exit(ipa_ut_module_exit); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA Unit Test module"); +#endif diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.h b/drivers/platform/msm/ipa/test/ipa_ut_framework.h new file mode 100644 index 000000000000..4ad514c50dd5 --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_UT_FRAMEWORK_H_ +#define _IPA_UT_FRAMEWORK_H_ + +#include +#include "../ipa_common_i.h" +#include "ipa_ut_i.h" + +#define IPA_UT_DRV_NAME "ipa_ut" + +#define IPA_UT_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_UT_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UT_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_UT_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UT_ERR(fmt, args...) 
\ + do { \ + pr_err(IPA_UT_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UT_INFO(fmt, args...) \ + do { \ + pr_info(IPA_UT_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +/** + * struct ipa_ut_tst_fail_report - Information on test failure + * @valid: When a test posts a report, valid will be marked true + * @file: File name containing the failed test. + * @line: Number of line in the file where the test failed. + * @func: Function where the test failed in. + * @info: Information about the failure. + */ +struct ipa_ut_tst_fail_report { + bool valid; + const char *file; + int line; + const char *func; + const char *info; +}; + +/** + * Report on test failure + * To be used by tests to report a point were a test fail. + * Failures are saved in a stack manner. + * Dumping the failure info will dump the fail reports + * from all the function in the calling stack + */ +#define IPA_UT_TEST_FAIL_REPORT(__info) \ + do { \ + extern struct ipa_ut_tst_fail_report \ + _IPA_UT_TEST_FAIL_REPORT_DATA \ + [_IPA_UT_TEST_FAIL_REPORT_SIZE]; \ + extern u32 _IPA_UT_TEST_FAIL_REPORT_IDX; \ + struct ipa_ut_tst_fail_report *entry; \ + if (_IPA_UT_TEST_FAIL_REPORT_IDX >= \ + _IPA_UT_TEST_FAIL_REPORT_SIZE) \ + break; \ + entry = &(_IPA_UT_TEST_FAIL_REPORT_DATA \ + [_IPA_UT_TEST_FAIL_REPORT_IDX]); \ + entry->file = __FILENAME__; \ + entry->line = __LINE__; \ + entry->func = __func__; \ + if (__info) \ + entry->info = __info; \ + else \ + entry->info = ""; \ + _IPA_UT_TEST_FAIL_REPORT_IDX++; \ + } while (0) + +/** + * To be used by tests to log progress and ongoing information + * Logs are not printed to user, but saved to a buffer. + * I/S shall print the buffer at different occasions - e.g. in test failure + */ +#define IPA_UT_LOG(fmt, args...) \ + do { \ + extern char *_IPA_UT_TEST_LOG_BUF_NAME; \ + char __buf[512]; \ + IPA_UT_DBG(fmt, ## args); \ + if (!_IPA_UT_TEST_LOG_BUF_NAME) {\ + pr_err(IPA_UT_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + break; \ + } \ + scnprintf(__buf, sizeof(__buf), \ + " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + strlcat(_IPA_UT_TEST_LOG_BUF_NAME, __buf, \ + _IPA_UT_TEST_LOG_BUF_SIZE); \ + } while (0) + +/** + * struct ipa_ut_suite_meta - Suite meta-data + * @name: Suite unique name + * @desc: Suite description + * @setup: Setup Call-back of the suite + * @teardown: Teardown Call-back of the suite + * @priv: Private pointer of the suite + * + * Setup/Teardown will be called once for the suite when running a tests of it. + * priv field is shared between the Setup/Teardown and the tests + */ +struct ipa_ut_suite_meta { + char *name; + char *desc; + int (*setup)(void **ppriv); + int (*teardown)(void *priv); + void *priv; +}; + +/* Test suite data structure declaration */ +struct ipa_ut_suite; + +/** + * struct ipa_ut_test - Test information + * @name: Test name + * @desc: Test description + * @run: Test execution call-back + * @run_in_regression: To run this test as part of regression? + * @min_ipa_hw_ver: Minimum IPA H/W version where the test is supported? + * @max_ipa_hw_ver: Maximum IPA H/W version where the test is supported? 
+ * @suite: Pointer to suite containing this test + * @res: Test execution result. Will be updated after running a test as part + * of suite tests run + */ +struct ipa_ut_test { + char *name; + char *desc; + int (*run)(void *priv); + bool run_in_regression; + int min_ipa_hw_ver; + int max_ipa_hw_ver; + struct ipa_ut_suite *suite; + enum ipa_ut_test_result res; +}; + +/** + * struct ipa_ut_suite - Suite information + * @meta_data: Pointer to meta-data structure of the suite + * @tests: Pointer to array of tests belongs to the suite + * @tests_cnt: Number of tests + */ +struct ipa_ut_suite { + struct ipa_ut_suite_meta *meta_data; + struct ipa_ut_test *tests; + size_t tests_cnt; +}; + + +/** + * Add a test to a suite. + * Will add entry to tests array and update its info with + * the given info, thus adding new test. + */ +#define IPA_UT_ADD_TEST(__name, __desc, __run, __run_in_regression, \ + __min_ipa_hw_ver, __max_ipa__hw_ver) \ + { \ + .name = #__name, \ + .desc = __desc, \ + .run = __run, \ + .run_in_regression = __run_in_regression, \ + .min_ipa_hw_ver = __min_ipa_hw_ver, \ + .max_ipa_hw_ver = __max_ipa__hw_ver, \ + .suite = NULL, \ + } + +/** + * Declare a suite + * Every suite need to be declared before it is registered. + */ +#define IPA_UT_DECLARE_SUITE(__name) \ + extern struct ipa_ut_suite _IPA_UT_SUITE_DATA(__name) + +/** + * Register a suite + * Registering a suite is mandatory so it will be considered. + */ +#define IPA_UT_REGISTER_SUITE(__name) \ + (&_IPA_UT_SUITE_DATA(__name)) + +/** + * Start/End suite definition + * Will create the suite global structures and adds adding tests to it. + * Use IPA_UT_ADD_TEST() with these macros to add tests when defining + * a suite + */ +#define IPA_UT_DEFINE_SUITE_START(__name, __desc, __setup, __teardown) \ + static struct ipa_ut_suite_meta _IPA_UT_SUITE_META_DATA(__name) = \ + { \ + .name = #__name, \ + .desc = __desc, \ + .setup = __setup, \ + .teardown = __teardown, \ + }; \ + static struct ipa_ut_test _IPA_UT_SUITE_TESTS(__name)[] = +#define IPA_UT_DEFINE_SUITE_END(__name) \ + ; \ + struct ipa_ut_suite _IPA_UT_SUITE_DATA(__name) = \ + { \ + .meta_data = &_IPA_UT_SUITE_META_DATA(__name), \ + .tests = _IPA_UT_SUITE_TESTS(__name), \ + .tests_cnt = ARRAY_SIZE(_IPA_UT_SUITE_TESTS(__name)), \ + } + +#endif /* _IPA_UT_FRAMEWORK_H_ */ diff --git a/drivers/platform/msm/ipa/test/ipa_ut_i.h b/drivers/platform/msm/ipa/test/ipa_ut_i.h new file mode 100644 index 000000000000..3753c68a3063 --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_ut_i.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _IPA_UT_I_H_ +#define _IPA_UT_I_H_ + +/* Suite data global structure name */ +#define _IPA_UT_SUITE_DATA(__name) ipa_ut_ ##__name ##_data + +/* Suite meta-data global structure name */ +#define _IPA_UT_SUITE_META_DATA(__name) ipa_ut_ ##__name ##_meta_data + +/* Suite global array of tests */ +#define _IPA_UT_SUITE_TESTS(__name) ipa_ut_ ##__name ##_tests + +/* Global array of all suites */ +#define _IPA_UT_ALL_SUITES ipa_ut_all_suites_data + +/* Meta-test "all" name - test to run all tests in given suite */ +#define _IPA_UT_RUN_ALL_TEST_NAME "all" + +/** + * Meta-test "regression" name - + * test to run all regression tests in given suite + */ +#define _IPA_UT_RUN_REGRESSION_TEST_NAME "regression" + + +/* Test Log buffer name and size */ +#define _IPA_UT_TEST_LOG_BUF_NAME ipa_ut_tst_log_buf +#define _IPA_UT_TEST_LOG_BUF_SIZE 8192 + +/* Global structure for test fail execution result information */ +#define _IPA_UT_TEST_FAIL_REPORT_DATA ipa_ut_tst_fail_report_data +#define _IPA_UT_TEST_FAIL_REPORT_SIZE 5 +#define _IPA_UT_TEST_FAIL_REPORT_IDX ipa_ut_tst_fail_report_data_index + +/* Start/End definitions of the array of suites */ +#define IPA_UT_DEFINE_ALL_SUITES_START \ + static struct ipa_ut_suite *_IPA_UT_ALL_SUITES[] = +#define IPA_UT_DEFINE_ALL_SUITES_END + +/** + * Suites iterator - Array-like container + * First index, number of elements and element fetcher + */ +#define IPA_UT_SUITE_FIRST_INDEX 0 +#define IPA_UT_SUITES_COUNT \ + ARRAY_SIZE(_IPA_UT_ALL_SUITES) +#define IPA_UT_GET_SUITE(__index) \ + _IPA_UT_ALL_SUITES[__index] + +/** + * enum ipa_ut_test_result - Test execution result + * @IPA_UT_TEST_RES_FAIL: Test executed and failed + * @IPA_UT_TEST_RES_SUCCESS: Test executed and succeeded + * @IPA_UT_TEST_RES_SKIP: Test was not executed. + * + * When running all tests in a suite, a specific test could + * be skipped and not executed. For example due to mismatch of + * IPA H/W version. + */ +enum ipa_ut_test_result { + IPA_UT_TEST_RES_FAIL, + IPA_UT_TEST_RES_SUCCESS, + IPA_UT_TEST_RES_SKIP, +}; + +/** + * enum ipa_ut_meta_test_type - Type of suite meta-test + * @IPA_UT_META_TEST_ALL: Represents all tests in suite + * @IPA_UT_META_TEST_REGRESSION: Represents all regression tests in suite + */ +enum ipa_ut_meta_test_type { + IPA_UT_META_TEST_ALL, + IPA_UT_META_TEST_REGRESSION, +}; + +#endif /* _IPA_UT_I_H_ */ diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h new file mode 100644 index 000000000000..b452da893263 --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_UT_SUITE_LIST_H_ +#define _IPA_UT_SUITE_LIST_H_ + +#include "ipa_ut_framework.h" +#include "ipa_ut_i.h" + +/** + * Declare every suite here so that it will be found later below + * No importance for order. + */ +IPA_UT_DECLARE_SUITE(mhi); +IPA_UT_DECLARE_SUITE(dma); +IPA_UT_DECLARE_SUITE(pm); +IPA_UT_DECLARE_SUITE(example); +IPA_UT_DECLARE_SUITE(hw_stats); + + +/** + * Register every suite inside the below block. 
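+ * For example, a hypothetical suite "foo", defined elsewhere with
+ * IPA_UT_DEFINE_SUITE_START(foo, ...) / IPA_UT_DEFINE_SUITE_END(foo),
+ * would first be declared above:
+ *	IPA_UT_DECLARE_SUITE(foo);
+ * and then added inside the block below:
+ *	IPA_UT_REGISTER_SUITE(foo),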
+ * Unregistered suites will be ignored + */ +IPA_UT_DEFINE_ALL_SUITES_START +{ + IPA_UT_REGISTER_SUITE(mhi), + IPA_UT_REGISTER_SUITE(dma), + IPA_UT_REGISTER_SUITE(pm), + IPA_UT_REGISTER_SUITE(example), + IPA_UT_REGISTER_SUITE(hw_stats), +} IPA_UT_DEFINE_ALL_SUITES_END; + +#endif /* _IPA_UT_SUITE_LIST_H_ */ diff --git a/include/linux/ecm_ipa.h b/include/linux/ecm_ipa.h new file mode 100644 index 000000000000..e50685e8c088 --- /dev/null +++ b/include/linux/ecm_ipa.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _ECM_IPA_H_ +#define _ECM_IPA_H_ + +#include + +/* + * @priv: private data given upon ipa_connect + * @evt: event enum, should be IPA_WRITE_DONE + * @data: for tx path the data field is the sent socket buffer. + */ +typedef void (*ecm_ipa_callback)(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data); + +/* + * struct ecm_ipa_params - parameters for ecm_ipa initialization API + * + * @device_ready_notify: callback supplied by USB core driver. + * This callback shall be called by the Netdev once the device + * is ready to receive data from tethered PC. + * @ecm_ipa_rx_dp_notify: ecm_ipa will set this callback (out parameter). + * this callback shall be supplied for ipa_connect upon pipe + * connection (USB->IPA), once IPA driver receive data packets + * from USB pipe destined for Apps this callback will be called. + * @ecm_ipa_tx_dp_notify: ecm_ipa will set this callback (out parameter). + * this callback shall be supplied for ipa_connect upon pipe + * connection (IPA->USB), once IPA driver send packets destined + * for USB, IPA BAM will notify for Tx-complete. + * @priv: ecm_ipa will set this pointer (out parameter). + * This pointer will hold the network device for later interaction + * with ecm_ipa APIs + * @host_ethaddr: host Ethernet address in network order + * @device_ethaddr: device Ethernet address in network order + * @skip_ep_cfg: boolean field that determines if Apps-processor + * should or should not configure this end-point. + */ +struct ecm_ipa_params { + void (*device_ready_notify)(void); + ecm_ipa_callback ecm_ipa_rx_dp_notify; + ecm_ipa_callback ecm_ipa_tx_dp_notify; + u8 host_ethaddr[ETH_ALEN]; + u8 device_ethaddr[ETH_ALEN]; + void *private; + bool skip_ep_cfg; +}; + + +#ifdef CONFIG_ECM_IPA + +int ecm_ipa_init(struct ecm_ipa_params *params); + +int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, + void *priv); + +int ecm_ipa_disconnect(void *priv); + +void ecm_ipa_cleanup(void *priv); + +#else /* CONFIG_ECM_IPA*/ + +static inline int ecm_ipa_init(struct ecm_ipa_params *params) +{ + return 0; +} + +static inline int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, + void *priv) +{ + return 0; +} + +static inline int ecm_ipa_disconnect(void *priv) +{ + return 0; +} + +static inline void ecm_ipa_cleanup(void *priv) +{ + +} +#endif /* CONFIG_ECM_IPA*/ + +#endif /* _ECM_IPA_H_ */ diff --git a/include/linux/ipa.h b/include/linux/ipa.h new file mode 100644 index 000000000000..cc6f5b1a5400 --- /dev/null +++ b/include/linux/ipa.h @@ -0,0 +1,2399 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _IPA_H_ +#define _IPA_H_ + +#include +#include +#include +#include +#include "linux/msm_gsi.h" + +#define IPA_APPS_MAX_BW_IN_MBPS 700 +/** + * enum ipa_transport_type + * transport type: either GSI or SPS + */ +enum ipa_transport_type { + IPA_TRANSPORT_TYPE_SPS, + IPA_TRANSPORT_TYPE_GSI +}; + +/** + * enum ipa_nat_en_type - NAT setting type in IPA end-point + */ +enum ipa_nat_en_type { + IPA_BYPASS_NAT, + IPA_SRC_NAT, + IPA_DST_NAT, +}; + +/** + * enum ipa_ipv6ct_en_type - IPv6CT setting type in IPA end-point + */ +enum ipa_ipv6ct_en_type { + IPA_BYPASS_IPV6CT, + IPA_ENABLE_IPV6CT, +}; + +/** + * enum ipa_mode_type - mode setting type in IPA end-point + * @BASIC: basic mode + * @ENABLE_FRAMING_HDLC: not currently supported + * @ENABLE_DEFRAMING_HDLC: not currently supported + * @DMA: all data arriving IPA will not go through IPA logic blocks, this + * allows IPA to work as DMA for specific pipes. + */ +enum ipa_mode_type { + IPA_BASIC, + IPA_ENABLE_FRAMING_HDLC, + IPA_ENABLE_DEFRAMING_HDLC, + IPA_DMA, +}; + +/** + * enum ipa_aggr_en_type - aggregation setting type in IPA + * end-point + */ +enum ipa_aggr_en_type { + IPA_BYPASS_AGGR, + IPA_ENABLE_AGGR, + IPA_ENABLE_DEAGGR, +}; + +/** + * enum ipa_aggr_type - type of aggregation in IPA end-point + */ +enum ipa_aggr_type { + IPA_MBIM_16 = 0, + IPA_HDLC = 1, + IPA_TLP = 2, + IPA_RNDIS = 3, + IPA_GENERIC = 4, + IPA_QCMAP = 6, +}; + +/** + * enum ipa_aggr_mode - global aggregation mode + */ +enum ipa_aggr_mode { + IPA_MBIM_AGGR, + IPA_QCNCM_AGGR, +}; + +/** + * enum ipa_dp_evt_type - type of event client callback is + * invoked for on data path + * @IPA_RECEIVE: data is struct sk_buff + * @IPA_WRITE_DONE: data is struct sk_buff + */ +enum ipa_dp_evt_type { + IPA_RECEIVE, + IPA_WRITE_DONE, +}; + +/** + * enum hdr_total_len_or_pad_type - type of value held by TOTAL_LEN_OR_PAD + * field in header configuration register. + * @IPA_HDR_PAD: field is used as padding length + * @IPA_HDR_TOTAL_LEN: field is used as total length + */ +enum hdr_total_len_or_pad_type { + IPA_HDR_PAD = 0, + IPA_HDR_TOTAL_LEN = 1, +}; + +/** + * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point + * @nat_en: This defines the default NAT mode for the pipe: in case of + * filter miss - the default NAT mode defines the NATing operation + * on the packet. Valid for Input Pipes only (IPA consumer) + */ +struct ipa_ep_cfg_nat { + enum ipa_nat_en_type nat_en; +}; + +/** + * struct ipa_ep_cfg_conn_track - IPv6 Connection tracking configuration in + * IPA end-point + * @conn_track_en: Defines speculative conn_track action, means if specific + * pipe needs to have UL/DL IPv6 Connection Tracking or Bypass + * IPv6 Connection Tracking. 0: Bypass IPv6 Connection Tracking + * 1: IPv6 UL/DL Connection Tracking. + * Valid for Input Pipes only (IPA consumer) + */ +struct ipa_ep_cfg_conn_track { + enum ipa_ipv6ct_en_type conn_track_en; +}; + +/** + * struct ipa_ep_cfg_hdr - header configuration in IPA end-point + * + * @hdr_len:Header length in bytes to be added/removed. Assuming + * header len is constant per endpoint. Valid for + * both Input and Output Pipes + * @hdr_ofst_metadata_valid: 0: Metadata_Ofst value is invalid, i.e., no + * metadata within header. 
+ * 1: Metadata_Ofst value is valid, i.e., metadata + * within header is in offset Metadata_Ofst Valid + * for Input Pipes only (IPA Consumer) (for output + * pipes, metadata already set within the header) + * @hdr_ofst_metadata: Offset within header in which metadata resides + * Size of metadata - 4bytes + * Example - Stream ID/SSID/mux ID. + * Valid for Input Pipes only (IPA Consumer) (for output + * pipes, metadata already set within the header) + * @hdr_additional_const_len: Defines the constant length that should be added + * to the payload length in order for IPA to update + * correctly the length field within the header + * (valid only in case Hdr_Ofst_Pkt_Size_Valid=1) + * Valid for Output Pipes (IPA Producer) + * Starting IPA4.5, this field in H/W requires more bits + * to support larger range, but no spare bits to use. + * So the MSB part is done thourgh the EXT register. + * When accessing this register, need to access the EXT + * register as well. + * @hdr_ofst_pkt_size_valid: 0: Hdr_Ofst_Pkt_Size value is invalid, i.e., no + * length field within the inserted header + * 1: Hdr_Ofst_Pkt_Size value is valid, i.e., a + * packet length field resides within the header + * Valid for Output Pipes (IPA Producer) + * @hdr_ofst_pkt_size: Offset within header in which packet size reside. Upon + * Header Insertion, IPA will update this field within the + * header with the packet length . Assumption is that + * header length field size is constant and is 2Bytes + * Valid for Output Pipes (IPA Producer) + * Starting IPA4.5, this field in H/W requires more bits + * to support larger range, but no spare bits to use. + * So the MSB part is done thourgh the EXT register. + * When accessing this register, need to access the EXT + * register as well. + * @hdr_a5_mux: Determines whether A5 Mux header should be added to the packet. + * This bit is valid only when Hdr_En=01(Header Insertion) + * SW should set this bit for IPA-to-A5 pipes. + * 0: Do not insert A5 Mux Header + * 1: Insert A5 Mux Header + * Valid for Output Pipes (IPA Producer) + * @hdr_remove_additional: bool switch, remove more of the header + * based on the aggregation configuration (register + * HDR_LEN_INC_DEAGG_HDR) + * @hdr_metadata_reg_valid: bool switch, metadata from + * register INIT_HDR_METADATA_n is valid. + * (relevant only for IPA Consumer pipes) + * Starting IPA4.5, this parameter is irrelevant and H/W + * assumes it is always valid. + */ +struct ipa_ep_cfg_hdr { + u32 hdr_len; + u32 hdr_ofst_metadata_valid; + u32 hdr_ofst_metadata; + u32 hdr_additional_const_len; + u32 hdr_ofst_pkt_size_valid; + u32 hdr_ofst_pkt_size; + u32 hdr_a5_mux; + u32 hdr_remove_additional; + u32 hdr_metadata_reg_valid; +}; + +/** + * struct ipa_ep_cfg_hdr_ext - extended header configuration in IPA end-point + * @hdr_pad_to_alignment: Pad packet to specified alignment + * (2^pad to alignment value), i.e. value of 3 means pad to 2^3 = 8 bytes + * alignment. Alignment is to 0,2 up to 32 bytes (IPAv2 does not support 64 + * byte alignment). Valid for Output Pipes only (IPA Producer). 
+ * @hdr_total_len_or_pad_offset: Offset to length field containing either + * total length or pad length, per hdr_total_len_or_pad config + * @hdr_payload_len_inc_padding: 0-IPA_ENDP_INIT_HDR_n's + * HDR_OFST_PKT_SIZE does + * not includes padding bytes size, payload_len = packet length, + * 1-IPA_ENDP_INIT_HDR_n's HDR_OFST_PKT_SIZE includes + * padding bytes size, payload_len = packet length + padding + * @hdr_total_len_or_pad: field is used as PAD length ot as Total length + * (header + packet + padding) + * @hdr_total_len_or_pad_valid: 0-Ignore TOTAL_LEN_OR_PAD field, 1-Process + * TOTAL_LEN_OR_PAD field + * @hdr_little_endian: 0-Big Endian, 1-Little Endian + * @hdr: The header structure. Used starting IPA4.5 where part of the info + * at the header structure is implemented via the EXT register at the H/W + */ +struct ipa_ep_cfg_hdr_ext { + u32 hdr_pad_to_alignment; + u32 hdr_total_len_or_pad_offset; + bool hdr_payload_len_inc_padding; + enum hdr_total_len_or_pad_type hdr_total_len_or_pad; + bool hdr_total_len_or_pad_valid; + bool hdr_little_endian; + struct ipa_ep_cfg_hdr *hdr; +}; + +/** + * struct ipa_ep_cfg_mode - mode configuration in IPA end-point + * @mode: Valid for Input Pipes only (IPA Consumer) + * @dst: This parameter specifies the output pipe to which the packets + * will be routed to. + * This parameter is valid for Mode=DMA and not valid for + * Mode=Basic + * Valid for Input Pipes only (IPA Consumer) + */ +struct ipa_ep_cfg_mode { + enum ipa_mode_type mode; + enum ipa_client_type dst; +}; + +/** + * struct ipa_ep_cfg_aggr - aggregation configuration in IPA end-point + * + * @aggr_en: Valid for both Input and Output Pipes + * @aggr: aggregation type (Valid for both Input and Output Pipes) + * @aggr_byte_limit: Limit of aggregated packet size in KB (<=32KB) When set + * to 0, there is no size limitation on the aggregation. + * When both, Aggr_Byte_Limit and Aggr_Time_Limit are set + * to 0, there is no aggregation, every packet is sent + * independently according to the aggregation structure + * Valid for Output Pipes only (IPA Producer ) + * @aggr_time_limit: Timer to close aggregated packet When set to 0, + * there is no time limitation on the aggregation. When + * both, Aggr_Byte_Limit and Aggr_Time_Limit are set to 0, + * there is no aggregation, every packet is sent + * independently according to the aggregation structure + * Valid for Output Pipes only (IPA Producer). + * Time unit is -->> usec <<-- + * @aggr_pkt_limit: Defines if EOF close aggregation or not. if set to false + * HW closes aggregation (sends EOT) only based on its + * aggregation config (byte/time limit, etc). if set to + * true EOF closes aggregation in addition to HW based + * aggregation closure. Valid for Output Pipes only (IPA + * Producer). EOF affects only Pipes configured for + * generic aggregation. + * @aggr_hard_byte_limit_en: If set to 1, byte-limit aggregation for this + * pipe will apply a hard-limit behavior which will not + * allow frames to be closed with more than byte-limit + * bytes. If set to 0, previous byte-limit behavior + * will apply - frames close once a packet causes the + * accumulated byte-count to cross the byte-limit + * threshold (closed frame will contain that packet). + * @aggr_sw_eof_active: 0: EOF does not close aggregation. HW closes aggregation + * (sends EOT) only based on its aggregation config + * (byte/time limit, etc). + * 1: EOF closes aggregation in addition to HW based + * aggregation closure. Valid for Output Pipes only (IPA + * Producer). 
EOF affects only Pipes configured for generic + * aggregation. + * @pulse_generator: Pulse generator number to be used. + * For internal use. + * Supported starting IPA4.5. + * @scaled_time: Time limit in accordance to the pulse generator + * granularity. + * For internal use + * Supported starting IPA4.5 + */ +struct ipa_ep_cfg_aggr { + enum ipa_aggr_en_type aggr_en; + enum ipa_aggr_type aggr; + u32 aggr_byte_limit; + u32 aggr_time_limit; + u32 aggr_pkt_limit; + u32 aggr_hard_byte_limit_en; + bool aggr_sw_eof_active; + u8 pulse_generator; + u8 scaled_time; +}; + +/** + * struct ipa_ep_cfg_route - route configuration in IPA end-point + * @rt_tbl_hdl: Defines the default routing table index to be used in case there + * is no filter rule matching, valid for Input Pipes only (IPA + * Consumer). Clients should set this to 0 which will cause default + * v4 and v6 routes setup internally by IPA driver to be used for + * this end-point + */ +struct ipa_ep_cfg_route { + u32 rt_tbl_hdl; +}; + +/** + * struct ipa_ep_cfg_holb - head of line blocking configuration in IPA end-point + * @en: enable(1 => ok to drop pkt)/disable(0 => never drop pkt) + * @tmr_val: duration in units of 128 IPA clk clock cyles [0,511], 1 clk=1.28us + * IPAv2.5 support 32 bit HOLB timeout value, previous versions + * supports 16 bit + * IPAv4.2: splitting timer value into 2 fields. Timer value is: + * BASE_VALUE * (2^SCALE) + * IPA4.5: tmr_val is in -->>msec<<--. Range is dynamic based + * on H/W configuration. (IPA4.5 absolute maximum is 0.65535*31 -> ~20sec). + * @base_val : IPA4.2 only field. base value of the timer. + * @scale : IPA4.2 only field. scale value for timer. + * @pulse_generator: Pulse generator number to be used. + * For internal use. + * Supported starting IPA4.5. + * @scaled_time: Time limit in accordance to the pulse generator granularity + * For internal use + * Supported starting IPA4.5 + */ +struct ipa_ep_cfg_holb { + u32 tmr_val; + u32 base_val; + u32 scale; + u16 en; + u8 pulse_generator; + u8 scaled_time; +}; + +/** + * struct ipa_ep_cfg_deaggr - deaggregation configuration in IPA end-point + * @deaggr_hdr_len: Deaggregation Header length in bytes. Valid only for Input + * Pipes, which are configured for 'Generic' deaggregation. + * @packet_offset_valid: - 0: PACKET_OFFSET is not used, 1: PACKET_OFFSET is + * used. + * @packet_offset_location: Location of packet offset field, which specifies + * the offset to the packet from the start of the packet offset field. + * @max_packet_len: DEAGGR Max Packet Length in Bytes. A Packet with higher + * size wil be treated as an error. 0 - Packet Length is not Bound, + * IPA should not check for a Max Packet Length. + */ +struct ipa_ep_cfg_deaggr { + u32 deaggr_hdr_len; + bool packet_offset_valid; + u32 packet_offset_location; + u32 max_packet_len; +}; + +/** + * enum ipa_cs_offload - checksum offload setting + */ +enum ipa_cs_offload { + IPA_DISABLE_CS_OFFLOAD, + IPA_ENABLE_CS_OFFLOAD_UL, + IPA_ENABLE_CS_OFFLOAD_DL, + IPA_CS_RSVD +}; + +/** + * struct ipa_ep_cfg_cfg - IPA ENDP_INIT Configuration register + * @frag_offload_en: - 0 - IP packet fragment handling is disabled. IP packet + * fragments should be sent to SW. SW is responsible for + * configuring filter rules, and IP packet filter exception should be + * used to send all fragments to SW. 1 - IP packet fragment + * handling is enabled. IPA checks for fragments and uses frag + * rules table for processing fragments. 
Valid only for Input Pipes + * (IPA Consumer) + * @cs_offload_en: Checksum offload enable: 00: Disable checksum offload, 01: + * Enable checksum calculation offload (UL) - For output pipe + * (IPA producer) specifies that checksum trailer is to be added. + * For input pipe (IPA consumer) specifies presence of checksum + * header and IPA checksum calculation accordingly. 10: Enable + * checksum calculation offload (DL) - For output pipe (IPA + * producer) specifies that checksum trailer is to be added. For + * input pipe (IPA consumer) specifies IPA checksum calculation. + * 11: Reserved + * @cs_metadata_hdr_offset: Offset in Words (4 bytes) within header in which + * checksum meta info header (4 bytes) starts (UL). Values are 0-15, which + * mean 0 - 60 byte checksum header offset. Valid for input + * pipes only (IPA consumer) + * @gen_qmb_master_sel: Select bit for ENDP GEN-QMB master. This is used to + * separate DDR & PCIe transactions in-order to limit them as + * a group (using MAX_WRITES/READS limiation). Valid for input and + * output pipes (IPA consumer+producer) + */ +struct ipa_ep_cfg_cfg { + bool frag_offload_en; + enum ipa_cs_offload cs_offload_en; + u8 cs_metadata_hdr_offset; + u8 gen_qmb_master_sel; +}; + +/** + * struct ipa_ep_cfg_metadata_mask - Endpoint initialization hdr metadata mask + * @metadata_mask: Mask specifying which metadata bits to write to + * IPA_ENDP_INIT_HDR_n.s HDR_OFST_METADATA. Only + * masked metadata bits (set to 1) will be written. Valid for Output + * Pipes only (IPA Producer) + */ +struct ipa_ep_cfg_metadata_mask { + u32 metadata_mask; +}; + +/** + * struct ipa_ep_cfg_metadata - Meta Data configuration in IPA end-point + * @md: This defines the meta data from tx data descriptor + * @qmap_id: qmap id + */ +struct ipa_ep_cfg_metadata { + u32 qmap_id; +}; + +/** + * struct ipa_ep_cfg_seq - HPS/DPS sequencer type configuration in IPA end-point + * @set_dynamic: 0 - HPS/DPS seq type is configured statically, + * 1 - HPS/DPS seq type is set to seq_type + * @seq_type: HPS/DPS sequencer type configuration + */ +struct ipa_ep_cfg_seq { + bool set_dynamic; + int seq_type; +}; + +/** + * struct ipa_ep_cfg - configuration of IPA end-point + * @nat: NAT parameters + * @conn_track: IPv6CT parameters + * @hdr: Header parameters + * @hdr_ext: Extended header parameters + * @mode: Mode parameters + * @aggr: Aggregation parameters + * @deaggr: Deaggregation params + * @route: Routing parameters + * @cfg: Configuration register data + * @metadata_mask: Hdr metadata mask + * @meta: Meta Data + * @seq: HPS/DPS sequencers configuration + */ +struct ipa_ep_cfg { + struct ipa_ep_cfg_nat nat; + struct ipa_ep_cfg_conn_track conn_track; + struct ipa_ep_cfg_hdr hdr; + struct ipa_ep_cfg_hdr_ext hdr_ext; + struct ipa_ep_cfg_mode mode; + struct ipa_ep_cfg_aggr aggr; + struct ipa_ep_cfg_deaggr deaggr; + struct ipa_ep_cfg_route route; + struct ipa_ep_cfg_cfg cfg; + struct ipa_ep_cfg_metadata_mask metadata_mask; + struct ipa_ep_cfg_metadata meta; + struct ipa_ep_cfg_seq seq; +}; + +/** + * struct ipa_ep_cfg_ctrl - Control configuration in IPA end-point + * @ipa_ep_suspend: 0 - ENDP is enabled, 1 - ENDP is suspended (disabled). + * Valid for PROD Endpoints + * @ipa_ep_delay: 0 - ENDP is free-running, 1 - ENDP is delayed. + * SW controls the data flow of an endpoint usind this bit. 
+ * Valid for CONS Endpoints + */ +struct ipa_ep_cfg_ctrl { + bool ipa_ep_suspend; + bool ipa_ep_delay; +}; + +/** + * x should be in bytes + */ +#define IPA_NUM_OF_FIFO_DESC(x) (x/sizeof(struct sps_iovec)) +typedef void (*ipa_notify_cb)(void *priv, enum ipa_dp_evt_type evt, + unsigned long data); + +/** + * enum ipa_wdi_meter_evt_type - type of event client callback is + * for AP+STA mode metering + * @IPA_GET_WDI_SAP_STATS: get IPA_stats betwen SAP and STA - + * use ipa_get_wdi_sap_stats structure + * @IPA_SET_WIFI_QUOTA: set quota limit on STA - + * use ipa_set_wifi_quota structure + */ +enum ipa_wdi_meter_evt_type { + IPA_GET_WDI_SAP_STATS, + IPA_SET_WIFI_QUOTA, +}; + +struct ipa_get_wdi_sap_stats { + /* indicate to reset stats after query */ + uint8_t reset_stats; + /* indicate valid stats from wlan-fw */ + uint8_t stats_valid; + /* Tx: SAP->STA */ + uint64_t ipv4_tx_packets; + uint64_t ipv4_tx_bytes; + /* Rx: STA->SAP */ + uint64_t ipv4_rx_packets; + uint64_t ipv4_rx_bytes; + uint64_t ipv6_tx_packets; + uint64_t ipv6_tx_bytes; + uint64_t ipv6_rx_packets; + uint64_t ipv6_rx_bytes; +}; + +/** + * struct ipa_set_wifi_quota - structure used for + * IPA_SET_WIFI_QUOTA. + * + * @quota_bytes: Quota (in bytes) for the STA interface. + * @set_quota: Indicate whether to set the quota (use 1) or + * unset the quota. + * + */ +struct ipa_set_wifi_quota { + uint64_t quota_bytes; + uint8_t set_quota; + /* indicate valid quota set from wlan-fw */ + uint8_t set_valid; +}; + +typedef void (*ipa_wdi_meter_notifier_cb)(enum ipa_wdi_meter_evt_type evt, + void *data); + + +/** + * struct ipa_tx_intf - interface tx properties + * @num_props: number of tx properties + * @prop: the tx properties array + */ +struct ipa_tx_intf { + u32 num_props; + struct ipa_ioc_tx_intf_prop *prop; +}; + +/** + * struct ipa_rx_intf - interface rx properties + * @num_props: number of rx properties + * @prop: the rx properties array + */ +struct ipa_rx_intf { + u32 num_props; + struct ipa_ioc_rx_intf_prop *prop; +}; + +/** + * struct ipa_ext_intf - interface ext properties + * @excp_pipe_valid: is next field valid? + * @excp_pipe: exception packets should be routed to this pipe + * @num_props: number of ext properties + * @prop: the ext properties array + */ +struct ipa_ext_intf { + bool excp_pipe_valid; + enum ipa_client_type excp_pipe; + u32 num_props; + struct ipa_ioc_ext_intf_prop *prop; +}; + +/** + * struct ipa_sys_connect_params - information needed to setup an IPA end-point + * in system-BAM mode + * @ipa_ep_cfg: IPA EP configuration + * @client: the type of client who "owns" the EP + * @desc_fifo_sz: size of desc FIFO. This number is used to allocate the desc + * fifo for BAM. For GSI, this size is used by IPA driver as a + * baseline to calculate the GSI ring size in the following way: + * For PROD pipes, GSI ring is 4 * desc_fifo_sz. + For PROD pipes, GSI ring is 2 * desc_fifo_sz. + * @priv: callback cookie + * @notify: callback + * priv - callback cookie + * evt - type of event + * data - data relevant to event. May not be valid. See event_type + * enum for valid cases. 
+ * @skip_ep_cfg: boolean field that determines if EP should be configured + * by IPA driver + * @keep_ipa_awake: when true, IPA will not be clock gated + * @napi_enabled: when true, IPA call client callback to start polling + */ +struct ipa_sys_connect_params { + struct ipa_ep_cfg ipa_ep_cfg; + enum ipa_client_type client; + u32 desc_fifo_sz; + void *priv; + ipa_notify_cb notify; + bool skip_ep_cfg; + bool keep_ipa_awake; + struct napi_struct *napi_obj; + bool recycle_enabled; +}; + +/** + * struct ipa_tx_meta - meta-data for the TX packet + * @dma_address: dma mapped address of TX packet + * @dma_address_valid: is above field valid? + */ +struct ipa_tx_meta { + u8 pkt_init_dst_ep; + bool pkt_init_dst_ep_valid; + bool pkt_init_dst_ep_remote; + dma_addr_t dma_address; + bool dma_address_valid; +}; + +/** + * typedef ipa_msg_free_fn - callback function + * @param buff - [in] the message payload to free + * @param len - [in] size of message payload + * @param type - [in] the message type + * + * Message callback registered by kernel client with IPA driver to + * free message payload after IPA driver processing is complete + * + * No return value + */ +typedef void (*ipa_msg_free_fn)(void *buff, u32 len, u32 type); + +/** + * typedef ipa_msg_pull_fn - callback function + * @param buff - [in] where to copy message payload + * @param len - [in] size of buffer to copy payload into + * @param type - [in] the message type + * + * Message callback registered by kernel client with IPA driver for + * IPA driver to pull messages from the kernel client upon demand from + * user-space + * + * Returns how many bytes were copied into the buffer. + */ +typedef int (*ipa_msg_pull_fn)(void *buff, u32 len, u32 type); + +/** + * enum ipa_voltage_level - IPA Voltage levels + */ +enum ipa_voltage_level { + IPA_VOLTAGE_UNSPECIFIED, + IPA_VOLTAGE_SVS2 = IPA_VOLTAGE_UNSPECIFIED, + IPA_VOLTAGE_SVS, + IPA_VOLTAGE_NOMINAL, + IPA_VOLTAGE_TURBO, + IPA_VOLTAGE_MAX, +}; + +/** + * enum ipa_rm_event - IPA RM events + * + * Indicate the resource state change + */ +enum ipa_rm_event { + IPA_RM_RESOURCE_GRANTED, + IPA_RM_RESOURCE_RELEASED +}; + +typedef void (*ipa_rm_notify_cb)(void *user_data, + enum ipa_rm_event event, + unsigned long data); +/** + * struct ipa_rm_register_params - information needed to + * register IPA RM client with IPA RM + * + * @user_data: IPA RM client provided information + * to be passed to notify_cb callback below + * @notify_cb: callback which is called by resource + * to notify the IPA RM client about its state + * change IPA RM client is expected to perform non + * blocking operations only in notify_cb and + * release notification context as soon as + * possible. + */ +struct ipa_rm_register_params { + void *user_data; + ipa_rm_notify_cb notify_cb; +}; + +/** + * struct ipa_rm_create_params - information needed to initialize + * the resource + * @name: resource name + * @floor_voltage: floor voltage needed for client to operate in maximum + * bandwidth. 
+ * @reg_params: register parameters, contains are ignored + * for consumer resource NULL should be provided + * for consumer resource + * @request_resource: function which should be called to request resource, + * NULL should be provided for producer resource + * @release_resource: function which should be called to release resource, + * NULL should be provided for producer resource + * + * IPA RM client is expected to perform non blocking operations only + * in request_resource and release_resource functions and + * release notification context as soon as possible. + */ +struct ipa_rm_create_params { + enum ipa_rm_resource_name name; + enum ipa_voltage_level floor_voltage; + struct ipa_rm_register_params reg_params; + int (*request_resource)(void); + int (*release_resource)(void); +}; + +/** + * struct ipa_rm_perf_profile - information regarding IPA RM client performance + * profile + * + * @max_bandwidth_mbps: maximum bandwidth need of the client in Mbps + */ +struct ipa_rm_perf_profile { + u32 max_supported_bandwidth_mbps; +}; + +#define A2_MUX_HDR_NAME_V4_PREF "dmux_hdr_v4_" +#define A2_MUX_HDR_NAME_V6_PREF "dmux_hdr_v6_" + +/** + * enum teth_tethering_mode - Tethering mode (Rmnet / MBIM) + */ +enum teth_tethering_mode { + TETH_TETHERING_MODE_RMNET, + TETH_TETHERING_MODE_MBIM, + TETH_TETHERING_MODE_MAX, +}; + +/** + * teth_bridge_init_params - Parameters used for in/out USB API + * @usb_notify_cb: Callback function which should be used by the caller. + * Output parameter. + * @private_data: Data for the callback function. Should be used by the + * caller. Output parameter. + * @skip_ep_cfg: boolean field that determines if Apps-processor + * should or should not confiugre this end-point. + */ +struct teth_bridge_init_params { + ipa_notify_cb usb_notify_cb; + void *private_data; + enum ipa_client_type client; + bool skip_ep_cfg; +}; + +/** + * struct teth_bridge_connect_params - Parameters used in teth_bridge_connect() + * @ipa_usb_pipe_hdl: IPA to USB pipe handle, returned from ipa_connect() + * @usb_ipa_pipe_hdl: USB to IPA pipe handle, returned from ipa_connect() + * @tethering_mode: Rmnet or MBIM + * @ipa_client_type: IPA "client" name (IPA_CLIENT_USB#_PROD) + */ +struct teth_bridge_connect_params { + u32 ipa_usb_pipe_hdl; + u32 usb_ipa_pipe_hdl; + enum teth_tethering_mode tethering_mode; + enum ipa_client_type client_type; +}; + +/** + * struct ipa_tx_data_desc - information needed + * to send data packet to HW link: link to data descriptors + * priv: client specific private data + * @pyld_buffer: pointer to the data buffer that holds frame + * @pyld_len: length of the data packet + */ +struct ipa_tx_data_desc { + struct list_head link; + void *priv; + void *pyld_buffer; + u16 pyld_len; +}; + +/** + * struct ipa_rx_data - information needed + * to send to wlan driver on receiving data from ipa hw + * @skb: skb + * @dma_addr: DMA address of this Rx packet + */ +struct ipa_rx_data { + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +/** + * enum ipa_irq_type - IPA Interrupt Type + * Used to register handlers for IPA interrupts + * + * Below enum is a logical mapping and not the actual interrupt bit in HW + */ +enum ipa_irq_type { + IPA_BAD_SNOC_ACCESS_IRQ, + IPA_UC_IRQ_0, + IPA_UC_IRQ_1, + IPA_UC_IRQ_2, + IPA_UC_IRQ_3, + IPA_UC_IN_Q_NOT_EMPTY_IRQ, + IPA_UC_RX_CMD_Q_NOT_FULL_IRQ, + IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ, + IPA_RX_ERR_IRQ, + IPA_DEAGGR_ERR_IRQ, + IPA_TX_ERR_IRQ, + IPA_STEP_MODE_IRQ, + IPA_PROC_ERR_IRQ, + IPA_TX_SUSPEND_IRQ, + IPA_TX_HOLB_DROP_IRQ, + 
IPA_BAM_GSI_IDLE_IRQ, + IPA_PIPE_YELLOW_MARKER_BELOW_IRQ, + IPA_PIPE_RED_MARKER_BELOW_IRQ, + IPA_PIPE_YELLOW_MARKER_ABOVE_IRQ, + IPA_PIPE_RED_MARKER_ABOVE_IRQ, + IPA_UCP_IRQ, + IPA_DCMP_IRQ, + IPA_GSI_EE_IRQ, + IPA_GSI_IPA_IF_TLV_RCVD_IRQ, + IPA_GSI_UC_IRQ, + IPA_TLV_LEN_MIN_DSM_IRQ, + IPA_IRQ_MAX +}; + +/** + * struct ipa_tx_suspend_irq_data - interrupt data for IPA_TX_SUSPEND_IRQ + * @endpoints: bitmask of endpoints which case IPA_TX_SUSPEND_IRQ interrupt + * @dma_addr: DMA address of this Rx packet + */ +struct ipa_tx_suspend_irq_data { + u32 endpoints; +}; + + +/** + * typedef ipa_irq_handler_t - irq handler/callback type + * @param ipa_irq_type - [in] interrupt type + * @param private_data - [in, out] the client private data + * @param interrupt_data - [out] interrupt information data + * + * callback registered by ipa_add_interrupt_handler function to + * handle a specific interrupt type + * + * No return value + */ +typedef void (*ipa_irq_handler_t)(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data); + +/** + * struct IpaHwBamStats_t - Structure holding the BAM statistics + * + * @bamFifoFull : Number of times Bam Fifo got full - For In Ch: Good, + * For Out Ch: Bad + * @bamFifoEmpty : Number of times Bam Fifo got empty - For In Ch: Bad, + * For Out Ch: Good + * @bamFifoUsageHigh : Number of times Bam fifo usage went above 75% - + * For In Ch: Good, For Out Ch: Bad + * @bamFifoUsageLow : Number of times Bam fifo usage went below 25% - + * For In Ch: Bad, For Out Ch: Good + */ +struct IpaHwBamStats_t { + u32 bamFifoFull; + u32 bamFifoEmpty; + u32 bamFifoUsageHigh; + u32 bamFifoUsageLow; + u32 bamUtilCount; +} __packed; + +/** + * struct IpaHwRingStats_t - Structure holding the Ring statistics + * + * @ringFull : Number of times Transfer Ring got full - For In Ch: Good, + * For Out Ch: Bad + * @ringEmpty : Number of times Transfer Ring got empty - For In Ch: Bad, + * For Out Ch: Good + * @ringUsageHigh : Number of times Transfer Ring usage went above 75% - + * For In Ch: Good, For Out Ch: Bad + * @ringUsageLow : Number of times Transfer Ring usage went below 25% - + * For In Ch: Bad, For Out Ch: Good + */ +struct IpaHwRingStats_t { + u32 ringFull; + u32 ringEmpty; + u32 ringUsageHigh; + u32 ringUsageLow; + u32 RingUtilCount; +} __packed; + +/** + * struct IpaHwStatsWDIRxInfoData_t - Structure holding the WDI Rx channel + * structures + * + * @max_outstanding_pkts : Number of outstanding packets in Rx Ring + * @num_pkts_processed : Number of packets processed - cumulative + * @rx_ring_rp_value : Read pointer last advertized to the WLAN FW + * @rx_ind_ring_stats : Ring info + * @bam_stats : BAM info + * @num_bam_int_handled : Number of Bam Interrupts handled by FW + * @num_db : Number of times the doorbell was rung + * @num_unexpected_db : Number of unexpected doorbells + * @num_pkts_in_dis_uninit_state : number of completions we + * received in disabled or uninitialized state + * @num_ic_inj_vdev_change : Number of times the Imm Cmd is + * injected due to vdev_id change + * @num_ic_inj_fw_desc_change : Number of times the Imm Cmd is + * injected due to fw_desc change + * @num_qmb_int_handled : Number of QMB interrupts handled + */ +struct IpaHwStatsWDIRxInfoData_t { + u32 max_outstanding_pkts; + u32 num_pkts_processed; + u32 rx_ring_rp_value; + struct IpaHwRingStats_t rx_ind_ring_stats; + struct IpaHwBamStats_t bam_stats; + u32 num_bam_int_handled; + u32 num_db; + u32 num_unexpected_db; + u32 num_pkts_in_dis_uninit_state; + u32 num_ic_inj_vdev_change; + u32 
num_ic_inj_fw_desc_change; + u32 num_qmb_int_handled; + u32 reserved1; + u32 reserved2; +} __packed; + +/** + * struct IpaHwStatsWDITxInfoData_t - Structure holding the WDI Tx channel + * structures + * + * @num_pkts_processed : Number of packets processed - cumulative + * @copy_engine_doorbell_value : latest value of doorbell written to copy engine + * @num_db_fired : Number of DB from uC FW to Copy engine + * @tx_comp_ring_stats : ring info + * @bam_stats : BAM info + * @num_db : Number of times the doorbell was rung + * @num_unexpected_db : Number of unexpected doorbells + * @num_bam_int_handled : Number of Bam Interrupts handled by FW + * @num_bam_int_in_non_running_state : Number of Bam interrupts while not in + * Running state + * @num_qmb_int_handled : Number of QMB interrupts handled + */ +struct IpaHwStatsWDITxInfoData_t { + u32 num_pkts_processed; + u32 copy_engine_doorbell_value; + u32 num_db_fired; + struct IpaHwRingStats_t tx_comp_ring_stats; + struct IpaHwBamStats_t bam_stats; + u32 num_db; + u32 num_unexpected_db; + u32 num_bam_int_handled; + u32 num_bam_int_in_non_running_state; + u32 num_qmb_int_handled; + u32 num_bam_int_handled_while_wait_for_bam; +} __packed; + +/** + * struct IpaHwStatsWDIInfoData_t - Structure holding the WDI channel structures + * + * @rx_ch_stats : RX stats + * @tx_ch_stats : TX stats + */ +struct IpaHwStatsWDIInfoData_t { + struct IpaHwStatsWDIRxInfoData_t rx_ch_stats; + struct IpaHwStatsWDITxInfoData_t tx_ch_stats; +} __packed; + + +/** + * struct ipa_wdi_ul_params - WDI_RX configuration + * @rdy_ring_base_pa: physical address of the base of the Rx ring (containing + * Rx buffers) + * @rdy_ring_size: size of the Rx ring in bytes + * @rdy_ring_rp_pa: physical address of the location through which IPA uc is + * reading (WDI-1.0) + * @rdy_comp_ring_base_pa: physical address of the base of the Rx completion + * ring (WDI-2.0) + * @rdy_comp_ring_wp_pa: physical address of the location through which IPA + * uc is writing (WDI-2.0) + * @rdy_comp_ring_size: size of the Rx_completion ring in bytes + * expected to communicate about the Read pointer into the Rx Ring + */ +struct ipa_wdi_ul_params { + phys_addr_t rdy_ring_base_pa; + u32 rdy_ring_size; + phys_addr_t rdy_ring_rp_pa; + phys_addr_t rdy_comp_ring_base_pa; + phys_addr_t rdy_comp_ring_wp_pa; + u32 rdy_comp_ring_size; + u32 *rdy_ring_rp_va; + u32 *rdy_comp_ring_wp_va; +}; + +/** + * struct ipa_wdi_ul_params_smmu - WDI_RX configuration (with WLAN SMMU) + * @rdy_ring: SG table describing the Rx ring (containing Rx buffers) + * @rdy_ring_size: size of the Rx ring in bytes + * @rdy_ring_rp_pa: physical address of the location through which IPA uc is + * expected to communicate about the Read pointer into the Rx Ring + */ +struct ipa_wdi_ul_params_smmu { + struct sg_table rdy_ring; + u32 rdy_ring_size; + phys_addr_t rdy_ring_rp_pa; + struct sg_table rdy_comp_ring; + phys_addr_t rdy_comp_ring_wp_pa; + u32 rdy_comp_ring_size; + u32 *rdy_ring_rp_va; + u32 *rdy_comp_ring_wp_va; +}; + +/** + * struct ipa_wdi_dl_params - WDI_TX configuration + * @comp_ring_base_pa: physical address of the base of the Tx completion ring + * @comp_ring_size: size of the Tx completion ring in bytes + * @ce_ring_base_pa: physical address of the base of the Copy Engine Source + * Ring + * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to + * write into to trigger the copy engine + * @ce_ring_size: Copy Engine Ring size in bytes + * @num_tx_buffers: Number of pkt buffers allocated + */ +struct 
ipa_wdi_dl_params { + phys_addr_t comp_ring_base_pa; + u32 comp_ring_size; + phys_addr_t ce_ring_base_pa; + phys_addr_t ce_door_bell_pa; + u32 ce_ring_size; + u32 num_tx_buffers; +}; + +/** + * struct ipa_wdi_dl_params_smmu - WDI_TX configuration (with WLAN SMMU) + * @comp_ring: SG table describing the Tx completion ring + * @comp_ring_size: size of the Tx completion ring in bytes + * @ce_ring: SG table describing the Copy Engine Source Ring + * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to + * write into to trigger the copy engine + * @ce_ring_size: Copy Engine Ring size in bytes + * @num_tx_buffers: Number of pkt buffers allocated + */ +struct ipa_wdi_dl_params_smmu { + struct sg_table comp_ring; + u32 comp_ring_size; + struct sg_table ce_ring; + phys_addr_t ce_door_bell_pa; + u32 ce_ring_size; + u32 num_tx_buffers; +}; + +/** + * struct ipa_wdi_in_params - information provided by WDI client + * @sys: IPA EP configuration info + * @ul: WDI_RX configuration info + * @dl: WDI_TX configuration info + * @ul_smmu: WDI_RX configuration info when WLAN uses SMMU + * @dl_smmu: WDI_TX configuration info when WLAN uses SMMU + * @smmu_enabled: true if WLAN uses SMMU + * @ipa_wdi_meter_notifier_cb: Get WDI stats and quato info + */ +struct ipa_wdi_in_params { + struct ipa_sys_connect_params sys; + union { + struct ipa_wdi_ul_params ul; + struct ipa_wdi_dl_params dl; + struct ipa_wdi_ul_params_smmu ul_smmu; + struct ipa_wdi_dl_params_smmu dl_smmu; + } u; + bool smmu_enabled; +#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN + ipa_wdi_meter_notifier_cb wdi_notify; +#endif +}; + +enum ipa_upstream_type { + IPA_UPSTEAM_MODEM = 1, + IPA_UPSTEAM_WLAN, + IPA_UPSTEAM_MAX +}; + +/** + * struct ipa_wdi_out_params - information provided to WDI client + * @uc_door_bell_pa: physical address of IPA uc doorbell + * @clnt_hdl: opaque handle assigned to client + */ +struct ipa_wdi_out_params { + phys_addr_t uc_door_bell_pa; + u32 clnt_hdl; +}; + +/** + * struct ipa_wdi_db_params - information provided to retrieve + * physical address of uC doorbell + * @client: type of "client" (IPA_CLIENT_WLAN#_PROD/CONS) + * @uc_door_bell_pa: physical address of IPA uc doorbell + */ +struct ipa_wdi_db_params { + enum ipa_client_type client; + phys_addr_t uc_door_bell_pa; +}; + +/** + * struct ipa_wdi_uc_ready_params - uC ready CB parameters + * @is_uC_ready: uC loaded or not + * @priv : callback cookie + * @notify: callback + */ +typedef void (*ipa_uc_ready_cb)(void *priv); +struct ipa_wdi_uc_ready_params { + bool is_uC_ready; + void *priv; + ipa_uc_ready_cb notify; +}; + +/** + * struct ipa_wdi_buffer_info - address info of a WLAN allocated buffer + * @pa: physical address of the buffer + * @iova: IOVA of the buffer as embedded inside the WDI descriptors + * @size: size in bytes of the buffer + * @result: result of map or unmap operations (out param) + * + * IPA driver will create/release IOMMU mapping in IPA SMMU from iova->pa + */ +struct ipa_wdi_buffer_info { + phys_addr_t pa; + unsigned long iova; + size_t size; + int result; +}; + +/** + * struct ipa_gsi_ep_config - IPA GSI endpoint configurations + * + * @ipa_ep_num: IPA EP pipe number + * @ipa_gsi_chan_num: GSI channel number + * @ipa_if_tlv: number of IPA_IF TLV + * @ipa_if_aos: number of IPA_IF AOS + * @ee: Execution environment + * @prefetch_mode: Prefetch mode to be used + * @prefetch_threshold: Prefetch empty level threshold. 
+ * relevant for smart and free prefetch modes + */ +struct ipa_gsi_ep_config { + int ipa_ep_num; + int ipa_gsi_chan_num; + int ipa_if_tlv; + int ipa_if_aos; + int ee; + enum gsi_prefetch_mode prefetch_mode; + uint8_t prefetch_threshold; +}; + +/** + * struct ipa_tz_unlock_reg_info - Used in order unlock regions of memory by TZ + * @reg_addr - Physical address of the start of the region + * @size - Size of the region in bytes + */ +struct ipa_tz_unlock_reg_info { + u64 reg_addr; + u64 size; +}; + +/** + * struct ipa_smmu_in_params - information provided from client + * @ipa_smmu_client_type: clinet requesting for the smmu info. + */ + +enum ipa_smmu_client_type { + IPA_SMMU_WLAN_CLIENT, + IPA_SMMU_CLIENT_MAX +}; + +struct ipa_smmu_in_params { + enum ipa_smmu_client_type smmu_client; +}; + +/** + * struct ipa_smmu_out_params - information provided to IPA client + * @ipa_smmu_s1_enable: IPA S1 SMMU enable/disable status + */ +struct ipa_smmu_out_params { + bool smmu_enable; +}; + +#if defined CONFIG_IPA || defined CONFIG_IPA3 + +/* + * Resume / Suspend + */ +int ipa_reset_endpoint(u32 clnt_hdl); + +/* + * Remove ep delay + */ +int ipa_clear_endpoint_delay(u32 clnt_hdl); + +/* + * Disable ep + */ +int ipa_disable_endpoint(u32 clnt_hdl); + +/* + * Configuration + */ +int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg); + +int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg); + +int ipa_cfg_ep_conn_track(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ep_conn_track); + +int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg); + +int ipa_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg); + +int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg); + +int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg); + +int ipa_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ipa_ep_cfg); + +int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg); + +int ipa_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +int ipa_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg); + +int ipa_cfg_ep_metadata_mask(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask + *ipa_ep_cfg); + +int ipa_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); + +/* + * Header removal / addition + */ +int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs); + +int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only); + +int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls); + +int ipa_commit_hdr(void); + +int ipa_reset_hdr(bool user_only); + +int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup); + +int ipa_put_hdr(u32 hdr_hdl); + +int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy); + +/* + * Header Processing Context + */ +int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); + +int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); + +/* + * Routing + */ +int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); + +int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only); + +int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); + +int ipa_commit_rt(enum ipa_ip_type ip); + +int ipa_reset_rt(enum ipa_ip_type ip, bool user_only); + +int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); + +int ipa_put_rt_tbl(u32 rt_tbl_hdl); + +int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in); + 
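+/*
+ * A minimal sketch of how a client might drive the per-endpoint
+ * configuration API declared above; the endpoint handle (clnt_hdl) comes
+ * from pipe setup and the limits used here are placeholder values:
+ *
+ *	struct ipa_ep_cfg cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.mode.mode = IPA_BASIC;
+ *	cfg.hdr.hdr_len = ETH_HLEN;		(constant L2 header length)
+ *	cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+ *	cfg.aggr.aggr = IPA_GENERIC;
+ *	cfg.aggr.aggr_byte_limit = 16;		(KB)
+ *	cfg.aggr.aggr_time_limit = 1000;	(usec)
+ *	if (ipa_cfg_ep(clnt_hdl, &cfg))
+ *		return -EFAULT;
+ */
+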
+int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); + +/* + * Filtering + */ +int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); + +int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only); + +int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); + +int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); + +int ipa_commit_flt(enum ipa_ip_type ip); + +int ipa_reset_flt(enum ipa_ip_type ip, bool user_only); + +/* + * NAT\IPv6CT + */ +int ipa_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem); +int ipa_allocate_nat_table(struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc); +int ipa_allocate_ipv6ct_table( + struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc); + +int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init); +int ipa_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init); + +int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma); +int ipa_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma); + +int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del); +int ipa_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del); +int ipa_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del); + +int ipa_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn); + +/* + * Messaging + */ +int ipa_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback); +int ipa_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback); +int ipa_deregister_pull_msg(struct ipa_msg_meta *meta); + +/* + * Interface + */ +int ipa_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx); +int ipa_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext); +int ipa_deregister_intf(const char *name); + +/* + * Aggregation + */ +int ipa_set_aggr_mode(enum ipa_aggr_mode mode); + +int ipa_set_qcncm_ndp_sig(char sig[3]); + +int ipa_set_single_ndp_per_mbim(bool enable); + +/* + * Data path + */ +int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata); + +/* + * To transfer multiple data packets + * While passing the data descriptor list, the anchor node + * should be of type struct ipa_tx_data_desc not list_head + */ +int ipa_tx_dp_mul(enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc); + +void ipa_free_skb(struct ipa_rx_data *data); +int ipa_rx_poll(u32 clnt_hdl, int budget); +void ipa_recycle_wan_skb(struct sk_buff *skb); + +/* + * System pipes + */ +int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl); + +int ipa_teardown_sys_pipe(u32 clnt_hdl); + +int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out); +int ipa_disconnect_wdi_pipe(u32 clnt_hdl); +int ipa_enable_wdi_pipe(u32 clnt_hdl); +int ipa_disable_wdi_pipe(u32 clnt_hdl); +int ipa_resume_wdi_pipe(u32 clnt_hdl); +int ipa_suspend_wdi_pipe(u32 clnt_hdl); +int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats); +u16 ipa_get_smem_restr_bytes(void); +int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid, + uint64_t num_bytes); + +/* + * To retrieve doorbell physical address of + * wlan pipes + */ +int ipa_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out); + +/* + * To register uC ready callback if uC not ready + * and also check uC readiness + * if uC not ready only, register callback + */ +int ipa_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param); +/* + * To de-register uC ready callback + */ +int ipa_uc_dereg_rdyCB(void); + +int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); 
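+
+/*
+ * The WDI pipe calls above are typically exercised in the following order;
+ * this is a rough sketch only, with error handling and the Rx/Tx ring
+ * parameters inside ipa_wdi_in_params omitted:
+ *
+ *	struct ipa_wdi_in_params in;
+ *	struct ipa_wdi_out_params out;
+ *
+ *	memset(&in, 0, sizeof(in));
+ *	(fill in.sys and the in.u.ul / in.u.dl ring descriptors here)
+ *	ipa_connect_wdi_pipe(&in, &out);
+ *	ipa_enable_wdi_pipe(out.clnt_hdl);
+ *	ipa_resume_wdi_pipe(out.clnt_hdl);
+ *	...
+ *	ipa_suspend_wdi_pipe(out.clnt_hdl);
+ *	ipa_disable_wdi_pipe(out.clnt_hdl);
+ *	ipa_disconnect_wdi_pipe(out.clnt_hdl);
+ */
+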
+int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); + +/* + * Resource manager + */ +int ipa_rm_create_resource(struct ipa_rm_create_params *create_params); + +int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name); + +int ipa_rm_register(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params); + +int ipa_rm_deregister(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params); + +int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name, + struct ipa_rm_perf_profile *profile); + +int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name); + +int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name); + +int ipa_rm_notify_completion(enum ipa_rm_event event, + enum ipa_rm_resource_name resource_name); + +int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name, + unsigned long msecs); + +int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name); + +int ipa_rm_inactivity_timer_request_resource( + enum ipa_rm_resource_name resource_name); + +int ipa_rm_inactivity_timer_release_resource( + enum ipa_rm_resource_name resource_name); + +/* + * Tethering bridge (Rmnet / MBIM) + */ +int teth_bridge_init(struct teth_bridge_init_params *params); + +int teth_bridge_disconnect(enum ipa_client_type client); + +int teth_bridge_connect(struct teth_bridge_connect_params *connect_params); + +/* + * Tethering client info + */ +void ipa_set_client(int index, enum ipacm_client_enum client, bool uplink); + +enum ipacm_client_enum ipa_get_client(int pipe_idx); + +bool ipa_get_client_uplink(int pipe_idx); + +/* + * IPADMA + */ +int ipa_dma_init(void); + +int ipa_dma_enable(void); + +int ipa_dma_disable(void); + +int ipa_dma_sync_memcpy(u64 dest, u64 src, int len); + +int ipa_dma_async_memcpy(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param); + +int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); + +void ipa_dma_destroy(void); + +/* + * mux id + */ +int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in); + +/* + * interrupts + */ +int ipa_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data); + +int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt); + +int ipa_restore_suspend_handler(void); + +/* + * Miscellaneous + */ +void ipa_bam_reg_dump(void); + +int ipa_get_ep_mapping(enum ipa_client_type client); + +bool ipa_is_ready(void); + +void ipa_proxy_clk_vote(void); +void ipa_proxy_clk_unvote(void); + +enum ipa_hw_type ipa_get_hw_type(void); + +bool ipa_is_client_handle_valid(u32 clnt_hdl); + +enum ipa_client_type ipa_get_client_mapping(int pipe_idx); + +enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx); + +bool ipa_get_modem_cfg_emb_pipe_flt(void); + +enum ipa_transport_type ipa_get_transport_type(void); + +struct device *ipa_get_dma_dev(void); +struct iommu_domain *ipa_get_smmu_domain(void); + +int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count); + +const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info + (enum ipa_client_type client); + 
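/*
 * Illustrative caller-side sketch (not part of this patch): the IPADMA copy
 * sequence suggested by the declarations above -- init and enable the engine,
 * issue one synchronous copy, then disable and destroy it. The helper name is
 * made up, the physical addresses are caller-supplied placeholders, and the
 * disable-before-destroy ordering is an assumption.
 */
#include <linux/ipa.h>

static int example_ipa_dma_copy(u64 dest_pa, u64 src_pa, int len)
{
	int ret;

	ret = ipa_dma_init();
	if (ret)
		return ret;

	ret = ipa_dma_enable();
	if (ret)
		goto destroy;

	/* Blocking copy of 'len' bytes between IPA-visible physical buffers */
	ret = ipa_dma_sync_memcpy(dest_pa, src_pa, len);

	ipa_dma_disable();
destroy:
	ipa_dma_destroy();
	return ret;
}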
+int ipa_stop_gsi_channel(u32 clnt_hdl); + +typedef void (*ipa_ready_cb)(void *user_data); + +/** + * ipa_register_ipa_ready_cb() - register a callback to be invoked + * when IPA core driver initialization is complete. + * + * @ipa_ready_cb: CB to be triggered. + * @user_data: Data to be sent to the originator of the CB. + * + * Note: This function is expected to be utilized when ipa_is_ready + * function returns false. + * An IPA client may also use this function directly rather than + * calling ipa_is_ready beforehand, as if this API returns -EEXIST, + * this means IPA initialization is complete (and no callback will + * be triggered). + * When the callback is triggered, the client MUST perform his + * operations in a different context. + * + * The function will return 0 on success, -ENOMEM on memory issues and + * -EEXIST if IPA initialization is complete already. + */ +int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data), + void *user_data); + +/** + * ipa_tz_unlock_reg - Unlocks memory regions so that they become accessible + * from AP. + * @reg_info - Pointer to array of memory regions to unlock + * @num_regs - Number of elements in the array + * + * Converts the input array of regions to a struct that TZ understands and + * issues an SCM call. + * Also flushes the memory cache to DDR in order to make sure that TZ sees the + * correct data structure. + * + * Returns: 0 on success, negative on failure + */ +int ipa_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs); +int ipa_get_smmu_params(struct ipa_smmu_in_params *in, + struct ipa_smmu_out_params *out); +/** + * ipa_is_vlan_mode - check if a LAN driver should load in VLAN mode + * @iface - type of vlan capable device + * @res - query result: true for vlan mode, false for non vlan mode + * + * API must be called after ipa_is_ready() returns true, otherwise it will fail + * + * Returns: 0 on success, negative on failure + */ +int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res); +#else /* (CONFIG_IPA || CONFIG_IPA3) */ + +/* + * Resume / Suspend + */ +static inline int ipa_reset_endpoint(u32 clnt_hdl) +{ + return -EPERM; +} + +/* + * Remove ep delay + */ +static inline int ipa_clear_endpoint_delay(u32 clnt_hdl) +{ + return -EPERM; +} + +/* + * Disable ep + */ +static inline int ipa_disable_endpoint(u32 clnt_hdl) +{ + return -EPERM; +} + +/* + * Configuration + */ +static inline int ipa_cfg_ep(u32 clnt_hdl, + const struct ipa_ep_cfg *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_nat(u32 clnt_hdl, + const struct ipa_ep_cfg_nat *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_conn_track(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ep_conn_track) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_hdr(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_mode(u32 clnt_hdl, + const struct ipa_ep_cfg_mode *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_aggr(u32 clnt_hdl, + const struct ipa_ep_cfg_aggr *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_route(u32 clnt_hdl, + const struct ipa_ep_cfg_route *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_holb(u32 clnt_hdl, + const struct 
ipa_ep_cfg_holb *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ep_holb) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_cfg(u32 clnt_hdl, + const struct ipa_ep_cfg_cfg *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_metadata_mask(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_ctrl(u32 clnt_hdl, + const struct ipa_ep_cfg_ctrl *ep_ctrl) +{ + return -EPERM; +} + +/* + * Header removal / addition + */ +static inline int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs) +{ + return -EPERM; +} + +static inline int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, + bool user_only) +{ + return -EPERM; +} + +static inline int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls) +{ + return -EPERM; +} + +static inline int ipa_commit_hdr(void) +{ + return -EPERM; +} + +static inline int ipa_reset_hdr(bool user_only) +{ + return -EPERM; +} + +static inline int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup) +{ + return -EPERM; +} + +static inline int ipa_put_hdr(u32 hdr_hdl) +{ + return -EPERM; +} + +static inline int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy) +{ + return -EPERM; +} + +/* + * Header Processing Context + */ +static inline int ipa_add_hdr_proc_ctx( + struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) +{ + return -EPERM; +} + +static inline int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls) +{ + return -EPERM; +} +/* + * Routing + */ +static inline int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) +{ + return -EPERM; +} + +static inline int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, + bool user_only) +{ + return -EPERM; +} + +static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls) +{ + return -EPERM; +} + +static inline int ipa_commit_rt(enum ipa_ip_type ip) +{ + return -EPERM; +} + +static inline int ipa_reset_rt(enum ipa_ip_type ip, bool user_only) +{ + return -EPERM; +} + +static inline int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) +{ + return -EPERM; +} + +static inline int ipa_put_rt_tbl(u32 rt_tbl_hdl) +{ + return -EPERM; +} + +static inline int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in) +{ + return -EPERM; +} + +static inline int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules) +{ + return -EPERM; +} + +/* + * Filtering + */ +static inline int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) +{ + return -EPERM; +} + +static inline int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, + bool user_only) +{ + return -EPERM; +} + +static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) +{ + return -EPERM; +} + +static inline int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules) +{ + return -EPERM; +} + +static inline int ipa_commit_flt(enum ipa_ip_type ip) +{ + return -EPERM; +} + +static inline int ipa_reset_flt(enum ipa_ip_type ip, bool user_only) +{ + return -EPERM; +} + +/* + * NAT + */ +static inline int ipa_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem) +{ + return -EPERM; +} + +static inline int ipa_allocate_nat_table( + struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc) +{ + return -EPERM; +} + +static inline int ipa_allocate_ipv6ct_table( + struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc) +{ + return -EPERM; +} + +static inline int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) +{ + return -EPERM; +} + +static inline int ipa_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init) +{ + 
return -EPERM; +} + +static inline int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) +{ + return -EPERM; +} + +static inline int ipa_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) +{ + return -EPERM; +} + +static inline int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del) +{ + return -EPERM; +} + +static inline int ipa_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del) +{ + return -EPERM; +} + +static inline int ipa_del_ipv6ct_table( + struct ipa_ioc_nat_ipv6ct_table_del *del) +{ + return -EPERM; +} + +static inline int ipa_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn) +{ + return -EPERM; +} + +/* + * Messaging + */ +static inline int ipa_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback) +{ + return -EPERM; +} + +static inline int ipa_register_pull_msg(struct ipa_msg_meta *meta, + ipa_msg_pull_fn callback) +{ + return -EPERM; +} + +static inline int ipa_deregister_pull_msg(struct ipa_msg_meta *meta) +{ + return -EPERM; +} + +/* + * Interface + */ +static inline int ipa_register_intf(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx) +{ + return -EPERM; +} + +static inline int ipa_register_intf_ext(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext) +{ + return -EPERM; +} + +static inline int ipa_deregister_intf(const char *name) +{ + return -EPERM; +} + +/* + * Aggregation + */ +static inline int ipa_set_aggr_mode(enum ipa_aggr_mode mode) +{ + return -EPERM; +} + +static inline int ipa_set_qcncm_ndp_sig(char sig[3]) +{ + return -EPERM; +} + +static inline int ipa_set_single_ndp_per_mbim(bool enable) +{ + return -EPERM; +} + +/* + * Data path + */ +static inline int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata) +{ + return -EPERM; +} + +/* + * To transfer multiple data packets + */ +static inline int ipa_tx_dp_mul( + enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc) +{ + return -EPERM; +} + +static inline void ipa_free_skb(struct ipa_rx_data *rx_in) +{ +} + +static inline int ipa_rx_poll(u32 clnt_hdl, int budget) +{ + return -EPERM; +} + +static inline void ipa_recycle_wan_skb(struct sk_buff *skb) +{ +} + +/* + * System pipes + */ +static inline u16 ipa_get_smem_restr_bytes(void) +{ + return -EPERM; +} + +static inline int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, + u32 *clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_teardown_sys_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out) +{ + return -EPERM; +} + +static inline int ipa_disconnect_wdi_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_enable_wdi_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_disable_wdi_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_resume_wdi_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_suspend_wdi_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid, + uint64_t num_bytes) +{ + return -EPERM; +} + +static inline int ipa_uc_wdi_get_dbpa( + struct ipa_wdi_db_params *out) +{ + return -EPERM; +} + +static inline int ipa_uc_reg_rdyCB( + struct ipa_wdi_uc_ready_params *param) +{ + return -EPERM; +} + +static inline int ipa_uc_dereg_rdyCB(void) +{ + return -EPERM; +} + + +/* + * Resource manager + */ +static inline int ipa_rm_create_resource( + struct ipa_rm_create_params 
*create_params) +{ + return -EPERM; +} + +static inline int ipa_rm_delete_resource( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_register(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + return -EPERM; +} + +static inline int ipa_rm_set_perf_profile( + enum ipa_rm_resource_name resource_name, + struct ipa_rm_perf_profile *profile) +{ + return -EPERM; +} + +static inline int ipa_rm_deregister(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + return -EPERM; +} + +static inline int ipa_rm_add_dependency( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return -EPERM; +} + +static inline int ipa_rm_add_dependency_sync( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return -EPERM; +} + +static inline int ipa_rm_delete_dependency( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return -EPERM; +} + +static inline int ipa_rm_request_resource( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_release_resource( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_notify_completion(enum ipa_rm_event event, + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_inactivity_timer_init( + enum ipa_rm_resource_name resource_name, + unsigned long msecs) +{ + return -EPERM; +} + +static inline int ipa_rm_inactivity_timer_destroy( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_inactivity_timer_request_resource( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_inactivity_timer_release_resource( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +/* + * Tethering bridge (Rmnet / MBIM) + */ +static inline int teth_bridge_init(struct teth_bridge_init_params *params) +{ + return -EPERM; +} + +static inline int teth_bridge_disconnect(enum ipa_client_type client) +{ + return -EPERM; +} + +static inline int teth_bridge_connect(struct teth_bridge_connect_params + *connect_params) +{ + return -EPERM; +} + +/* + * Tethering client info + */ +static inline void ipa_set_client(int index, enum ipacm_client_enum client, + bool uplink) +{ +} + +static inline enum ipacm_client_enum ipa_get_client(int pipe_idx) +{ + return -EPERM; +} + +static inline bool ipa_get_client_uplink(int pipe_idx) +{ + return -EPERM; +} + +/* + * IPADMA + */ +static inline int ipa_dma_init(void) +{ + return -EPERM; +} + +static inline int ipa_dma_enable(void) +{ + return -EPERM; +} + +static inline int ipa_dma_disable(void) +{ + return -EPERM; +} + +static inline int ipa_dma_sync_memcpy(phys_addr_t dest, phys_addr_t src + , int len) +{ + return -EPERM; +} + +static inline int ipa_dma_async_memcpy(phys_addr_t dest, phys_addr_t src + , int len, void (*user_cb)(void *user1), + void *user_param) +{ + return -EPERM; +} + +static inline int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) +{ + return -EPERM; +} + +static inline void ipa_dma_destroy(void) +{ +} + +/* + * mux id + */ +static inline int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in) +{ + return -EPERM; +} + +/* + * interrupts + */ +static inline int ipa_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data) +{ 
+ return -EPERM; +} + +static inline int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt) +{ + return -EPERM; +} + +static inline int ipa_restore_suspend_handler(void) +{ + return -EPERM; +} + +/* + * Miscellaneous + */ +static inline void ipa_bam_reg_dump(void) +{ +} + +static inline int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ + return -EPERM; +} + +static inline int ipa_get_ep_mapping(enum ipa_client_type client) +{ + return -EPERM; +} + +static inline bool ipa_is_ready(void) +{ + return false; +} + +static inline void ipa_proxy_clk_vote(void) +{ +} + +static inline void ipa_proxy_clk_unvote(void) +{ +} + +static inline enum ipa_hw_type ipa_get_hw_type(void) +{ + return IPA_HW_None; +} + +static inline bool ipa_is_client_handle_valid(u32 clnt_hdl) +{ + return -EINVAL; +} + +static inline enum ipa_client_type ipa_get_client_mapping(int pipe_idx) +{ + return -EINVAL; +} + +static inline enum ipa_rm_resource_name ipa_get_rm_resource_from_ep( + int pipe_idx) +{ + return -EFAULT; +} + +static inline bool ipa_get_modem_cfg_emb_pipe_flt(void) +{ + return -EINVAL; +} + +static inline enum ipa_transport_type ipa_get_transport_type(void) +{ + return -EFAULT; +} + +static inline struct device *ipa_get_dma_dev(void) +{ + return NULL; +} + +static inline struct iommu_domain *ipa_get_smmu_domain(void) +{ + return NULL; +} + +static inline int ipa_create_wdi_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return -EINVAL; +} + +static inline int ipa_release_wdi_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return -EINVAL; +} + +static inline int ipa_disable_apps_wan_cons_deaggr(void) +{ + return -EINVAL; +} + +static inline const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info + (enum ipa_client_type client) +{ + return NULL; +} + +static inline int ipa_stop_gsi_channel(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_register_ipa_ready_cb( + void (*ipa_ready_cb)(void *user_data), + void *user_data) +{ + return -EPERM; +} + +static inline int ipa_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, + u16 num_regs) +{ + return -EPERM; +} + + +static inline int ipa_get_smmu_params(struct ipa_smmu_in_params *in, + struct ipa_smmu_out_params *out) +{ + return -EPERM; +} + +static inline int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res) +{ + return -EPERM; +} +#endif /* (CONFIG_IPA || CONFIG_IPA3) */ + +#endif /* _IPA_H_ */ diff --git a/include/linux/ipa_mhi.h b/include/linux/ipa_mhi.h new file mode 100644 index 000000000000..f30f2035b2fc --- /dev/null +++ b/include/linux/ipa_mhi.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef IPA_MHI_H_ +#define IPA_MHI_H_ + +#include +#include + +/** + * enum ipa_mhi_event_type - event type for mhi callback + * + * @IPA_MHI_EVENT_READY: IPA MHI is ready and IPA uC is loaded. 
After getting + * this event MHI client is expected to call to ipa_mhi_start() API + * @IPA_MHI_EVENT_DATA_AVAILABLE: downlink data available on MHI channel + */ +enum ipa_mhi_event_type { + IPA_MHI_EVENT_READY, + IPA_MHI_EVENT_DATA_AVAILABLE, + IPA_MHI_EVENT_MAX, +}; + +typedef void (*mhi_client_cb)(void *priv, enum ipa_mhi_event_type event, + unsigned long data); + +/** + * struct ipa_mhi_msi_info - parameters for MSI (Message Signaled Interrupts) + * @addr_low: MSI lower base physical address + * @addr_hi: MSI higher base physical address + * @data: Data Pattern to use when generating the MSI + * @mask: Mask indicating number of messages assigned by the host to device + * + * msi value is written according to this formula: + * ((data & ~mask) | (mmio.msiVec & mask)) + */ +struct ipa_mhi_msi_info { + u32 addr_low; + u32 addr_hi; + u32 data; + u32 mask; +}; + +/** + * struct ipa_mhi_init_params - parameters for IPA MHI initialization API + * + * @msi: MSI (Message Signaled Interrupts) parameters + * @mmio_addr: MHI MMIO physical address + * @first_ch_idx: First channel ID for hardware accelerated channels. + * @first_er_idx: First event ring ID for hardware accelerated channels. + * @assert_bit40: should assert bit 40 in order to access host space. + * if PCIe iATU is configured then not need to assert bit40 + * @notify: client callback + * @priv: client private data to be provided in client callback + * @test_mode: flag to indicate if IPA MHI is in unit test mode + */ +struct ipa_mhi_init_params { + struct ipa_mhi_msi_info msi; + u32 mmio_addr; + u32 first_ch_idx; + u32 first_er_idx; + bool assert_bit40; + mhi_client_cb notify; + void *priv; + bool test_mode; +}; + +/** + * struct ipa_mhi_start_params - parameters for IPA MHI start API + * + * @host_ctrl_addr: Base address of MHI control data structures + * @host_data_addr: Base address of MHI data buffers + * @channel_context_addr: channel context array address in host address space + * @event_context_addr: event context array address in host address space + */ +struct ipa_mhi_start_params { + u32 host_ctrl_addr; + u32 host_data_addr; + u64 channel_context_array_addr; + u64 event_context_array_addr; +}; + +/** + * struct ipa_mhi_connect_params - parameters for IPA MHI channel connect API + * + * @sys: IPA EP configuration info + * @channel_id: MHI channel id + */ +struct ipa_mhi_connect_params { + struct ipa_sys_connect_params sys; + u8 channel_id; +}; + +/* bit #40 in address should be asserted for MHI transfers over pcie */ +#define IPA_MHI_HOST_ADDR(addr) ((addr) | BIT_ULL(40)) + +#if defined CONFIG_IPA || defined CONFIG_IPA3 + +int ipa_mhi_init(struct ipa_mhi_init_params *params); + +int ipa_mhi_start(struct ipa_mhi_start_params *params); + +int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl); + +int ipa_mhi_disconnect_pipe(u32 clnt_hdl); + +int ipa_mhi_suspend(bool force); + +int ipa_mhi_resume(void); + +void ipa_mhi_destroy(void); + +#else /* (CONFIG_IPA || CONFIG_IPA3) */ + +static inline int ipa_mhi_init(struct ipa_mhi_init_params *params) +{ + return -EPERM; +} + +static inline int ipa_mhi_start(struct ipa_mhi_start_params *params) +{ + return -EPERM; +} + +static inline int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, + u32 *clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_mhi_disconnect_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_mhi_suspend(bool force) +{ + return -EPERM; +} + +static inline int ipa_mhi_resume(void) +{ + return -EPERM; +} + +static inline void 
ipa_mhi_destroy(void) +{ + +} + +#endif /* (CONFIG_IPA || CONFIG_IPA3) */ + +#endif /* IPA_MHI_H_ */ diff --git a/include/linux/ipa_odu_bridge.h b/include/linux/ipa_odu_bridge.h new file mode 100644 index 000000000000..3861414abe4a --- /dev/null +++ b/include/linux/ipa_odu_bridge.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_ODO_BRIDGE_H_ +#define _IPA_ODO_BRIDGE_H_ + +#include + +/** + * struct odu_bridge_params - parameters for odu bridge initialization API + * + * @netdev_name: network interface name + * @priv: private data that will be supplied to client's callback + * @tx_dp_notify: callback for handling SKB. the following event are supported: + * IPA_WRITE_DONE: will be called after client called to odu_bridge_tx_dp() + * Client is expected to free the skb. + * IPA_RECEIVE: will be called for delivering skb to APPS. + * Client is expected to deliver the skb to network stack. + * @send_dl_skb: callback for sending skb on downlink direction to adapter. + * Client is expected to free the skb. + * @device_ethaddr: device Ethernet address in network order. + * @ipa_desc_size: IPA Sys Pipe Desc Size + */ +struct odu_bridge_params { + const char *netdev_name; + void *priv; + ipa_notify_cb tx_dp_notify; + int (*send_dl_skb)(void *priv, struct sk_buff *skb); + u8 device_ethaddr[ETH_ALEN]; + u32 ipa_desc_size; +}; + +/** + * struct ipa_bridge_init_params - parameters for IPA bridge initialization API + * + * @info: structure contains initialization information + * @wakeup_request: callback to client to indicate there is downlink data + * available. Client is expected to call ipa_bridge_resume() to start + * receiving data + */ +struct ipa_bridge_init_params { + struct odu_bridge_params info; + void (*wakeup_request)(void *cl_priv); +}; + +#ifdef CONFIG_IPA3 + +int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl); + +int ipa_bridge_connect(u32 hdl); + +int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth); + +int ipa_bridge_disconnect(u32 hdl); + +int ipa_bridge_suspend(u32 hdl); + +int ipa_bridge_resume(u32 hdl); + +int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb, + struct ipa_tx_meta *metadata); + +int ipa_bridge_cleanup(u32 hdl); + +#else + +static inline int ipa_bridge_init(struct odu_bridge_params *params, u32 *hdl) +{ + return -EPERM; +} + +static inline int ipa_bridge_connect(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth) +{ + return -EPERM; +} + +static inline int ipa_bridge_disconnect(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_bridge_suspend(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_bridge_resume(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb, +struct ipa_tx_meta *metadata) +{ + return -EPERM; +} + +static inline int ipa_bridge_cleanup(u32 hdl) +{ + return -EPERM; +} + +#endif /* CONFIG_IPA3 */ + +/* Below API is deprecated. 
Please use the API above */ +# if defined CONFIG_IPA || defined CONFIG_IPA3 + +int odu_bridge_init(struct odu_bridge_params *params); + +int odu_bridge_connect(void); + +int odu_bridge_disconnect(void); + +int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata); + +int odu_bridge_cleanup(void); + +#else + +static inline int odu_bridge_init(struct odu_bridge_params *params) +{ + return -EPERM; +} + +static inline int odu_bridge_disconnect(void) +{ + return -EPERM; +} + +static inline int odu_bridge_connect(void) +{ + return -EPERM; +} + +static inline int odu_bridge_tx_dp(struct sk_buff *skb, + struct ipa_tx_meta *metadata) +{ + return -EPERM; +} + +static inline int odu_bridge_cleanup(void) +{ + return -EPERM; +} + +#endif /* CONFIG_IPA || defined CONFIG_IPA3 */ + +#endif /* _IPA_ODO_BRIDGE_H */ diff --git a/include/linux/ipa_uc_offload.h b/include/linux/ipa_uc_offload.h new file mode 100644 index 000000000000..b6e666615f9a --- /dev/null +++ b/include/linux/ipa_uc_offload.h @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_UC_OFFLOAD_H_ +#define _IPA_UC_OFFLOAD_H_ + +#include + +/** + * enum ipa_uc_offload_proto + * Protocol type: either WDI or Neutrino + * + * @IPA_UC_WDI: wdi Protocol + * @IPA_UC_NTN: Neutrino Protocol + */ +enum ipa_uc_offload_proto { + IPA_UC_INVALID = 0, + IPA_UC_WDI = 1, + IPA_UC_NTN = 2, + IPA_UC_MAX_PROT_SIZE +}; + +/** + * struct ipa_hdr_info - Header to install on IPA HW + * + * @hdr: header to install on IPA HW + * @hdr_len: length of header + * @dst_mac_addr_offset: destination mac address offset + * @hdr_type: layer two header type + */ +struct ipa_hdr_info { + u8 *hdr; + u8 hdr_len; + u8 dst_mac_addr_offset; + enum ipa_hdr_l2_type hdr_type; +}; + +/** + * struct ipa_uc_offload_intf_params - parameters for uC offload + * interface registration + * + * @netdev_name: network interface name + * @notify: callback for exception/embedded packets + * @priv: callback cookie + * @hdr_info: header information + * @meta_data: meta data if any + * @meta_data_mask: meta data mask + * @proto: uC offload protocol type + * @alt_dst_pipe: alternate routing output pipe + */ +struct ipa_uc_offload_intf_params { + const char *netdev_name; + ipa_notify_cb notify; + void *priv; + struct ipa_hdr_info hdr_info[IPA_IP_MAX]; + u8 is_meta_data_valid; + u32 meta_data; + u32 meta_data_mask; + enum ipa_uc_offload_proto proto; + enum ipa_client_type alt_dst_pipe; +}; + +/** + * struct ntn_buff_smmu_map - IPA iova->pa SMMU mapping + * @iova: virtual address of the data buffer + * @pa: physical address of the data buffer + */ +struct ntn_buff_smmu_map { + dma_addr_t iova; + phys_addr_t pa; +}; + +/** + * struct ipa_ntn_setup_info - NTN TX/Rx configuration + * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS) + * @smmu_enabled: SMMU is enabled for uC or not + * @ring_base_pa: physical address of the base of the Tx/Rx ring + * @ring_base_iova: virtual address of the base of the Tx/Rx ring + * @ring_base_sgt:Scatter table for ntn_rings,contains valid non NULL + * value when ENAC S1-SMMU enabed, else NULL. + * @ntn_ring_size: size of the Tx/Rx ring (in terms of elements) + * @buff_pool_base_pa: physical address of the base of the Tx/Rx buffer pool + * @buff_pool_base_iova: virtual address of the base of the Tx/Rx buffer pool + * @buff_pool_base_sgt: Scatter table for buffer pools,contains valid non NULL + * value when EMAC S1-SMMU enabed, else NULL. 
+ * @num_buffers: Rx/Tx buffer pool size (in terms of elements) + * @data_buff_size: size of the each data buffer allocated in DDR + * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN Ring's + * tail pointer + */ +struct ipa_ntn_setup_info { + enum ipa_client_type client; + bool smmu_enabled; + phys_addr_t ring_base_pa; + dma_addr_t ring_base_iova; + struct sg_table *ring_base_sgt; + + u32 ntn_ring_size; + + phys_addr_t buff_pool_base_pa; + dma_addr_t buff_pool_base_iova; + struct sg_table *buff_pool_base_sgt; + + struct ntn_buff_smmu_map *data_buff_list; + + u32 num_buffers; + + u32 data_buff_size; + + phys_addr_t ntn_reg_base_ptr_pa; +}; + +/** + * struct ipa_uc_offload_out_params - out parameters for uC offload + * + * @clnt_hndl: Handle that client need to pass during + * further operations + */ +struct ipa_uc_offload_out_params { + u32 clnt_hndl; +}; + +/** + * struct ipa_ntn_conn_in_params - NTN TX/Rx connect parameters + * @ul: parameters to connect UL pipe(from Neutrino to IPA) + * @dl: parameters to connect DL pipe(from IPA to Neutrino) + */ +struct ipa_ntn_conn_in_params { + struct ipa_ntn_setup_info ul; + struct ipa_ntn_setup_info dl; +}; + +/** + * struct ipa_ntn_conn_out_params - information provided + * to uC offload client + * @ul_uc_db_pa: physical address of IPA uc doorbell for UL + * @dl_uc_db_pa: physical address of IPA uc doorbell for DL + * @clnt_hdl: opaque handle assigned to offload client + */ +struct ipa_ntn_conn_out_params { + phys_addr_t ul_uc_db_pa; + phys_addr_t dl_uc_db_pa; +}; + +/** + * struct ipa_uc_offload_conn_in_params - information provided by + * uC offload client + * @clnt_hndl: Handle that return as part of reg interface + * @proto: Protocol to use for offload data path + * @ntn: uC RX/Tx configuration info + */ +struct ipa_uc_offload_conn_in_params { + u32 clnt_hndl; + union { + struct ipa_ntn_conn_in_params ntn; + } u; +}; + +/** + * struct ipa_uc_offload_conn_out_params - information provided + * to uC offload client + * @ul_uc_db_pa: physical address of IPA uc doorbell for UL + * @dl_uc_db_pa: physical address of IPA uc doorbell for DL + * @clnt_hdl: opaque handle assigned to offload client + */ +struct ipa_uc_offload_conn_out_params { + union { + struct ipa_ntn_conn_out_params ntn; + } u; +}; + +/** + * struct ipa_perf_profile - To set BandWidth profile + * + * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS) + * @max_supported_bw_mbps: maximum bandwidth needed (in Mbps) + */ +struct ipa_perf_profile { + enum ipa_client_type client; + u32 max_supported_bw_mbps; +}; + +/** + * struct ipa_uc_ready_params - uC ready CB parameters + * @is_uC_ready: uC loaded or not + * @priv : callback cookie + * @notify: callback + * @proto: uC offload protocol type + */ +struct ipa_uc_ready_params { + bool is_uC_ready; + void *priv; + ipa_uc_ready_cb notify; + enum ipa_uc_offload_proto proto; +}; + +#if defined CONFIG_IPA || defined CONFIG_IPA3 + +/** + * ipa_uc_offload_reg_intf - Client should call this function to + * init uC offload data path + * + * @init: [in] initialization parameters + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +int ipa_uc_offload_reg_intf( + struct ipa_uc_offload_intf_params *in, + struct ipa_uc_offload_out_params *out); + +/** + * ipa_uc_offload_cleanup - Client Driver should call this + * function before unload and after disconnect + * + * @Return 0 on success, negative on failure + */ 
+int ipa_uc_offload_cleanup(u32 clnt_hdl);
+
+/**
+ * ipa_uc_offload_conn_pipes - Client should call this
+ * function to connect uC pipe for offload data path
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *in,
+	struct ipa_uc_offload_conn_out_params *out);
+
+/**
+ * ipa_uc_offload_disconn_pipes() - Client should call this
+ * function to disconnect uC pipe to disable offload data path
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl);
+
+/**
+ * ipa_set_perf_profile() - Client should call this function to
+ * set IPA clock Band Width based on data rates
+ * @profile: [in] BandWidth profile to use
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_set_perf_profile(struct ipa_perf_profile *profile);
+
+
+/*
+ * To register uC ready callback if uC not ready
+ * and also check uC readiness
+ * if uC not ready only, register callback
+ */
+int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param);
+
+/*
+ * To de-register uC ready callback
+ */
+void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto);
+
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+
+static inline int ipa_uc_offload_reg_intf(
+	struct ipa_uc_offload_intf_params *in,
+	struct ipa_uc_offload_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_offload_conn_pipes(
+	struct ipa_uc_offload_conn_in_params *in,
+	struct ipa_uc_offload_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param)
+{
+	return -EPERM;
+}
+
+static inline void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
+{
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* _IPA_UC_OFFLOAD_H_ */
diff --git a/include/linux/ipa_usb.h b/include/linux/ipa_usb.h
new file mode 100644
index 000000000000..f2f5b2a5aafb
--- /dev/null
+++ b/include/linux/ipa_usb.h
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */ + +#ifndef _IPA_USB_H_ +#define _IPA_USB_H_ + +enum ipa_usb_teth_prot { + IPA_USB_RNDIS = 0, + IPA_USB_ECM = 1, + IPA_USB_RMNET = 2, + IPA_USB_MBIM = 3, + IPA_USB_DIAG = 4, + IPA_USB_MAX_TETH_PROT_SIZE +}; + +/** + * ipa_usb_teth_params - parameters for RDNIS/ECM initialization API + * + * @host_ethaddr: host Ethernet address in network order + * @device_ethaddr: device Ethernet address in network order + */ +struct ipa_usb_teth_params { + u8 host_ethaddr[ETH_ALEN]; + u8 device_ethaddr[ETH_ALEN]; +}; + +enum ipa_usb_notify_event { + IPA_USB_DEVICE_READY, + IPA_USB_REMOTE_WAKEUP, + IPA_USB_SUSPEND_COMPLETED +}; + +enum ipa_usb_max_usb_packet_size { + IPA_USB_FULL_SPEED_64B = 64, + IPA_USB_HIGH_SPEED_512B = 512, + IPA_USB_SUPER_SPEED_1024B = 1024 +}; + +/** + * ipa_usb_teth_prot_params - parameters for connecting RNDIS + * + * @max_xfer_size_bytes_to_dev: max size of UL packets in bytes + * @max_packet_number_to_dev: max number of UL aggregated packets + * @max_xfer_size_bytes_to_host: max size of DL packets in bytes + * + */ +struct ipa_usb_teth_prot_params { + u32 max_xfer_size_bytes_to_dev; + u32 max_packet_number_to_dev; + u32 max_xfer_size_bytes_to_host; +}; + +/** + * ipa_usb_xdci_connect_params - parameters required to start IN, OUT + * channels, and connect RNDIS/ECM/teth_bridge + * + * @max_pkt_size: USB speed (full/high/super/super-speed plus) + * @ipa_to_usb_xferrscidx: Transfer Resource Index (XferRscIdx) for IN channel. + * The hardware-assigned transfer resource index for the + * transfer, which was returned in response to the + * Start Transfer command. This field is used for + * "Update Transfer" command. + * Should be 0 =< ipa_to_usb_xferrscidx <= 127. + * @ipa_to_usb_xferrscidx_valid: true if xferRscIdx should be updated for IN + * channel + * @usb_to_ipa_xferrscidx: Transfer Resource Index (XferRscIdx) for OUT channel + * Should be 0 =< usb_to_ipa_xferrscidx <= 127. + * @usb_to_ipa_xferrscidx_valid: true if xferRscIdx should be updated for OUT + * channel + * @teth_prot: tethering protocol + * @teth_prot_params: parameters for connecting the tethering protocol. + * @max_supported_bandwidth_mbps: maximum bandwidth need of the client in Mbps + */ +struct ipa_usb_xdci_connect_params { + enum ipa_usb_max_usb_packet_size max_pkt_size; + u8 ipa_to_usb_xferrscidx; + bool ipa_to_usb_xferrscidx_valid; + u8 usb_to_ipa_xferrscidx; + bool usb_to_ipa_xferrscidx_valid; + enum ipa_usb_teth_prot teth_prot; + struct ipa_usb_teth_prot_params teth_prot_params; + u32 max_supported_bandwidth_mbps; +}; + +/** + * ipa_usb_xdci_chan_scratch - xDCI protocol SW config area of + * channel scratch + * + * @last_trb_addr_iova: Address (iova LSB - based on alignment restrictions) of + * last TRB in queue. Used to identify roll over case + * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation + * configuration). Must be aligned to max USB Packet Size. + * Should be 1 <= const_buffer_size <= 31. + * @depcmd_low_addr: Used to generate "Update Transfer" command + * @depcmd_hi_addr: Used to generate "Update Transfer" command. 
+ */ +struct ipa_usb_xdci_chan_scratch { + u16 last_trb_addr_iova; + u8 const_buffer_size; + u32 depcmd_low_addr; + u8 depcmd_hi_addr; +}; + +/** + * ipa_usb_xdci_chan_params - xDCI channel related properties + * + * @client: type of "client" + * @ipa_ep_cfg: IPA EP configuration + * @keep_ipa_awake: when true, IPA will not be clock gated + * @teth_prot: tethering protocol for which the channel is created + * @gevntcount_low_addr: GEVNCOUNT low address for event scratch + * @gevntcount_hi_addr: GEVNCOUNT high address for event scratch + * @dir: channel direction + * @xfer_ring_len: length of transfer ring in bytes (must be integral + * multiple of transfer element size - 16B for xDCI) + * @xfer_scratch: parameters for xDCI channel scratch + * @xfer_ring_base_addr_iova: IO virtual address mapped to pysical base address + * @data_buff_base_len: length of data buffer allocated by USB driver + * @data_buff_base_addr_iova: IO virtual address mapped to pysical base address + * @sgt_xfer_rings: Scatter table for Xfer rings,contains valid non NULL + * value + * when USB S1-SMMU enabed, else NULL. + * @sgt_data_buff: Scatter table for data buffs,contains valid non NULL + * value + * when USB S1-SMMU enabed, else NULL. + * + */ +struct ipa_usb_xdci_chan_params { + /* IPA EP params */ + enum ipa_client_type client; + struct ipa_ep_cfg ipa_ep_cfg; + bool keep_ipa_awake; + enum ipa_usb_teth_prot teth_prot; + /* event ring params */ + u32 gevntcount_low_addr; + u8 gevntcount_hi_addr; + /* transfer ring params */ + enum gsi_chan_dir dir; + u16 xfer_ring_len; + struct ipa_usb_xdci_chan_scratch xfer_scratch; + u64 xfer_ring_base_addr_iova; + u32 data_buff_base_len; + u64 data_buff_base_addr_iova; + struct sg_table *sgt_xfer_rings; + struct sg_table *sgt_data_buff; +}; + +/** + * ipa_usb_chan_out_params - out parameters for channel request + * + * @clnt_hdl: opaque client handle assigned by IPA to client + * @db_reg_phs_addr_lsb: Physical address of doorbell register where the 32 + * LSBs of the doorbell value should be written + * @db_reg_phs_addr_msb: Physical address of doorbell register where the 32 + * MSBs of the doorbell value should be written + * + */ +struct ipa_req_chan_out_params { + u32 clnt_hdl; + u32 db_reg_phs_addr_lsb; + u32 db_reg_phs_addr_msb; +}; + +#ifdef CONFIG_IPA3 + +/** + * ipa_usb_init_teth_prot - Peripheral should call this function to initialize + * RNDIS/ECM/teth_bridge/DPL, prior to calling ipa_usb_xdci_connect() + * + * @usb_teth_type: tethering protocol type + * @teth_params: pointer to tethering protocol parameters. + * Should be struct ipa_usb_teth_params for RNDIS/ECM, + * or NULL for teth_bridge + * @ipa_usb_notify_cb: will be called to notify USB driver on certain events + * @user_data: cookie used for ipa_usb_notify_cb + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot, + struct ipa_usb_teth_params *teth_params, + int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, + void *), + void *user_data); + +/** + * ipa_usb_xdci_connect - Peripheral should call this function to start IN & + * OUT xDCI channels, and connect RNDIS/ECM/MBIM/RMNET. + * For DPL, only starts IN channel. + * + * @ul_chan_params: parameters for allocating UL xDCI channel. 
containing + * required info on event and transfer rings, and IPA EP + * configuration + * @ul_out_params: [out] opaque client handle assigned by IPA to client & DB + * registers physical address for UL channel + * @dl_chan_params: parameters for allocating DL xDCI channel. containing + * required info on event and transfer rings, and IPA EP + * configuration + * @dl_out_params: [out] opaque client handle assigned by IPA to client & DB + * registers physical address for DL channel + * @connect_params: handles and scratch params of the required channels, + * tethering protocol and the tethering protocol parameters. + * + * Note: Should not be called from atomic context + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params, + struct ipa_usb_xdci_chan_params *dl_chan_params, + struct ipa_req_chan_out_params *ul_out_params, + struct ipa_req_chan_out_params *dl_out_params, + struct ipa_usb_xdci_connect_params *connect_params); + +/** + * ipa_usb_xdci_disconnect - Peripheral should call this function to stop + * IN & OUT xDCI channels + * For DPL, only stops IN channel. + * + * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect() + * for OUT channel + * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect() + * for IN channel + * @teth_prot: tethering protocol + * + * Note: Should not be called from atomic context + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot); + +/** + * ipa_usb_deinit_teth_prot - Peripheral should call this function to deinit + * RNDIS/ECM/MBIM/RMNET + * + * @teth_prot: tethering protocol + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot); + +/** + * ipa_usb_xdci_suspend - Peripheral should call this function to suspend + * IN & OUT or DPL xDCI channels + * + * @ul_clnt_hdl: client handle previously obtained from + * ipa_usb_xdci_connect() for OUT channel + * @dl_clnt_hdl: client handle previously obtained from + * ipa_usb_xdci_connect() for IN channel + * @teth_prot: tethering protocol + * @with_remote_wakeup: Does host support remote wakeup? 
+ * + * Note: Should not be called from atomic context + * Note: for DPL, the ul will be ignored as irrelevant + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot, + bool with_remote_wakeup); + +/** + * ipa_usb_xdci_resume - Peripheral should call this function to resume + * IN & OUT or DPL xDCI channels + * + * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect() + * for OUT channel + * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect() + * for IN channel + * @teth_prot: tethering protocol + * + * Note: Should not be called from atomic context + * Note: for DPL, the ul will be ignored as irrelevant + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot); + +#else /* CONFIG_IPA3 */ + +static inline int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot, + struct ipa_usb_teth_params *teth_params, + int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, + void *), + void *user_data) +{ + return -EPERM; +} + +static inline int ipa_usb_xdci_connect( + struct ipa_usb_xdci_chan_params *ul_chan_params, + struct ipa_usb_xdci_chan_params *dl_chan_params, + struct ipa_req_chan_out_params *ul_out_params, + struct ipa_req_chan_out_params *dl_out_params, + struct ipa_usb_xdci_connect_params *connect_params) +{ + return -EPERM; +} + +static inline int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + return -EPERM; +} + +static inline int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot) +{ + return -EPERM; +} + +static inline int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot, + bool with_remote_wakeup) +{ + return -EPERM; +} + +static inline int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + return -EPERM; +} + + +#endif /* CONFIG_IPA3 */ + +#endif /* _IPA_USB_H_ */ diff --git a/include/linux/ipa_wdi3.h b/include/linux/ipa_wdi3.h new file mode 100644 index 000000000000..2844ab6103fd --- /dev/null +++ b/include/linux/ipa_wdi3.h @@ -0,0 +1,407 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_WDI3_H_ +#define _IPA_WDI3_H_ + +#include + +#define IPA_HW_WDI3_TCL_DATA_CMD_ER_DESC_SIZE 32 +#define IPA_HW_WDI3_IPA2FW_ER_DESC_SIZE 8 + +#define IPA_HW_WDI3_MAX_ER_DESC_SIZE \ + (((IPA_HW_WDI3_TCL_DATA_CMD_ER_DESC_SIZE) > \ + (IPA_HW_WDI3_IPA2FW_ER_DESC_SIZE)) ? \ + (IPA_HW_WDI3_TCL_DATA_CMD_ER_DESC_SIZE) : \ + (IPA_HW_WDI3_IPA2FW_ER_DESC_SIZE)) + +#define IPA_WDI_MAX_SUPPORTED_SYS_PIPE 3 + +enum ipa_wdi_version { + IPA_WDI_1, + IPA_WDI_2, + IPA_WDI_3 +}; + +/** + * struct ipa_wdi_init_in_params - wdi init input parameters + * + * @wdi_version: wdi version + * @notify: uc ready callback + * @priv: uc ready callback cookie + */ +struct ipa_wdi_init_in_params { + enum ipa_wdi_version wdi_version; + ipa_uc_ready_cb notify; + void *priv; +#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN + ipa_wdi_meter_notifier_cb wdi_notify; +#endif +}; + +/** + * struct ipa_wdi_init_out_params - wdi init output parameters + * + * @is_uC_ready: is uC ready. No API should be called until uC + is ready. 
+ * @is_smmu_enable: is smmu enabled + */ +struct ipa_wdi_init_out_params { + bool is_uC_ready; + bool is_smmu_enabled; +}; + +/** + * struct ipa_wdi_hdr_info - Header to install on IPA HW + * + * @hdr: header to install on IPA HW + * @hdr_len: length of header + * @dst_mac_addr_offset: destination mac address offset + * @hdr_type: layer two header type + */ +struct ipa_wdi_hdr_info { + u8 *hdr; + u8 hdr_len; + u8 dst_mac_addr_offset; + enum ipa_hdr_l2_type hdr_type; +}; + +/** + * struct ipa_wdi_reg_intf_in_params - parameters for uC offload + * interface registration + * + * @netdev_name: network interface name + * @hdr_info: header information + * @is_meta_data_valid: if meta data is valid + * @meta_data: meta data if any + * @meta_data_mask: meta data mask + */ +struct ipa_wdi_reg_intf_in_params { + const char *netdev_name; + struct ipa_wdi_hdr_info hdr_info[IPA_IP_MAX]; + enum ipa_client_type alt_dst_pipe; + u8 is_meta_data_valid; + u32 meta_data; + u32 meta_data_mask; +}; + +/** + * struct ipa_wdi_pipe_setup_info - WDI TX/Rx configuration + * @ipa_ep_cfg: ipa endpoint configuration + * @client: type of "client" + * @transfer_ring_base_pa: physical address of the base of the transfer ring + * @transfer_ring_size: size of the transfer ring + * @transfer_ring_doorbell_pa: physical address of the doorbell that + IPA uC will update the tailpointer of the transfer ring + * @event_ring_base_pa: physical address of the base of the event ring + * @event_ring_size: event ring size + * @event_ring_doorbell_pa: physical address of the doorbell that IPA uC + will update the headpointer of the event ring + * @num_pkt_buffers: Number of pkt buffers allocated. The size of the event + ring and the transfer ring has to be atleast ( num_pkt_buffers + 1) + * @pkt_offset: packet offset (wdi header length) + * @desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE]: Holds a cached + template of the desc format + */ +struct ipa_wdi_pipe_setup_info { + struct ipa_ep_cfg ipa_ep_cfg; + enum ipa_client_type client; + phys_addr_t transfer_ring_base_pa; + u32 transfer_ring_size; + phys_addr_t transfer_ring_doorbell_pa; + + phys_addr_t event_ring_base_pa; + u32 event_ring_size; + phys_addr_t event_ring_doorbell_pa; + u16 num_pkt_buffers; + + u16 pkt_offset; + + u32 desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE]; +}; + +/** + * struct ipa_wdi_pipe_setup_info_smmu - WDI TX/Rx configuration + * @ipa_ep_cfg: ipa endpoint configuration + * @client: type of "client" + * @transfer_ring_base_pa: physical address of the base of the transfer ring + * @transfer_ring_size: size of the transfer ring + * @transfer_ring_doorbell_pa: physical address of the doorbell that + IPA uC will update the tailpointer of the transfer ring + * @event_ring_base_pa: physical address of the base of the event ring + * @event_ring_size: event ring size + * @event_ring_doorbell_pa: physical address of the doorbell that IPA uC + will update the headpointer of the event ring + * @num_pkt_buffers: Number of pkt buffers allocated. 
The size of the event + ring and the transfer ring has to be atleast ( num_pkt_buffers + 1) + * @pkt_offset: packet offset (wdi header length) + * @desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE]: Holds a cached + template of the desc format + */ +struct ipa_wdi_pipe_setup_info_smmu { + struct ipa_ep_cfg ipa_ep_cfg; + enum ipa_client_type client; + struct sg_table transfer_ring_base; + u32 transfer_ring_size; + phys_addr_t transfer_ring_doorbell_pa; + + struct sg_table event_ring_base; + u32 event_ring_size; + phys_addr_t event_ring_doorbell_pa; + u16 num_pkt_buffers; + + u16 pkt_offset; + + u32 desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE]; +}; + +/** + * struct ipa_wdi_conn_in_params - information provided by + * uC offload client + * @notify: client callback function + * @priv: client cookie + * @is_smmu_enabled: if smmu is enabled + * @num_sys_pipe_needed: number of sys pipe needed + * @sys_in: parameters to setup sys pipe in mcc mode + * @tx: parameters to connect TX pipe(from IPA to WLAN) + * @tx_smmu: smmu parameters to connect TX pipe(from IPA to WLAN) + * @rx: parameters to connect RX pipe(from WLAN to IPA) + * @rx_smmu: smmu parameters to connect RX pipe(from WLAN to IPA) + */ +struct ipa_wdi_conn_in_params { + ipa_notify_cb notify; + void *priv; + bool is_smmu_enabled; + u8 num_sys_pipe_needed; + struct ipa_sys_connect_params sys_in[IPA_WDI_MAX_SUPPORTED_SYS_PIPE]; + union { + struct ipa_wdi_pipe_setup_info tx; + struct ipa_wdi_pipe_setup_info_smmu tx_smmu; + } u_tx; + union { + struct ipa_wdi_pipe_setup_info rx; + struct ipa_wdi_pipe_setup_info_smmu rx_smmu; + } u_rx; +}; + +/** + * struct ipa_wdi_conn_out_params - information provided + * to WLAN driver + * @tx_uc_db_pa: physical address of IPA uC doorbell for TX + * @rx_uc_db_pa: physical address of IPA uC doorbell for RX + */ +struct ipa_wdi_conn_out_params { + phys_addr_t tx_uc_db_pa; + phys_addr_t rx_uc_db_pa; +}; + +/** + * struct ipa_wdi_perf_profile - To set BandWidth profile + * + * @client: type of client + * @max_supported_bw_mbps: maximum bandwidth needed (in Mbps) + */ +struct ipa_wdi_perf_profile { + enum ipa_client_type client; + u32 max_supported_bw_mbps; +}; + +#if defined CONFIG_IPA || defined CONFIG_IPA3 + +/** + * ipa_wdi_init - Client should call this function to + * init WDI IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +int ipa_wdi_init(struct ipa_wdi_init_in_params *in, + struct ipa_wdi_init_out_params *out); + +/** + * ipa_wdi_cleanup - Client should call this function to + * clean up WDI IPA offload data path + * + * @Return 0 on success, negative on failure + */ +int ipa_wdi_cleanup(void); + +/** + * ipa_wdi_reg_intf - Client should call this function to + * register interface + * + * Note: Should not be called from atomic context + * + * @Return 0 on success, negative on failure + */ +int ipa_wdi_reg_intf( + struct ipa_wdi_reg_intf_in_params *in); + +/** + * ipa_wdi_dereg_intf - Client Driver should call this + * function to deregister before unload and after disconnect + * + * @Return 0 on success, negative on failure + */ +int ipa_wdi_dereg_intf(const char *netdev_name); + +/** + * ipa_wdi_conn_pipes - Client should call this + * function to connect pipes + * + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Note: Should not be called from atomic context + * + * @Return 0 on success, 
negative on failure + */ +int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out); + +/** + * ipa_wdi_disconn_pipes() - Client should call this + * function to disconnect pipes + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +int ipa_wdi_disconn_pipes(void); + +/** + * ipa_wdi_enable_pipes() - Client should call this + * function to enable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +int ipa_wdi_enable_pipes(void); + +/** + * ipa_wdi_disable_pipes() - Client should call this + * function to disable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +int ipa_wdi_disable_pipes(void); + +/** + * ipa_wdi_set_perf_profile() - Client should call this function to + * set IPA clock bandwidth based on data rates + * + * @profile: [in] BandWidth profile to use + * + * Returns: 0 on success, negative on failure + */ +int ipa_wdi_set_perf_profile(struct ipa_wdi_perf_profile *profile); + +/** + * ipa_wdi_create_smmu_mapping() - Create smmu mapping + * + * @num_buffers: number of buffers + * + * @info: wdi buffer info + */ +int ipa_wdi_create_smmu_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info); + +/** + * ipa_wdi_release_smmu_mapping() - Release smmu mapping + * + * @num_buffers: number of buffers + * + * @info: wdi buffer info + */ +int ipa_wdi_release_smmu_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info); + +/** + * ipa_wdi_get_stats() - Query WDI statistics + * @stats: [inout] stats blob from client populated by driver + * + * Returns: 0 on success, negative on failure + * + * @note Cannot be called from atomic context + * + */ +int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats); + +#else /* (CONFIG_IPA || CONFIG_IPA3) */ + +static inline int ipa_wdi_init(struct ipa_wdi_init_in_params *in, + struct ipa_wdi_init_out_params *out) +{ + return -EPERM; +} + +static inline int ipa_wdi_cleanup(void) +{ + return -EPERM; +} + +static inline int ipa_wdi_reg_intf( + struct ipa_wdi_reg_intf_in_params *in) +{ + return -EPERM; +} + +static inline int ipa_wdi_dereg_intf(const char *netdev_name) +{ + return -EPERM; +} + +static inline int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out) +{ + return -EPERM; +} + +static inline int ipa_wdi_disconn_pipes(void) +{ + return -EPERM; +} + +static inline int ipa_wdi_enable_pipes(void) +{ + return -EPERM; +} + +static inline int ipa_wdi_disable_pipes(void) +{ + return -EPERM; +} + +static inline int ipa_wdi_set_perf_profile( + struct ipa_wdi_perf_profile *profile) +{ + return -EPERM; +} + +static inline int ipa_wdi_create_smmu_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return -EPERM; +} + +static inline int ipa_wdi_release_smmu_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return -EPERM; +} + +static inline int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ + return -EPERM; +} + +#endif /* CONFIG_IPA3 */ + +#endif /* _IPA_WDI3_H_ */ diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h new file mode 100644 index 000000000000..62732f64f1d7 --- /dev/null +++ b/include/linux/msm_gsi.h @@ -0,0 +1,1686 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef MSM_GSI_H
+#define MSM_GSI_H
+#include <linux/device.h>
+#include <linux/types.h>
+
+enum gsi_ver {
+	GSI_VER_ERR = 0,
+	GSI_VER_1_0 = 1,
+	GSI_VER_1_2 = 2,
+	GSI_VER_1_3 = 3,
+	GSI_VER_2_0 = 4,
+	GSI_VER_2_2 = 5,
+	GSI_VER_2_5 = 6,
+	GSI_VER_MAX,
+};
+
+enum gsi_status {
+	GSI_STATUS_SUCCESS = 0,
+	GSI_STATUS_ERROR = 1,
+	GSI_STATUS_RING_INSUFFICIENT_SPACE = 2,
+	GSI_STATUS_RING_EMPTY = 3,
+	GSI_STATUS_RES_ALLOC_FAILURE = 4,
+	GSI_STATUS_BAD_STATE = 5,
+	GSI_STATUS_INVALID_PARAMS = 6,
+	GSI_STATUS_UNSUPPORTED_OP = 7,
+	GSI_STATUS_NODEV = 8,
+	GSI_STATUS_POLL_EMPTY = 9,
+	GSI_STATUS_EVT_RING_INCOMPATIBLE = 10,
+	GSI_STATUS_TIMED_OUT = 11,
+	GSI_STATUS_AGAIN = 12,
+	GSI_STATUS_PENDING_IRQ = 13,
+};
+
+enum gsi_per_evt {
+	GSI_PER_EVT_GLOB_ERROR,
+	GSI_PER_EVT_GLOB_GP1,
+	GSI_PER_EVT_GLOB_GP2,
+	GSI_PER_EVT_GLOB_GP3,
+	GSI_PER_EVT_GENERAL_BREAK_POINT,
+	GSI_PER_EVT_GENERAL_BUS_ERROR,
+	GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW,
+	GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW,
+};
+
+/**
+ * gsi_per_notify - Peripheral callback info
+ *
+ * @user_data: cookie supplied in gsi_register_device
+ * @evt_id: type of notification
+ * @err_desc: error related information
+ *
+ */
+struct gsi_per_notify {
+	void *user_data;
+	enum gsi_per_evt evt_id;
+	union {
+		uint16_t err_desc;
+	} data;
+};
+
+enum gsi_intr_type {
+	GSI_INTR_MSI = 0x0,
+	GSI_INTR_IRQ = 0x1
+};
+
+
+/**
+ * gsi_per_props - Peripheral related properties
+ *
+ * @ver: GSI core version
+ * @ee: EE where this driver and peripheral driver runs
+ * @intr: control interrupt type
+ * @intvec: write data for MSI write
+ * @msi_addr: MSI address
+ * @irq: IRQ number
+ * @phys_addr: physical address of GSI block
+ * @size: register size of GSI block
+ * @emulator_intcntrlr_addr: the location of emulator's interrupt control block
+ * @emulator_intcntrlr_size: the size of emulator_intcntrlr_addr
+ * @emulator_intcntrlr_client_isr: client's isr.
Called by the emulator's isr + * @mhi_er_id_limits_valid: valid flag for mhi_er_id_limits + * @mhi_er_id_limits: MHI event ring start and end ids + * @notify_cb: general notification callback + * @req_clk_cb: callback to request peripheral clock + * granted should be set to true if request is completed + * synchronously, false otherwise (peripheral needs + * to call gsi_complete_clk_grant later when request is + * completed) + * if this callback is not provided, then GSI will assume + * peripheral is clocked at all times + * @rel_clk_cb: callback to release peripheral clock + * @user_data: cookie used for notifications + * + * All the callbacks are in interrupt context + * + */ +struct gsi_per_props { + enum gsi_ver ver; + unsigned int ee; + enum gsi_intr_type intr; + uint32_t intvec; + uint64_t msi_addr; + unsigned int irq; + phys_addr_t phys_addr; + unsigned long size; + phys_addr_t emulator_intcntrlr_addr; + unsigned long emulator_intcntrlr_size; + irq_handler_t emulator_intcntrlr_client_isr; + bool mhi_er_id_limits_valid; + uint32_t mhi_er_id_limits[2]; + void (*notify_cb)(struct gsi_per_notify *notify); + void (*req_clk_cb)(void *user_data, bool *granted); + int (*rel_clk_cb)(void *user_data); + void *user_data; +}; + +enum gsi_evt_err { + GSI_EVT_OUT_OF_BUFFERS_ERR = 0x0, + GSI_EVT_OUT_OF_RESOURCES_ERR = 0x1, + GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR = 0x2, + GSI_EVT_EVT_RING_EMPTY_ERR = 0x3, +}; + +/** + * gsi_evt_err_notify - event ring error callback info + * + * @user_data: cookie supplied in gsi_alloc_evt_ring + * @evt_id: type of error + * @err_desc: more info about the error + * + */ +struct gsi_evt_err_notify { + void *user_data; + enum gsi_evt_err evt_id; + uint16_t err_desc; +}; + +enum gsi_evt_chtype { + GSI_EVT_CHTYPE_MHI_EV = 0x0, + GSI_EVT_CHTYPE_XHCI_EV = 0x1, + GSI_EVT_CHTYPE_GPI_EV = 0x2, + GSI_EVT_CHTYPE_XDCI_EV = 0x3, + GSI_EVT_CHTYPE_WDI2_EV = 0x4, + GSI_EVT_CHTYPE_WDI3_EV = 0x5, + GSI_EVT_CHTYPE_GCI_EV = 0x6, + GSI_EVT_CHTYPE_MHIP_EV = 0x7, + GSI_EVT_CHTYPE_AQC_EV = 0x8, + GSI_EVT_CHTYPE_11AD_EV = 0x9, +}; + +enum gsi_evt_ring_elem_size { + GSI_EVT_RING_RE_SIZE_4B = 4, + GSI_EVT_RING_RE_SIZE_8B = 8, + GSI_EVT_RING_RE_SIZE_16B = 16, +}; + +/** + * gsi_evt_ring_props - Event ring related properties + * + * @intf: interface type (of the associated channel) + * @intr: interrupt type + * @re_size: size of event ring element + * @ring_len: length of ring in bytes (must be integral multiple of + * re_size) + * @ring_base_addr: physical base address of ring. Address must be aligned to + * ring_len rounded to power of two + * @ring_base_vaddr: virtual base address of ring (set to NULL when not + * applicable) + * @int_modt: cycles base interrupt moderation (32KHz clock) + * @int_modc: interrupt moderation packet counter + * @intvec: write data for MSI write + * @msi_addr: MSI address + * @rp_update_addr: physical address to which event read pointer should be + * written on every event generation. must be set to 0 when + * no update is desdired + * @exclusive: if true, only one GSI channel can be associated with this + * event ring. if false, the event ring can be shared among + * multiple GSI channels but in that case no polling + * (GSI_CHAN_MODE_POLL) is supported on any of those channels + * @err_cb: error notification callback + * @user_data: cookie used for error notifications + * @evchid_valid: is evchid valid? 
+ * @evchid: the event ID that is being specifically requested (this is + * relevant for MHI where doorbell routing requires ERs to be + * physically contiguous) + */ +struct gsi_evt_ring_props { + enum gsi_evt_chtype intf; + enum gsi_intr_type intr; + enum gsi_evt_ring_elem_size re_size; + uint16_t ring_len; + uint64_t ring_base_addr; + void *ring_base_vaddr; + uint16_t int_modt; + uint8_t int_modc; + uint32_t intvec; + uint64_t msi_addr; + uint64_t rp_update_addr; + bool exclusive; + void (*err_cb)(struct gsi_evt_err_notify *notify); + void *user_data; + bool evchid_valid; + uint8_t evchid; +}; + +enum gsi_chan_mode { + GSI_CHAN_MODE_CALLBACK = 0x0, + GSI_CHAN_MODE_POLL = 0x1, +}; + +enum gsi_chan_prot { + GSI_CHAN_PROT_MHI = 0x0, + GSI_CHAN_PROT_XHCI = 0x1, + GSI_CHAN_PROT_GPI = 0x2, + GSI_CHAN_PROT_XDCI = 0x3, + GSI_CHAN_PROT_WDI2 = 0x4, + GSI_CHAN_PROT_WDI3 = 0x5, + GSI_CHAN_PROT_GCI = 0x6, + GSI_CHAN_PROT_MHIP = 0x7, + GSI_CHAN_PROT_AQC = 0x8, + GSI_CHAN_PROT_11AD = 0x9, +}; + +enum gsi_chan_dir { + GSI_CHAN_DIR_FROM_GSI = 0x0, + GSI_CHAN_DIR_TO_GSI = 0x1 +}; + +enum gsi_max_prefetch { + GSI_ONE_PREFETCH_SEG = 0x0, + GSI_TWO_PREFETCH_SEG = 0x1 +}; + +/** + * @GSI_USE_PREFETCH_BUFS: Channel will use normal prefetch buffers if possible + * @GSI_ESCAPE_BUF_ONLY: Channel will always use escape buffers only + * @GSI_SMART_PRE_FETCH: Channel will work in smart prefetch mode. + * relevant starting GSI 2.5 + * @GSI_FREE_PRE_FETCH: Channel will work in free prefetch mode. + * relevant starting GSI 2.5 + */ +enum gsi_prefetch_mode { + GSI_USE_PREFETCH_BUFS = 0x0, + GSI_ESCAPE_BUF_ONLY = 0x1, + GSI_SMART_PRE_FETCH = 0x2, + GSI_FREE_PRE_FETCH = 0x3, +}; + +enum gsi_chan_evt { + GSI_CHAN_EVT_INVALID = 0x0, + GSI_CHAN_EVT_SUCCESS = 0x1, + GSI_CHAN_EVT_EOT = 0x2, + GSI_CHAN_EVT_OVERFLOW = 0x3, + GSI_CHAN_EVT_EOB = 0x4, + GSI_CHAN_EVT_OOB = 0x5, + GSI_CHAN_EVT_DB_MODE = 0x6, + GSI_CHAN_EVT_UNDEFINED = 0x10, + GSI_CHAN_EVT_RE_ERROR = 0x11, +}; + +/** + * gsi_chan_xfer_notify - Channel callback info + * + * @chan_user_data: cookie supplied in gsi_alloc_channel + * @xfer_user_data: cookie of the gsi_xfer_elem that caused the + * event to be generated + * @evt_id: type of event triggered by the associated TRE + * (corresponding to xfer_user_data) + * @bytes_xfered: number of bytes transferred by the associated TRE + * (corresponding to xfer_user_data) + * + */ +struct gsi_chan_xfer_notify { + void *chan_user_data; + void *xfer_user_data; + enum gsi_chan_evt evt_id; + uint16_t bytes_xfered; +}; + +enum gsi_chan_err { + GSI_CHAN_INVALID_TRE_ERR = 0x0, + GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR = 0x1, + GSI_CHAN_OUT_OF_BUFFERS_ERR = 0x2, + GSI_CHAN_OUT_OF_RESOURCES_ERR = 0x3, + GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR = 0x4, + GSI_CHAN_HWO_1_ERR = 0x5 +}; + +/** + * gsi_chan_err_notify - Channel general callback info + * + * @chan_user_data: cookie supplied in gsi_alloc_channel + * @evt_id: type of error + * @err_desc: more info about the error + * + */ +struct gsi_chan_err_notify { + void *chan_user_data; + enum gsi_chan_err evt_id; + uint16_t err_desc; +}; + +enum gsi_chan_ring_elem_size { + GSI_CHAN_RE_SIZE_4B = 4, + GSI_CHAN_RE_SIZE_8B = 8, + GSI_CHAN_RE_SIZE_16B = 16, + GSI_CHAN_RE_SIZE_32B = 32, +}; + +enum gsi_chan_use_db_eng { + GSI_CHAN_DIRECT_MODE = 0x0, + GSI_CHAN_DB_MODE = 0x1, +}; + +/** + * gsi_chan_props - Channel related properties + * + * @prot: interface type + * @dir: channel direction + * @ch_id: virtual channel ID + * @evt_ring_hdl: handle of associated event ring. 
set to ~0 if no + * event ring associated + * @re_size: size of channel ring element + * @ring_len: length of ring in bytes (must be integral multiple of + * re_size) + * @max_re_expected: maximal number of ring elements expected to be queued. + * used for data path statistics gathering. if 0 provided + * ring_len / re_size will be used. + * @ring_base_addr: physical base address of ring. Address must be aligned to + * ring_len rounded to power of two + * @ring_base_vaddr: virtual base address of ring (set to NULL when not + * applicable) + * @use_db_eng: 0 => direct mode (doorbells are written directly to RE + * engine) + * 1 => DB mode (doorbells are written to DB engine) + * @max_prefetch: limit number of pre-fetch segments for channel + * @low_weight: low channel weight (priority of channel for RE engine + * round robin algorithm); must be >= 1 + * @empty_lvl_threshold: + * The thershold number of free entries available in the + * receiving fifos of GSI-peripheral. If Smart PF mode + * is used, REE will fetch/send new TRE to peripheral only + * if peripheral's empty_level_count is higher than + * EMPTY_LVL_THRSHOLD defined for this channel + * @xfer_cb: transfer notification callback, this callback happens + * on event boundaries + * + * e.g. 1 + * + * out TD with 3 REs + * + * RE1: EOT=0, EOB=0, CHAIN=1; + * RE2: EOT=0, EOB=0, CHAIN=1; + * RE3: EOT=1, EOB=0, CHAIN=0; + * + * the callback will be triggered for RE3 using the + * xfer_user_data of that RE + * + * e.g. 2 + * + * in REs + * + * RE1: EOT=1, EOB=0, CHAIN=0; + * RE2: EOT=1, EOB=0, CHAIN=0; + * RE3: EOT=1, EOB=0, CHAIN=0; + * + * received packet consumes all of RE1, RE2 and part of RE3 + * for EOT condition. there will be three callbacks in below + * order + * + * callback for RE1 using GSI_CHAN_EVT_OVERFLOW + * callback for RE2 using GSI_CHAN_EVT_OVERFLOW + * callback for RE3 using GSI_CHAN_EVT_EOT + * + * @err_cb: error notification callback + * @chan_user_data: cookie used for notifications + * + * All the callbacks are in interrupt context + * + */ +struct gsi_chan_props { + enum gsi_chan_prot prot; + enum gsi_chan_dir dir; + uint8_t ch_id; + unsigned long evt_ring_hdl; + enum gsi_chan_ring_elem_size re_size; + uint16_t ring_len; + uint16_t max_re_expected; + uint64_t ring_base_addr; + void *ring_base_vaddr; + enum gsi_chan_use_db_eng use_db_eng; + enum gsi_max_prefetch max_prefetch; + uint8_t low_weight; + enum gsi_prefetch_mode prefetch_mode; + uint8_t empty_lvl_threshold; + void (*xfer_cb)(struct gsi_chan_xfer_notify *notify); + void (*err_cb)(struct gsi_chan_err_notify *notify); + void *chan_user_data; +}; + +enum gsi_xfer_flag { + GSI_XFER_FLAG_CHAIN = 0x1, + GSI_XFER_FLAG_EOB = 0x100, + GSI_XFER_FLAG_EOT = 0x200, + GSI_XFER_FLAG_BEI = 0x400 +}; + +enum gsi_xfer_elem_type { + GSI_XFER_ELEM_DATA, + GSI_XFER_ELEM_IMME_CMD, + GSI_XFER_ELEM_NOP, +}; + +/** + * gsi_xfer_elem - Metadata about a single transfer + * + * @addr: physical address of buffer + * @len: size of buffer for GSI_XFER_ELEM_DATA: + * for outbound transfers this is the number of bytes to + * transfer. 
+ * for inbound transfers, this is the maximum number of + * bytes the host expects from device in this transfer + * + * immediate command opcode for GSI_XFER_ELEM_IMME_CMD + * @flags: transfer flags, OR of all the applicable flags + * + * GSI_XFER_FLAG_BEI: Block event interrupt + * 1: Event generated by this ring element must not assert + * an interrupt to the host + * 0: Event generated by this ring element must assert an + * interrupt to the host + * + * GSI_XFER_FLAG_EOT: Interrupt on end of transfer + * 1: If an EOT condition is encountered when processing + * this ring element, an event is generated by the device + * with its completion code set to EOT. + * 0: If an EOT condition is encountered for this ring + * element, a completion event is not be generated by the + * device, unless IEOB is 1 + * + * GSI_XFER_FLAG_EOB: Interrupt on end of block + * 1: Device notifies host after processing this ring element + * by sending a completion event + * 0: Completion event is not required after processing this + * ring element + * + * GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring + * elements in a TD + * + * @type: transfer type + * + * GSI_XFER_ELEM_DATA: for all data transfers + * GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands + * GSI_XFER_ELEM_NOP: for event generation only + * + * @xfer_user_data: cookie used in xfer_cb + * + */ +struct gsi_xfer_elem { + uint64_t addr; + uint16_t len; + uint16_t flags; + enum gsi_xfer_elem_type type; + void *xfer_user_data; +}; + +/** + * gsi_gpi_channel_scratch - GPI protocol SW config area of + * channel scratch + * + * @dl_nlo_channel: Whether this is DL NLO Channel or not? Relevant for + * GSI 2.5 and above where DL NLO introduced. + * @max_outstanding_tre: Used for the prefetch management sequence by the + * sequencer. Defines the maximum number of allowed + * outstanding TREs in IPA/GSI (in Bytes). RE engine + * prefetch will be limited by this configuration. It + * is suggested to configure this value to IPA_IF + * channel TLV queue size times element size. To disable + * the feature in doorbell mode (DB Mode=1). Maximum + * outstanding TREs should be set to 64KB + * (or any value larger or equal to ring length . RLEN) + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. + * @outstanding_threshold: Used for the prefetch management sequence by the + * sequencer. Defines the threshold (in Bytes) as to when + * to update the channel doorbell. Should be smaller than + * Maximum outstanding TREs. value. It is suggested to + * configure this value to 2 * element size. + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. + */ +struct __packed gsi_gpi_channel_scratch { + uint64_t dl_nlo_channel:1; /* Relevant starting GSI 2.5 */ + uint64_t resvd1:63; + uint32_t resvd2:16; + uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */ + uint32_t resvd3:16; + uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */ +}; + +/** + * gsi_mhi_channel_scratch - MHI protocol SW config area of + * channel scratch + * + * @mhi_host_wp_addr: Valid only when UL/DL Sync En is asserted. 
Defines + * address in host from which channel write pointer + * should be read in polling mode + * @assert_bit40: 1: bit #41 in address should be asserted upon + * IPA_IF.ProcessDescriptor routine (for MHI over PCIe + * transfers) + * 0: bit #41 in address should be deasserted upon + * IPA_IF.ProcessDescriptor routine (for non-MHI over + * PCIe transfers) + * @polling_configuration: Uplink channels: Defines timer to poll on MHI + * context. Range: 1 to 31 milliseconds. + * Downlink channel: Defines transfer ring buffer + * availability threshold to poll on MHI context in + * multiple of 8. Range: 0 to 31, meaning 0 to 258 ring + * elements. E.g., value of 2 indicates 16 ring elements. + * Valid only when Burst Mode Enabled is set to 1 + * @burst_mode_enabled: 0: Burst mode is disabled for this channel + * 1: Burst mode is enabled for this channel + * @polling_mode: 0: the channel is not in polling mode, meaning the + * host should ring DBs. + * 1: the channel is in polling mode, meaning the host + * @oob_mod_threshold: Defines OOB moderation threshold. Units are in 8 + * ring elements. + * should not ring DBs until notified of DB mode/OOB mode + * @max_outstanding_tre: Used for the prefetch management sequence by the + * sequencer. Defines the maximum number of allowed + * outstanding TREs in IPA/GSI (in Bytes). RE engine + * prefetch will be limited by this configuration. It + * is suggested to configure this value to IPA_IF + * channel TLV queue size times element size. + * To disable the feature in doorbell mode (DB Mode=1). + * Maximum outstanding TREs should be set to 64KB + * (or any value larger or equal to ring length . RLEN) + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. + * @outstanding_threshold: Used for the prefetch management sequence by the + * sequencer. Defines the threshold (in Bytes) as to when + * to update the channel doorbell. Should be smaller than + * Maximum outstanding TREs. value. It is suggested to + * configure this value to min(TLV_FIFO_SIZE/2,8) * + * element size. + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. + */ +struct __packed gsi_mhi_channel_scratch { + uint64_t mhi_host_wp_addr; + uint32_t rsvd1:1; + uint32_t assert_bit40:1; + uint32_t polling_configuration:5; + uint32_t burst_mode_enabled:1; + uint32_t polling_mode:1; + uint32_t oob_mod_threshold:5; + uint32_t resvd2:2; + uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */ + uint32_t resvd3:16; + uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */ +}; + +/** + * gsi_xdci_channel_scratch - xDCI protocol SW config area of + * channel scratch + * + * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregationi + * configuration). Must be aligned to Max USB Packet Size + * @xferrscidx: Transfer Resource Index (XferRscIdx). The hardware-assigned + * transfer resource index for the transfer, which was + * returned in response to the Start Transfer command. + * This field is used for "Update Transfer" command + * @last_trb_addr: Address (LSB - based on alignment restrictions) of + * last TRB in queue. Used to identify rollover case + * @depcmd_low_addr: Used to generate "Update Transfer" command + * @max_outstanding_tre: Used for the prefetch management sequence by the + * sequencer. Defines the maximum number of allowed + * outstanding TREs in IPA/GSI (in Bytes). RE engine + * prefetch will be limited by this configuration. 
It + * is suggested to configure this value to IPA_IF + * channel TLV queue size times element size. + * To disable the feature in doorbell mode (DB Mode=1) + * Maximum outstanding TREs should be set to 64KB + * (or any value larger or equal to ring length . RLEN) + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. + * @depcmd_hi_addr: Used to generate "Update Transfer" command + * @outstanding_threshold: Used for the prefetch management sequence by the + * sequencer. Defines the threshold (in Bytes) as to when + * to update the channel doorbell. Should be smaller than + * Maximum outstanding TREs. value. It is suggested to + * configure this value to 2 * element size. for MBIM the + * suggested configuration is the element size. + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. + */ +struct __packed gsi_xdci_channel_scratch { + uint32_t last_trb_addr:16; + uint32_t resvd1:4; + uint32_t xferrscidx:7; + uint32_t const_buffer_size:5; + uint32_t depcmd_low_addr; + uint32_t depcmd_hi_addr:8; + uint32_t resvd2:8; + uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */ + uint32_t resvd3:16; + uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */ +}; + +/** + * gsi_wdi_channel_scratch - WDI protocol SW config area of + * channel scratch + * + * @wifi_rx_ri_addr_low: Low 32 bits of Transfer ring Read Index address. + * @wifi_rx_ri_addr_high: High 32 bits of Transfer ring Read Index address. + * @update_ri_moderation_threshold: Threshold N for Transfer ring Read Index + * N is the number of packets that IPA will + * process before Wifi transfer ring Ri will + * be updated. + * @update_ri_moderation_counter: This field is incremented with each TRE + * processed in MCS. + * @wdi_rx_tre_proc_in_progress: It is set if IPA IF returned BECAME FULL + * status after MCS submitted an inline immediate + * command to update the metadata. It allows MCS + * to know that it has to retry sending the TRE + * to IPA. + * @wdi_rx_vdev_id: Rx only. Initialized to 0xFF by SW after allocating channel + * and before starting it. Both FW_DESC and VDEV_ID are part + * of a scratch word that is Read/Write for both MCS and SW. + * To avoid race conditions, SW should not update this field + * after starting the channel. + * @wdi_rx_fw_desc: Rx only. Initialized to 0xFF by SW after allocating channel + * and before starting it. After Start, this is a Read only + * field for SW. + * @endp_metadatareg_offset: Rx only, the offset of IPA_ENDP_INIT_HDR_METADATA + * of the corresponding endpoint in 4B words from IPA + * base address. Read only field for MCS. + * Write for SW. + * @qmap_id: Rx only, used for setting metadata register in IPA. Read only field + * for MCS. Write for SW. + * @wdi_rx_pkt_length: If WDI_RX_TRE_PROC_IN_PROGRESS is set, this field is + * valid and contains the packet length of the TRE that + * needs to be submitted to IPA. + * @resv1: reserved bits. + * @pkt_comp_count: It is incremented on each AOS received. When event ring + * Write index is updated, it is decremented by the same + * amount. + * @stop_in_progress_stm: If a Stop request is in progress, this will indicate + * the current stage of processing of the stop within MCS + * @resv2: reserved bits. + * wdi_rx_qmap_id_internal: Initialized to 0 by MCS when the channel is + * allocated. It is updated to the current value of SW + * QMAP ID that is being written by MCS to the IPA + * metadata register. 
+ */ +struct __packed gsi_wdi_channel_scratch { + uint32_t wifi_rx_ri_addr_low; + uint32_t wifi_rx_ri_addr_high; + uint32_t update_ri_moderation_threshold:5; + uint32_t update_ri_moderation_counter:6; + uint32_t wdi_rx_tre_proc_in_progress:1; + uint32_t resv1:4; + uint32_t wdi_rx_vdev_id:8; + uint32_t wdi_rx_fw_desc:8; + uint32_t endp_metadatareg_offset:16; + uint32_t qmap_id:16; + uint32_t wdi_rx_pkt_length:16; + uint32_t resv2:2; + uint32_t pkt_comp_count:11; + uint32_t stop_in_progress_stm:3; + uint32_t resv3:16; + uint32_t wdi_rx_qmap_id_internal:16; +}; + +/** + * gsi_11ad_rx_channel_scratch - 11AD protocol SW config area of + * RX channel scratch + * + * @status_ring_hwtail_address_lsb: Low 32 bits of status ring hwtail address. + * @status_ring_hwtail_address_msb: High 32 bits of status ring hwtail address. + * @data_buffers_base_address_lsb: Low 32 bits of the data buffers address. + * @data_buffers_base_address_msb: High 32 bits of the data buffers address. + * @fixed_data_buffer_size: the fixed buffer size (> MTU). + * @resv1: reserved bits. + */ +struct __packed gsi_11ad_rx_channel_scratch { + uint32_t status_ring_hwtail_address_lsb; + uint32_t status_ring_hwtail_address_msb; + uint32_t data_buffers_base_address_lsb; + uint32_t data_buffers_base_address_msb:8; + uint32_t fixed_data_buffer_size:16; + uint32_t resv1:8; +}; + +/** + * gsi_11ad_tx_channel_scratch - 11AD protocol SW config area of + * TX channel scratch + * + * @status_ring_hwtail_address_lsb: Low 32 bits of status ring hwtail address. + * @status_ring_hwtail_address_msb: High 32 bits of status ring hwtail address. + * @update_status_hwtail_mod_threshold: The threshold in (32B) elements for + * updating descriptor ring 11ad HWTAIL pointer moderation. + * @resv1: reserved bits. + * @resv2: reserved bit. + * @fixed_data_buffer_size: the fixed buffer size (> MTU). + * @resv3: reserved bits. 
+ */ +struct __packed gsi_11ad_tx_channel_scratch { + uint32_t status_ring_hwtail_address_lsb; + uint32_t status_ring_hwtail_address_msb; + uint32_t update_status_hwtail_mod_threshold:8; + uint32_t resv1:24; + uint32_t resv2:8; + uint32_t fixed_data_buffer_size:16; + uint32_t resv3:8; +}; + +/** + * gsi_channel_scratch - channel scratch SW config area + * + */ +union __packed gsi_channel_scratch { + struct __packed gsi_gpi_channel_scratch gpi; + struct __packed gsi_mhi_channel_scratch mhi; + struct __packed gsi_xdci_channel_scratch xdci; + struct __packed gsi_wdi_channel_scratch wdi; + struct __packed gsi_11ad_rx_channel_scratch rx_11ad; + struct __packed gsi_11ad_tx_channel_scratch tx_11ad; + struct __packed { + uint32_t word1; + uint32_t word2; + uint32_t word3; + uint32_t word4; + } data; +}; + +/** + * gsi_wdi_channel_scratch3 - WDI protocol SW config area of + * channel scratch3 + */ + +struct __packed gsi_wdi_channel_scratch3 { + uint32_t endp_metadatareg_offset:16; + uint32_t qmap_id:16; +}; + +/** + * gsi_wdi_channel_scratch3_reg - channel scratch3 SW config area + * + */ + +union __packed gsi_wdi_channel_scratch3_reg { + struct __packed gsi_wdi_channel_scratch3 wdi; + struct __packed { + uint32_t word1; + } data; +}; + +/** + * gsi_mhi_evt_scratch - MHI protocol SW config area of + * event scratch + */ +struct __packed gsi_mhi_evt_scratch { + uint32_t resvd1; + uint32_t resvd2; +}; + +/** + * gsi_xdci_evt_scratch - xDCI protocol SW config area of + * event scratch + * + */ +struct __packed gsi_xdci_evt_scratch { + uint32_t gevntcount_low_addr; + uint32_t gevntcount_hi_addr:8; + uint32_t resvd1:24; +}; + +/** + * gsi_wdi_evt_scratch - WDI protocol SW config area of + * event scratch + * + */ + +struct __packed gsi_wdi_evt_scratch { + uint32_t update_ri_moderation_config:8; + uint32_t resvd1:8; + uint32_t update_ri_mod_timer_running:1; + uint32_t evt_comp_count:14; + uint32_t resvd2:1; + uint32_t last_update_ri:16; + uint32_t resvd3:16; +}; + +/** + * gsi_11ad_evt_scratch - 11AD protocol SW config area of + * event scratch + * + */ +struct __packed gsi_11ad_evt_scratch { + uint32_t update_status_hwtail_mod_threshold : 8; + uint32_t resvd1:8; + uint32_t resvd2:16; + uint32_t resvd3; +}; + +/** + * gsi_evt_scratch - event scratch SW config area + * + */ +union __packed gsi_evt_scratch { + struct __packed gsi_mhi_evt_scratch mhi; + struct __packed gsi_xdci_evt_scratch xdci; + struct __packed gsi_wdi_evt_scratch wdi; + struct __packed gsi_11ad_evt_scratch ad11; + struct __packed { + uint32_t word1; + uint32_t word2; + } data; +}; + +/** + * gsi_device_scratch - EE scratch config parameters + * + * @mhi_base_chan_idx_valid: is mhi_base_chan_idx valid? + * @mhi_base_chan_idx: base index of IPA MHI channel indexes. + * IPA MHI channel index = GSI channel ID + + * MHI base channel index + * @max_usb_pkt_size_valid: is max_usb_pkt_size valid? + * @max_usb_pkt_size: max USB packet size in bytes (valid values are + * 64, 512 and 1024) + */ +struct gsi_device_scratch { + bool mhi_base_chan_idx_valid; + uint8_t mhi_base_chan_idx; + bool max_usb_pkt_size_valid; + uint16_t max_usb_pkt_size; +}; + +/** + * gsi_chan_info - information about channel occupancy + * + * @wp: channel write pointer (physical address) + * @rp: channel read pointer (physical address) + * @evt_valid: is evt* info valid? 
+ * @evt_wp: event ring write pointer (physical address) + * @evt_rp: event ring read pointer (physical address) + */ +struct gsi_chan_info { + uint64_t wp; + uint64_t rp; + bool evt_valid; + uint64_t evt_wp; + uint64_t evt_rp; +}; + +#ifdef CONFIG_GSI +/** + * gsi_register_device - Peripheral should call this function to + * register itself with GSI before invoking any other APIs + * + * @props: Peripheral properties + * @dev_hdl: Handle populated by GSI, opaque to client + * + * @Return -GSI_STATUS_AGAIN if request should be re-tried later + * other error codes for failure + */ +int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl); + +/** + * gsi_complete_clk_grant - Peripheral should call this function to + * grant the clock resource requested by GSI previously that could not + * be granted synchronously. GSI will release the clock resource using + * the rel_clk_cb when appropriate + * + * @dev_hdl: Client handle previously obtained from + * gsi_register_device + * + * @Return gsi_status + */ +int gsi_complete_clk_grant(unsigned long dev_hdl); + +/** + * gsi_write_device_scratch - Peripheral should call this function to + * write to the EE scratch area + * + * @dev_hdl: Client handle previously obtained from + * gsi_register_device + * @val: Value to write + * + * @Return gsi_status + */ +int gsi_write_device_scratch(unsigned long dev_hdl, + struct gsi_device_scratch *val); + +/** + * gsi_deregister_device - Peripheral should call this function to + * de-register itself with GSI + * + * @dev_hdl: Client handle previously obtained from + * gsi_register_device + * @force: When set to true, cleanup is performed even if there + * are in use resources like channels, event rings, etc. + * this would be used after GSI reset to recover from some + * fatal error + * When set to false, there must not exist any allocated + * channels and event rings. + * + * @Return gsi_status + */ +int gsi_deregister_device(unsigned long dev_hdl, bool force); + +/** + * gsi_alloc_evt_ring - Peripheral should call this function to + * allocate an event ring + * + * @props: Event ring properties + * @dev_hdl: Client handle previously obtained from + * gsi_register_device + * @evt_ring_hdl: Handle populated by GSI, opaque to client + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl, + unsigned long *evt_ring_hdl); + +/** + * gsi_write_evt_ring_scratch - Peripheral should call this function to + * write to the scratch area of the event ring context + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * @val: Value to write + * + * @Return gsi_status + */ +int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl, + union __packed gsi_evt_scratch val); + +/** + * gsi_dealloc_evt_ring - Peripheral should call this function to + * de-allocate an event ring. 
There should not exist any active + * channels using this event ring + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl); + +/** + * gsi_query_evt_ring_db_addr - Peripheral should call this function to + * query the physical addresses of the event ring doorbell registers + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * @db_addr_wp_lsb: Physical address of doorbell register where the 32 + * LSBs of the doorbell value should be written + * @db_addr_wp_msb: Physical address of doorbell register where the 32 + * MSBs of the doorbell value should be written + * + * @Return gsi_status + */ +int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl, + uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb); + +/** + * gsi_ring_evt_ring_db - Peripheral should call this function for + * ringing the event ring doorbell with given value + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * @value: The value to be used for ringing the doorbell + * + * @Return gsi_status + */ +int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value); + +/** + * gsi_ring_ch_ring_db - Peripheral should call this function for + * ringing the channel ring doorbell with given value + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @value: The value to be used for ringing the doorbell + * + * @Return gsi_status + */ +int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value); + +/** + * gsi_reset_evt_ring - Peripheral should call this function to + * reset an event ring to recover from error state + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_reset_evt_ring(unsigned long evt_ring_hdl); + +/** + * gsi_get_evt_ring_cfg - This function returns the current config + * of the specified event ring + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * @props: where to copy properties to + * @scr: where to copy scratch info to + * + * @Return gsi_status + */ +int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl, + struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr); + +/** + * gsi_set_evt_ring_cfg - This function applies the supplied config + * to the specified event ring. 
+ * + * exclusive property of the event ring cannot be changed after + * gsi_alloc_evt_ring + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * @props: the properties to apply + * @scr: the scratch info to apply + * + * @Return gsi_status + */ +int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl, + struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr); + +/** + * gsi_alloc_channel - Peripheral should call this function to + * allocate a channel + * + * @props: Channel properties + * @dev_hdl: Client handle previously obtained from + * gsi_register_device + * @chan_hdl: Handle populated by GSI, opaque to client + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl, + unsigned long *chan_hdl); + +/** + * gsi_write_channel_scratch - Peripheral should call this function to + * write to the scratch area of the channel context + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @val: Value to write + * + * @Return gsi_status + */ +int gsi_write_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch val); + +/** + * gsi_write_channel_scratch3_reg - Peripheral should call this function to + * write to the scratch3 reg area of the channel context + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @val: Value to write + * + * @Return gsi_status + */ +int gsi_write_channel_scratch3_reg(unsigned long chan_hdl, + union __packed gsi_wdi_channel_scratch3_reg val); + +/** + * gsi_read_channel_scratch - Peripheral should call this function to + * read to the scratch area of the channel context + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @val: Read value + * + * @Return gsi_status + */ +int gsi_read_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch *val); + +/** + * gsi_update_mhi_channel_scratch - MHI Peripheral should call this + * function to update the scratch area of the channel context. Updating + * will be by read-modify-write method, so non SWI fields will not be + * affected + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @mscr: MHI Channel Scratch value + * + * @Return gsi_status + */ +int gsi_update_mhi_channel_scratch(unsigned long chan_hdl, + struct __packed gsi_mhi_channel_scratch mscr); + +/** + * gsi_start_channel - Peripheral should call this function to + * start a channel i.e put into running state + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_start_channel(unsigned long chan_hdl); + +/** + * gsi_stop_channel - Peripheral should call this function to + * stop a channel. 
Stop will happen on a packet boundary + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * This function can sleep + * + * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again + * other error codes for failure + */ +int gsi_stop_channel(unsigned long chan_hdl); + +/** + * gsi_reset_channel - Peripheral should call this function to + * reset a channel to recover from error state + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_reset_channel(unsigned long chan_hdl); + +/** + * gsi_dealloc_channel - Peripheral should call this function to + * de-allocate a channel + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_dealloc_channel(unsigned long chan_hdl); + +/** + * gsi_stop_db_channel - Peripheral should call this function to + * stop a channel when all transfer elements till the doorbell + * have been processed + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * This function can sleep + * + * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again + * other error codes for failure + */ +int gsi_stop_db_channel(unsigned long chan_hdl); + +/** + * gsi_query_channel_db_addr - Peripheral should call this function to + * query the physical addresses of the channel doorbell registers + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @db_addr_wp_lsb: Physical address of doorbell register where the 32 + * LSBs of the doorbell value should be written + * @db_addr_wp_msb: Physical address of doorbell register where the 32 + * MSBs of the doorbell value should be written + * + * @Return gsi_status + */ +int gsi_query_channel_db_addr(unsigned long chan_hdl, + uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb); + +/** + * gsi_query_channel_info - Peripheral can call this function to query the + * channel and associated event ring (if any) status. + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @info: Where to read the values into + * + * @Return gsi_status + */ +int gsi_query_channel_info(unsigned long chan_hdl, + struct gsi_chan_info *info); + +/** + * gsi_is_channel_empty - Peripheral can call this function to query if + * the channel is empty. This is only applicable to GPI. "Empty" means + * GSI has consumed all descriptors for a TO_GSI channel and SW has + * processed all completed descriptors for a FROM_GSI channel. 
+ * + * @chan_hdl: Client handle previously obtained from gsi_alloc_channel + * @is_empty: set by GSI based on channel emptiness + * + * @Return gsi_status + */ +int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty); + +/** + * gsi_get_channel_cfg - This function returns the current config + * of the specified channel + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @props: where to copy properties to + * @scr: where to copy scratch info to + * + * @Return gsi_status + */ +int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props, + union gsi_channel_scratch *scr); + +/** + * gsi_set_channel_cfg - This function applies the supplied config + * to the specified channel + * + * ch_id and evt_ring_hdl of the channel cannot be changed after + * gsi_alloc_channel + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @props: the properties to apply + * @scr: the scratch info to apply + * + * @Return gsi_status + */ +int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props, + union gsi_channel_scratch *scr); + +/** + * gsi_poll_channel - Peripheral should call this function to query for + * completed transfer descriptors. + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @notify: Information about the completed transfer if any + * + * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers + * completed) + */ +int gsi_poll_channel(unsigned long chan_hdl, + struct gsi_chan_xfer_notify *notify); + + +/** + * gsi_poll_n_channel - Peripheral should call this function to query for + * completed transfer descriptors. + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @notify: Information about the completed transfer if any + * @expected_num: Number of descriptor we want to poll each time. + * @actual_num: Actual number of descriptor we polled successfully. + * + * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers + * completed) + */ +int gsi_poll_n_channel(unsigned long chan_hdl, + struct gsi_chan_xfer_notify *notify, + int expected_num, int *actual_num); + + +/** + * gsi_config_channel_mode - Peripheral should call this function + * to configure the channel mode. + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @mode: Mode to move the channel into + * + * @Return gsi_status + */ +int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode); + +/** + * gsi_queue_xfer - Peripheral should call this function + * to queue transfers on the given channel + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @num_xfers: Number of transfer in the array @ xfer + * @xfer: Array of num_xfers transfer descriptors + * @ring_db: If true, tell HW about these queued xfers + * If false, do not notify HW at this time + * + * @Return gsi_status + */ +int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers, + struct gsi_xfer_elem *xfer, bool ring_db); + +/** + * gsi_start_xfer - Peripheral should call this function to + * inform HW about queued xfers + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * @Return gsi_status + */ +int gsi_start_xfer(unsigned long chan_hdl); + +/** + * gsi_configure_regs - Peripheral should call this function + * to configure the GSI registers before/after the FW is + * loaded but before it is enabled. 
+ * + * @per_base_addr: Base address of the peripheral using GSI + * @ver: GSI core version + * + * @Return gsi_status + */ +int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver); + +/** + * gsi_enable_fw - Peripheral should call this function + * to enable the GSI FW after the FW has been loaded to the SRAM. + * + * @gsi_base_addr: Base address of GSI register space + * @gsi_size: Mapping size of the GSI register space + * @ver: GSI core version + + * @Return gsi_status + */ +int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver); + +/** + * gsi_get_inst_ram_offset_and_size - Peripheral should call this function + * to get instruction RAM base address offset and size. Peripheral typically + * uses this info to load GSI FW into the IRAM. + * + * @base_offset:[OUT] - IRAM base offset address + * @size: [OUT] - IRAM size + * @ver: GSI core version + + * @Return none + */ +void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset, + unsigned long *size, enum gsi_ver ver); + +/** + * gsi_halt_channel_ee - Peripheral should call this function + * to stop other EE's channel. This is usually used in SSR clean + * + * @chan_idx: Virtual channel index + * @ee: EE + * @code: [out] response code for operation + + * @Return gsi_status + */ +int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code); + +/** + * gsi_map_base - Peripheral should call this function to configure + * access to the GSI registers. + + * @gsi_base_addr: Base address of GSI register space + * @gsi_size: Mapping size of the GSI register space + * + * @Return gsi_status + */ +int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size); + +/** + * gsi_unmap_base - Peripheral should call this function to undo the + * effects of gsi_map_base + * + * @Return gsi_status + */ +int gsi_unmap_base(void); + +/** + * gsi_map_virtual_ch_to_per_ep - Peripheral should call this function + * to configure each GSI virtual channel with the per endpoint index. + * + * @ee: The ee to be used + * @chan_num: The channel to be used + * @per_ep_index: value to assign + * + * @Return gsi_status + */ +int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index); + +/** + * gsi_alloc_channel_ee - Peripheral should call this function + * to alloc other EE's channel. This is usually done in bootup to allocate all + * chnnels. 
+ * + * @chan_idx: Virtual channel index + * @ee: EE + * @code: [out] response code for operation + + * @Return gsi_status + */ +int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code); + +/* + * Here is a typical sequence of calls + * + * gsi_register_device + * + * gsi_write_device_scratch (if the protocol needs this) + * + * gsi_alloc_evt_ring (for as many event rings as needed) + * gsi_write_evt_ring_scratch + * + * gsi_alloc_channel (for as many channels as needed; channels can have + * no event ring, an exclusive event ring or a shared event ring) + * gsi_write_channel_scratch + * gsi_read_channel_scratch + * gsi_start_channel + * gsi_queue_xfer/gsi_start_xfer + * gsi_config_channel_mode/gsi_poll_channel (if clients wants to poll on + * xfer completions) + * gsi_stop_db_channel/gsi_stop_channel + * + * gsi_dealloc_channel + * + * gsi_dealloc_evt_ring + * + * gsi_deregister_device + * + */ +#else +static inline int gsi_register_device(struct gsi_per_props *props, + unsigned long *dev_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_complete_clk_grant(unsigned long dev_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_write_device_scratch(unsigned long dev_hdl, + struct gsi_device_scratch *val) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_deregister_device(unsigned long dev_hdl, bool force) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, + unsigned long dev_hdl, + unsigned long *evt_ring_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl, + union __packed gsi_evt_scratch val) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl, + uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, + uint64_t value) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_reset_evt_ring(unsigned long evt_ring_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_alloc_channel(struct gsi_chan_props *props, + unsigned long dev_hdl, + unsigned long *chan_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_write_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch val) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} +static inline int gsi_write_channel_scratch3_reg(unsigned long chan_hdl, + union __packed gsi_wdi_channel_scratch3_reg val) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_read_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch *val) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_update_mhi_channel_scratch(unsigned long chan_hdl, + struct __packed gsi_mhi_channel_scratch mscr) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_start_channel(unsigned long chan_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_stop_channel(unsigned long chan_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_reset_channel(unsigned long chan_hdl) +{ + return 
-GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_dealloc_channel(unsigned long chan_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_stop_db_channel(unsigned long chan_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_query_channel_db_addr(unsigned long chan_hdl, + uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_query_channel_info(unsigned long chan_hdl, + struct gsi_chan_info *info) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_poll_channel(unsigned long chan_hdl, + struct gsi_chan_xfer_notify *notify) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_poll_n_channel(unsigned long chan_hdl, + struct gsi_chan_xfer_notify *notify, + int expected_num, int *actual_num) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_config_channel_mode(unsigned long chan_hdl, + enum gsi_chan_mode mode) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers, + struct gsi_xfer_elem *xfer, bool ring_db) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_start_xfer(unsigned long chan_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_get_channel_cfg(unsigned long chan_hdl, + struct gsi_chan_props *props, + union gsi_channel_scratch *scr) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_set_channel_cfg(unsigned long chan_hdl, + struct gsi_chan_props *props, + union gsi_channel_scratch *scr) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl, + struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl, + struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_configure_regs( + phys_addr_t per_base_addr, enum gsi_ver ver) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_enable_fw( + phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset, + unsigned long *size, enum gsi_ver ver) +{ +} + +static inline int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, + int *code) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_unmap_base(void) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_map_virtual_ch_to_per_ep( + u32 ee, u32 chan_num, u32 per_ep_index) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, + int *code) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +#endif +#endif diff --git a/include/linux/rndis_ipa.h b/include/linux/rndis_ipa.h new file mode 100644 index 000000000000..f2df4f2b8b87 --- /dev/null +++ b/include/linux/rndis_ipa.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _RNDIS_IPA_H_
+#define _RNDIS_IPA_H_
+
+#include <linux/ipa.h>
+
+/*
+ * @priv: private data given upon ipa_connect
+ * @evt: event enum, should be IPA_WRITE_DONE
+ * @data: for tx path the data field is the sent socket buffer.
+ */
+typedef void (*ipa_callback)(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data);
+
+/*
+ * struct ipa_usb_init_params - parameters for driver initialization API
+ *
+ * @device_ready_notify: callback supplied by USB core driver.
+ * This callback shall be called by the Netdev once the device
+ * is ready to receive data from the tethered PC.
+ * @ipa_rx_notify: The network driver will set this callback (out parameter).
+ * This callback shall be supplied for ipa_connect upon pipe
+ * connection (USB->IPA); once the IPA driver receives data packets
+ * from the USB pipe destined for Apps, this callback will be called.
+ * @ipa_tx_notify: The network driver will set this callback (out parameter).
+ * This callback shall be supplied for ipa_connect upon pipe
+ * connection (IPA->USB); once the IPA driver sends packets destined
+ * for USB, IPA BAM will notify of Tx-complete.
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ * @private: The network driver will set this pointer (out parameter).
+ * This pointer will hold the network device for later interaction
+ * between the USB driver and the network driver.
+ * @skip_ep_cfg: boolean field that determines if the Apps processor
+ * should or should not configure this end-point.
+ */
+struct ipa_usb_init_params {
+	void (*device_ready_notify)(void);
+	ipa_callback ipa_rx_notify;
+	ipa_callback ipa_tx_notify;
+	u8 host_ethaddr[ETH_ALEN];
+	u8 device_ethaddr[ETH_ALEN];
+	void *private;
+	bool skip_ep_cfg;
+};
+
+#ifdef CONFIG_RNDIS_IPA
+
+int rndis_ipa_init(struct ipa_usb_init_params *params);
+
+int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
+	u32 ipa_to_usb_hdl,
+	u32 max_xfer_size_bytes_to_dev,
+	u32 max_packet_number_to_dev,
+	u32 max_xfer_size_bytes_to_host,
+	void *private);
+
+int rndis_ipa_pipe_disconnect_notify(void *private);
+
+void rndis_ipa_cleanup(void *private);
+
+#else /* CONFIG_RNDIS_IPA */
+
+static inline int rndis_ipa_init(struct ipa_usb_init_params *params)
+{
+	return -ENOMEM;
+}
+
+static inline int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
+	u32 ipa_to_usb_hdl,
+	u32 max_xfer_size_bytes_to_dev,
+	u32 max_packet_number_to_dev,
+	u32 max_xfer_size_bytes_to_host,
+	void *private)
+{
+	return -ENOMEM;
+}
+
+static inline int rndis_ipa_pipe_disconnect_notify(void *private)
+{
+	return -ENOMEM;
+}
+
+static inline void rndis_ipa_cleanup(void *private)
+{
+
+}
+#endif /* CONFIG_RNDIS_IPA */
+
+#endif /* _RNDIS_IPA_H_ */
--
GitLab
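
The ipa_wdi3.h declarations in this snapshot are header-only, so a short usage sketch may help. The following is an illustrative, non-authoritative example of the connect/enable sequence a WLAN client could follow with this API. The pipe ring parameters in ipa_wdi_conn_in_params are assumed to have been filled in elsewhere by the caller; wdi_notify and wlan_ipa_connect are made-up names; the callback signature is assumed to match ipa_notify_cb from linux/ipa.h; and IPA_CLIENT_WLAN1_CONS is assumed to be a valid ipa_client_type value (it is not defined in this header).

/* Illustrative only: how a WLAN driver might bring up the WDI3 data path.
 * The detailed pipe parameters (ring bases, doorbells) are left to the
 * caller; the client type and bandwidth value below are placeholders.
 */
#include <linux/ipa_wdi3.h>

static void wdi_notify(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
{
	/* handle IPA_RECEIVE / IPA_WRITE_DONE events for the sys pipes */
}

static int wlan_ipa_connect(struct ipa_wdi_conn_in_params *in)
{
	struct ipa_wdi_conn_out_params out;
	struct ipa_wdi_perf_profile profile;
	int ret;

	/* caller has already filled in->u_tx / in->u_rx ring parameters */
	in->notify = wdi_notify;
	in->priv = NULL;
	in->is_smmu_enabled = false;
	in->num_sys_pipe_needed = 0;

	ret = ipa_wdi_conn_pipes(in, &out);	/* not from atomic context */
	if (ret)
		return ret;

	/* out.tx_uc_db_pa / out.rx_uc_db_pa are handed to the WLAN HW */

	ret = ipa_wdi_enable_pipes();
	if (ret)
		goto disconn;

	profile.client = IPA_CLIENT_WLAN1_CONS;	/* assumed client type */
	profile.max_supported_bw_mbps = 800;	/* placeholder data rate */
	ret = ipa_wdi_set_perf_profile(&profile);
	if (ret)
		goto disable;

	return 0;

disable:
	ipa_wdi_disable_pipes();
disconn:
	ipa_wdi_disconn_pipes();
	return ret;
}

Teardown would typically mirror this using ipa_wdi_disable_pipes(), ipa_wdi_disconn_pipes(), ipa_wdi_dereg_intf() and ipa_wdi_cleanup(), in that order.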
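
msm_gsi.h documents a typical call sequence only in prose. Below is a hedged sketch of that sequence for a GPI channel, using only the structures and functions declared in the header above. example_gsi_bringup and the callback names are hypothetical, and the register base, IRQ, ring lengths and ring DMA addresses are placeholders a real peripheral driver would obtain from its own resources and allocations.

/* Illustrative GPI bring-up following the call sequence documented in
 * msm_gsi.h. All addresses and sizes below are placeholders.
 */
#include <linux/msm_gsi.h>

static void per_notify_cb(struct gsi_per_notify *notify) { }
static void evt_err_cb(struct gsi_evt_err_notify *notify) { }
static void chan_err_cb(struct gsi_chan_err_notify *notify) { }
static void chan_xfer_cb(struct gsi_chan_xfer_notify *notify) { }

static int example_gsi_bringup(phys_addr_t gsi_base, unsigned long gsi_size,
			       unsigned int irq,
			       u64 evt_ring_dma, u64 chan_ring_dma)
{
	struct gsi_per_props per = { 0 };
	struct gsi_evt_ring_props evt = { 0 };
	struct gsi_chan_props ch = { 0 };
	union gsi_channel_scratch scr = { { 0 } };
	unsigned long dev_hdl, evt_hdl, chan_hdl;
	int ret;

	per.ver = GSI_VER_2_5;
	per.ee = 0;
	per.intr = GSI_INTR_IRQ;
	per.irq = irq;
	per.phys_addr = gsi_base;
	per.size = gsi_size;
	per.notify_cb = per_notify_cb;	/* no req/rel_clk_cb: always clocked */
	ret = gsi_register_device(&per, &dev_hdl);
	if (ret)
		return ret;

	evt.intf = GSI_EVT_CHTYPE_GPI_EV;
	evt.intr = GSI_INTR_IRQ;
	evt.re_size = GSI_EVT_RING_RE_SIZE_16B;
	evt.ring_len = 16 * 64;			/* 64 elements, placeholder */
	evt.ring_base_addr = evt_ring_dma;
	evt.int_modt = 32;
	evt.int_modc = 1;
	evt.exclusive = true;			/* needed later for polling */
	evt.err_cb = evt_err_cb;
	ret = gsi_alloc_evt_ring(&evt, dev_hdl, &evt_hdl);
	if (ret)
		goto dereg;

	ch.prot = GSI_CHAN_PROT_GPI;
	ch.dir = GSI_CHAN_DIR_TO_GSI;
	ch.ch_id = 0;				/* placeholder channel ID */
	ch.evt_ring_hdl = evt_hdl;
	ch.re_size = GSI_CHAN_RE_SIZE_16B;
	ch.ring_len = 16 * 64;
	ch.ring_base_addr = chan_ring_dma;
	ch.use_db_eng = GSI_CHAN_DIRECT_MODE;
	ch.low_weight = 1;
	ch.xfer_cb = chan_xfer_cb;
	ch.err_cb = chan_err_cb;
	ret = gsi_alloc_channel(&ch, dev_hdl, &chan_hdl);
	if (ret)
		goto free_evt;

	ret = gsi_write_channel_scratch(chan_hdl, scr);
	if (ret)
		goto free_chan;

	return gsi_start_channel(chan_hdl);

free_chan:
	gsi_dealloc_channel(chan_hdl);
free_evt:
	gsi_dealloc_evt_ring(evt_hdl);
dereg:
	gsi_deregister_device(dev_hdl, false);
	return ret;
}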
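
A second sketch shows the data-path side of the same API: queuing one transfer element and then draining completions in polling mode, per the gsi_queue_xfer, gsi_config_channel_mode and gsi_poll_channel documentation above. The function name, buffer address and length are again placeholders, and polling is assumed to be used only on a channel whose event ring was allocated with exclusive set to true, as the gsi_evt_ring_props description requires.

/* Illustrative only: queue one TRE, then drain completions in poll mode. */
#include <linux/msm_gsi.h>

static int example_gsi_xfer_and_poll(unsigned long chan_hdl,
				     u64 buf_dma, u16 len, void *cookie)
{
	struct gsi_xfer_elem xfer = { 0 };
	struct gsi_chan_xfer_notify notify;
	int ret;

	xfer.addr = buf_dma;
	xfer.len = len;
	xfer.type = GSI_XFER_ELEM_DATA;
	xfer.flags = GSI_XFER_FLAG_EOT;	/* generate event on end of transfer */
	xfer.xfer_user_data = cookie;

	/* ring_db = true tells HW about the new element immediately */
	ret = gsi_queue_xfer(chan_hdl, 1, &xfer, true);
	if (ret)
		return ret;

	/* switch to polling; only valid on channels with an exclusive
	 * event ring (see gsi_evt_ring_props.exclusive above)
	 */
	ret = gsi_config_channel_mode(chan_hdl, GSI_CHAN_MODE_POLL);
	if (ret)
		return ret;

	do {
		ret = gsi_poll_channel(chan_hdl, &notify);
		if (ret == GSI_STATUS_SUCCESS)
			pr_debug("xfer %pK done, %u bytes\n",
				 notify.xfer_user_data, notify.bytes_xfered);
	} while (ret == GSI_STATUS_SUCCESS);

	if (ret != GSI_STATUS_POLL_EMPTY)
		return ret;

	/* re-arm completion interrupts once the ring is drained */
	return gsi_config_channel_mode(chan_hdl, GSI_CHAN_MODE_CALLBACK);
}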