From c29d9020674b07a7c3fb2e8c0152d73fd6310d29 Mon Sep 17 00:00:00 2001 From: Michael Adisumarta Date: Wed, 15 Nov 2017 16:23:09 -0800 Subject: [PATCH] msm: ipa: Add snapshot of IPA driver This is a snapshot of the IPA driver as of msm-4.9 commit '089d1cb5176d3cf8a99eb744027c11b91dba00c9'. Change-Id: If7d19da967c2851c56b971769d6ff2c38f07465f Acked-by: Jyothi Jayanthi Signed-off-by: Michael Adisumarta --- .../devicetree/bindings/platform/msm/ipa.txt | 227 + .../bindings/platform/msm/msm_gsi.txt | 15 + .../bindings/platform/msm/rmnet_ipa.txt | 22 + .../bindings/platform/msm/rmnet_ipa3.txt | 22 + drivers/platform/msm/Kconfig | 84 + drivers/platform/msm/Makefile | 3 + drivers/platform/msm/gsi/Makefile | 2 + drivers/platform/msm/gsi/gsi.c | 2996 +++++++++ drivers/platform/msm/gsi/gsi.h | 298 + drivers/platform/msm/gsi/gsi_dbg.c | 916 +++ drivers/platform/msm/gsi/gsi_reg.h | 1875 ++++++ drivers/platform/msm/ipa/Makefile | 5 + drivers/platform/msm/ipa/ipa_api.c | 2997 +++++++++ drivers/platform/msm/ipa/ipa_api.h | 411 ++ drivers/platform/msm/ipa/ipa_clients/Makefile | 4 + .../platform/msm/ipa/ipa_clients/ecm_ipa.c | 1465 ++++ .../msm/ipa/ipa_clients/ipa_mhi_client.c | 2631 ++++++++ .../msm/ipa/ipa_clients/ipa_uc_offload.c | 658 ++ .../platform/msm/ipa/ipa_clients/ipa_usb.c | 2750 ++++++++ .../platform/msm/ipa/ipa_clients/odu_bridge.c | 1538 +++++ .../platform/msm/ipa/ipa_clients/rndis_ipa.c | 2479 +++++++ .../msm/ipa/ipa_clients/rndis_ipa_trace.h | 81 + drivers/platform/msm/ipa/ipa_common_i.h | 389 ++ drivers/platform/msm/ipa/ipa_rm.c | 1191 ++++ .../msm/ipa/ipa_rm_dependency_graph.c | 247 + .../msm/ipa/ipa_rm_dependency_graph.h | 49 + drivers/platform/msm/ipa/ipa_rm_i.h | 157 + .../msm/ipa/ipa_rm_inactivity_timer.c | 268 + drivers/platform/msm/ipa/ipa_rm_peers_list.c | 277 + drivers/platform/msm/ipa/ipa_rm_peers_list.h | 62 + drivers/platform/msm/ipa/ipa_rm_resource.c | 1211 ++++ drivers/platform/msm/ipa/ipa_rm_resource.h | 166 + .../msm/ipa/ipa_uc_offload_common_i.h | 27 + drivers/platform/msm/ipa/ipa_v3/Makefile | 9 + drivers/platform/msm/ipa/ipa_v3/ipa.c | 5947 +++++++++++++++++ drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 1501 +++++ drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 2195 ++++++ drivers/platform/msm/ipa/ipa_v3/ipa_dma.c | 1105 +++ drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 3807 +++++++++++ drivers/platform/msm/ipa/ipa_v3/ipa_flt.c | 1626 +++++ drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c | 1252 ++++ drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h | 44 + .../platform/msm/ipa/ipa_v3/ipa_hw_stats.c | 1971 ++++++ drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 2231 +++++++ .../platform/msm/ipa/ipa_v3/ipa_interrupts.c | 569 ++ drivers/platform/msm/ipa/ipa_v3/ipa_intf.c | 649 ++ drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 657 ++ drivers/platform/msm/ipa/ipa_v3/ipa_nat.c | 1053 +++ .../platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 1375 ++++ .../platform/msm/ipa/ipa_v3/ipa_qmi_service.h | 345 + .../msm/ipa/ipa_v3/ipa_qmi_service_v01.c | 2926 ++++++++ drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 1805 +++++ drivers/platform/msm/ipa/ipa_v3/ipa_trace.h | 153 + drivers/platform/msm/ipa/ipa_v3/ipa_uc.c | 929 +++ drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c | 964 +++ drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c | 443 ++ .../msm/ipa/ipa_v3/ipa_uc_offload_i.h | 553 ++ drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c | 1871 ++++++ drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 5313 +++++++++++++++ .../platform/msm/ipa/ipa_v3/ipahal/Makefile | 3 + .../platform/msm/ipa/ipa_v3/ipahal/ipahal.c | 1535 +++++ 
.../platform/msm/ipa/ipa_v3/ipahal/ipahal.h | 665 ++ .../msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c | 3516 ++++++++++ .../msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h | 291 + .../msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h | 180 + .../msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c | 547 ++ .../msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h | 248 + .../msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h | 55 + .../platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h | 679 ++ .../msm/ipa/ipa_v3/ipahal/ipahal_reg.c | 1966 ++++++ .../msm/ipa/ipa_v3/ipahal/ipahal_reg.h | 547 ++ .../msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h | 363 + drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 3313 +++++++++ .../msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c | 446 ++ drivers/platform/msm/ipa/ipa_v3/teth_bridge.c | 250 + drivers/platform/msm/ipa/test/Makefile | 2 + drivers/platform/msm/ipa/test/ipa_test_dma.c | 1050 +++ .../platform/msm/ipa/test/ipa_test_example.c | 99 + .../platform/msm/ipa/test/ipa_test_hw_stats.c | 330 + drivers/platform/msm/ipa/test/ipa_test_mhi.c | 3322 +++++++++ .../platform/msm/ipa/test/ipa_ut_framework.c | 1016 +++ .../platform/msm/ipa/test/ipa_ut_framework.h | 240 + drivers/platform/msm/ipa/test/ipa_ut_i.h | 88 + .../platform/msm/ipa/test/ipa_ut_suite_list.h | 41 + include/linux/ecm_ipa.h | 95 + include/linux/ipa.h | 2204 ++++++ include/linux/ipa_mhi.h | 161 + include/linux/ipa_odu_bridge.h | 162 + include/linux/ipa_uc_offload.h | 295 + include/linux/ipa_usb.h | 333 + include/linux/msm_gsi.h | 1294 ++++ include/linux/rndis_ipa.h | 102 + include/uapi/linux/ipa_qmi_service_v01.h | 1705 +++++ include/uapi/linux/msm_ipa.h | 1953 ++++++ include/uapi/linux/rmnet_ipa_fd_ioctl.h | 173 + 95 files changed, 96085 insertions(+) create mode 100644 Documentation/devicetree/bindings/platform/msm/ipa.txt create mode 100644 Documentation/devicetree/bindings/platform/msm/msm_gsi.txt create mode 100644 Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt create mode 100644 Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt create mode 100644 drivers/platform/msm/gsi/Makefile create mode 100644 drivers/platform/msm/gsi/gsi.c create mode 100644 drivers/platform/msm/gsi/gsi.h create mode 100644 drivers/platform/msm/gsi/gsi_dbg.c create mode 100644 drivers/platform/msm/gsi/gsi_reg.h create mode 100644 drivers/platform/msm/ipa/Makefile create mode 100644 drivers/platform/msm/ipa/ipa_api.c create mode 100644 drivers/platform/msm/ipa/ipa_api.h create mode 100644 drivers/platform/msm/ipa/ipa_clients/Makefile create mode 100644 drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/ipa_usb.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/odu_bridge.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c create mode 100644 drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h create mode 100644 drivers/platform/msm/ipa/ipa_common_i.h create mode 100644 drivers/platform/msm/ipa/ipa_rm.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_dependency_graph.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_dependency_graph.h create mode 100644 drivers/platform/msm/ipa/ipa_rm_i.h create mode 100644 drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_peers_list.c create mode 100644 drivers/platform/msm/ipa/ipa_rm_peers_list.h create mode 100644 drivers/platform/msm/ipa/ipa_rm_resource.c create mode 100644 
drivers/platform/msm/ipa/ipa_rm_resource.h create mode 100644 drivers/platform/msm/ipa/ipa_uc_offload_common_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/Makefile create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_client.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_dma.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_dp.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_flt.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_intf.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_nat.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_rt.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_trace.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_uc.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipa_utils.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c create mode 100644 drivers/platform/msm/ipa/ipa_v3/teth_bridge.c create mode 100644 drivers/platform/msm/ipa/test/Makefile create mode 100644 drivers/platform/msm/ipa/test/ipa_test_dma.c create mode 100644 drivers/platform/msm/ipa/test/ipa_test_example.c create mode 100644 drivers/platform/msm/ipa/test/ipa_test_hw_stats.c create mode 100644 drivers/platform/msm/ipa/test/ipa_test_mhi.c create mode 100644 drivers/platform/msm/ipa/test/ipa_ut_framework.c create mode 100644 drivers/platform/msm/ipa/test/ipa_ut_framework.h create mode 100644 drivers/platform/msm/ipa/test/ipa_ut_i.h create mode 100644 drivers/platform/msm/ipa/test/ipa_ut_suite_list.h create mode 100644 include/linux/ecm_ipa.h create mode 100644 
include/linux/ipa.h create mode 100644 include/linux/ipa_mhi.h create mode 100644 include/linux/ipa_odu_bridge.h create mode 100644 include/linux/ipa_uc_offload.h create mode 100644 include/linux/ipa_usb.h create mode 100644 include/linux/msm_gsi.h create mode 100644 include/linux/rndis_ipa.h create mode 100644 include/uapi/linux/ipa_qmi_service_v01.h create mode 100644 include/uapi/linux/msm_ipa.h create mode 100644 include/uapi/linux/rmnet_ipa_fd_ioctl.h diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt new file mode 100644 index 000000000000..e821feb3dda2 --- /dev/null +++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt @@ -0,0 +1,227 @@ +Qualcomm technologies inc. Internet Packet Accelerator + +Internet Packet Accelerator (IPA) is a programmable protocol +processor HW block. It is designed to support generic HW processing +of UL/DL IP packets for various use cases independent of radio technology. + +Required properties: + +IPA node: + +- compatible : "qcom,ipa" +- reg: Specifies the base physical addresses and the sizes of the IPA + registers. +- reg-names: "ipa-base" - string to identify the IPA CORE base registers. + "bam-base" - string to identify the IPA BAM base registers. + "a2-bam-base" - string to identify the A2 BAM base registers. +- interrupts: Specifies the interrupt associated with IPA. +- interrupt-names: "ipa-irq" - string to identify the IPA core interrupt. + "bam-irq" - string to identify the IPA BAM interrupt. + "a2-bam-irq" - string to identify the A2 BAM interrupt. +- qcom,ipa-hw-ver: Specifies the IPA hardware version. +- qcom,ipa-ram-mmap: An array of unsigned integers representing addresses and + sizes which are used by the driver to access IPA RAM. + +Optional: + +- qcom,wan-rx-ring-size: size of WAN rx ring, default is 192 +- qcom,lan-rx-ring-size: size of LAN rx ring, default is 192 +- qcom,arm-smmu: SMMU is present and ARM SMMU driver is used +- qcom,msm-smmu: SMMU is present and QSMMU driver is used +- qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass +- qcom,smmu-fast-map: Boolean context flag to set SMMU to fastpath mode +- ipa_smmu_ap: AP general purpose SMMU device + compatible "qcom,ipa-smmu-ap-cb" +- ipa_smmu_wlan: WDI SMMU device + compatible "qcom,ipa-smmu-wlan-cb" +- ipa_smmu_uc: uc SMMU device + compatible "qcom,ipa-smmu-uc-cb" +- qcom,use-a2-service: determine if A2 service will be used +- qcom,use-ipa-tethering-bridge: determine if tethering bridge will be used +- qcom,use-ipa-bamdma-a2-bridge: determine if a2/ipa hw bridge will be used +- qcom,ee: which EE is assigned to (non-secure) APPS from IPA-BAM POV. This +is a number +- qcom,ipa-hw-mode: IPA hardware mode - Normal, Virtual memory allocation, +memory allocation over a PCIe bridge +- qcom,msm-bus,name: String representing the client-name +- qcom,msm-bus,num-cases: Total number of usecases +- qcom,msm-bus,active-only: Boolean context flag for requests in active or + dual (active & sleep) contex +- qcom,msm-bus,num-paths: Total number of master-slave pairs +- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing: + master-id, slave-id, arbitrated bandwidth + in KBps, instantaneous bandwidth in KBps +- qcom,ipa-bam-remote-mode: Boolean context flag to determine if ipa bam + is in remote mode. 
+- qcom,modem-cfg-emb-pipe-flt: Boolean context flag to determine if modem + configures embedded pipe filtering rules +- qcom,skip-uc-pipe-reset: Boolean context flag to indicate whether + a pipe reset via the IPA uC is required +- qcom,ipa-wdi2: Boolean context flag to indicate whether + using wdi-2.0 or not +- qcom,bandwidth-vote-for-ipa: Boolean context flag to indicate whether + ipa clock voting is done by bandwidth + voting via msm-bus-scale driver or not +- qcom,use-64-bit-dma-mask: Boolean context flag to indicate whether + using 64bit dma mask or not +- qcom,use-dma-zone: Boolean context flag to indicate whether memory + allocations controlled by IPA driver that do not + specify a struct device * should use GFP_DMA to + workaround IPA HW limitations +- qcom,use-rg10-limitation-mitigation: Boolean context flag to activate + the mitigation to register group 10 + AP access limitation +- qcom,do-not-use-ch-gsi-20: Boolean context flag to activate + software workaround for IPA limitation + to not use GSI physical channel 20 +- qcom,tethered-flow-control: Boolean context flag to indicate whether + apps based flow control is needed for tethered + call. +- qcom,rx-polling-sleep-ms: Receive Polling Timeout in millisecond, + default is 1 millisecond. +- qcom,ipa-polling-iteration: IPA Polling Iteration Count,default is 40. +- qcom,ipa-tz-unlock-reg: Register start addresses and ranges which + need to be unlocked by TZ. + +IPA pipe sub nodes (A2 static pipes configurations): + +-label: two labels are supported, a2-to-ipa and ipa-to-a2 which +supply static configuration for A2-IPA connection. +-qcom,src-bam-physical-address: The physical address of the source BAM +-qcom,ipa-bam-mem-type:The memory type: + 0(Pipe memory), 1(Private memory), 2(System memory) +-qcom,src-bam-pipe-index: Source pipe index +-qcom,dst-bam-physical-address: The physical address of the + destination BAM +-qcom,dst-bam-pipe-index: Destination pipe index +-qcom,data-fifo-offset: Data fifo base offset +-qcom,data-fifo-size: Data fifo size (bytes) +-qcom,descriptor-fifo-offset: Descriptor fifo base offset +-qcom,descriptor-fifo-size: Descriptor fifo size (bytes) + +Optional properties: +-qcom,ipa-pipe-mem: Specifies the base physical address and the + size of the IPA pipe memory region. + Pipe memory is a feature which may be supported by the + target (HW platform). The Driver support using pipe + memory instead of system memory. In case this property + will not appear in the IPA DTS entry, the driver will + use system memory. +- clocks: This property shall provide a list of entries each of which + contains a phandle to clock controller device and a macro that is + the clock's name in hardware.This should be "clock_rpm" as clock + controller phandle and "clk_ipa_clk" as macro for "iface_clk" +- clock-names: This property shall contain the clock input names used + by driver in same order as the clocks property.This should be "iface_clk" + +IPA SMMU sub nodes + +-compatible: "qcom,ipa-smmu-ap-cb" - represents the AP context bank. + +-compatible: "qcom,ipa-smmu-wlan-cb" - represents IPA WLAN context bank. + +-compatible: "qcom,ipa-smmu-uc-cb" - represents IPA uC context bank (for uC + offload scenarios). +- iommus : the phandle and stream IDs for the SMMU used by this root + +- qcom,iova-mapping: specifies the start address and size of iova space. + +- qcom,additional-mapping: specifies any addtional mapping needed for this + context bank. 
The format is + +IPA SMP2P sub nodes + +-compatible: "qcom,smp2pgpio-map-ipa-1-out" - represents the out gpio from + ipa driver to modem. + +-compatible: "qcom,smp2pgpio-map-ipa-1-in" - represents the in gpio to + ipa driver from modem. + +-gpios: Binding to the gpio defined in XXX-smp2p.dtsi + + +Example: + +qcom,ipa@fd4c0000 { + compatible = "qcom,ipa"; + reg = <0xfd4c0000 0x26000>, + <0xfd4c4000 0x14818>; + <0xfc834000 0x7000>; + reg-names = "ipa-base", "bam-base"; "a2-bam-base"; + interrupts = <0 252 0>, + <0 253 0>; + <0 29 1>; + interrupt-names = "ipa-irq", "bam-irq"; "a2-bam-irq"; + qcom,ipa-hw-ver = <1>; + clocks = <&clock_rpm clk_ipa_clk>; + clock-names = "iface_clk"; + + qcom,msm-bus,name = "ipa"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + <90 512 0 0>, <90 585 0 0>, /* No vote */ + <90 512 100000 800000>, <90 585 100000 800000>, /* SVS */ + <90 512 100000 1200000>, <90 585 100000 1200000>; /* PERF */ + qcom,bus-vector-names = "MIN", "SVS", "PERF"; + + qcom,pipe1 { + label = "a2-to-ipa"; + qcom,src-bam-physical-address = <0xfc834000>; + qcom,ipa-bam-mem-type = <0>; + qcom,src-bam-pipe-index = <1>; + qcom,dst-bam-physical-address = <0xfd4c0000>; + qcom,dst-bam-pipe-index = <6>; + qcom,data-fifo-offset = <0x1000>; + qcom,data-fifo-size = <0xd00>; + qcom,descriptor-fifo-offset = <0x1d00>; + qcom,descriptor-fifo-size = <0x300>; + }; + + qcom,pipe2 { + label = "ipa-to-a2"; + qcom,src-bam-physical-address = <0xfd4c0000>; + qcom,ipa-bam-mem-type = <0>; + qcom,src-bam-pipe-index = <7>; + qcom,dst-bam-physical-address = <0xfc834000>; + qcom,dst-bam-pipe-index = <0>; + qcom,data-fifo-offset = <0x00>; + qcom,data-fifo-size = <0xd00>; + qcom,descriptor-fifo-offset = <0xd00>; + qcom,descriptor-fifo-size = <0x300>; + }; + + /* smp2p gpio information */ + qcom,smp2pgpio_map_ipa_1_out { + compatible = "qcom,smp2pgpio-map-ipa-1-out"; + gpios = <&smp2pgpio_ipa_1_out 0 0>; + }; + + qcom,smp2pgpio_map_ipa_1_in { + compatible = "qcom,smp2pgpio-map-ipa-1-in"; + gpios = <&smp2pgpio_ipa_1_in 0 0>; + }; + + ipa_smmu_ap: ipa_smmu_ap { + compatible = "qcom,ipa-smmu-ap-cb"; + iommus = <&apps_smmu 0x720>; + qcom,iova-mapping = <0x20000000 0x40000000>; + qcom,additional-mapping = + /* modem tables in IMEM */ + <0x146bd000 0x146bd000 0x2000>; + }; + + ipa_smmu_wlan: ipa_smmu_wlan { + compatible = "qcom,ipa-smmu-wlan-cb"; + iommus = <&apps_smmu 0x721>; + qcom,additional-mapping = + /* ipa-uc ram */ + <0x1e60000 0x1e60000 0x80000>; + }; + + ipa_smmu_uc: ipa_smmu_uc { + compatible = "qcom,ipa-smmu-uc-cb"; + iommus = <&apps_smmu 0x722>; + qcom,iova-mapping = <0x40000000 0x20000000>; + }; +}; diff --git a/Documentation/devicetree/bindings/platform/msm/msm_gsi.txt b/Documentation/devicetree/bindings/platform/msm/msm_gsi.txt new file mode 100644 index 000000000000..7b297249554f --- /dev/null +++ b/Documentation/devicetree/bindings/platform/msm/msm_gsi.txt @@ -0,0 +1,15 @@ +* Qualcomm Technologies, Inc. GSI driver module + +GSI is a HW accelerator that supports Generic SW Interfaces (GSI) which are +peripheral specific (IPA in this case). +GSI translates SW transfer elements (TRE) into TLV transactions which are +then processed by the peripheral. +This Driver configures and communicates with GSI HW. 
+ +Required properties: +- compatible: Must be "qcom,msm_gsi" + +Example: + qcom,msm-gsi { + compatible = "qcom,msm_gsi"; + } diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt new file mode 100644 index 000000000000..d8934c01cc71 --- /dev/null +++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt @@ -0,0 +1,22 @@ +* Qualcomm Technologies, Inc. RmNet IPA driver module + +This module enables embedded data calls using IPA HW. + +Required properties: +- compatible: Must be "qcom,rmnet-ipa" + +Optional: +- qcom,rmnet-ipa-ssr: determine if modem SSR is supported +- qcom,ipa-loaduC: indicate that ipa uC should be loaded +- qcom,ipa-advertise-sg-support: determine how to respond to a query +regarding scatter-gather capability +- qcom,ipa-napi-enable: Boolean context flag to indicate whether + to enable napi framework or not +- qcom,wan-rx-desc-size: size of WAN rx desc fifo ring, default is 256 + +Example: + qcom,rmnet-ipa { + compatible = "qcom,rmnet-ipa"; + qcom,wan-rx-desc-size = <256>; + } + diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt new file mode 100644 index 000000000000..e9575f150c5e --- /dev/null +++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt @@ -0,0 +1,22 @@ +* Qualcomm Technologies, Inc. RmNet IPA driver module + +This module enables embedded data calls using IPA v3 HW. + +Required properties: +- compatible: Must be "qcom,rmnet-ipa3" + +Optional: +- qcom,rmnet-ipa-ssr: determine if modem SSR is supported +- qcom,ipa-loaduC: indicate that ipa uC should be loaded +- qcom,ipa-advertise-sg-support: determine how to respond to a query +regarding scatter-gather capability +- qcom,ipa-napi-enable: Boolean context flag to indicate whether + to enable napi framework or not +- qcom,wan-rx-desc-size: size of WAN rx desc fifo ring, default is 256 + +Example: + qcom,rmnet-ipa3 { + compatible = "qcom,rmnet-ipa3"; + qcom,wan-rx-desc-size = <256>; + } + diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index e68ec8075e30..dadf93a9f1a7 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -54,4 +54,88 @@ config USB_BAM USB BAM driver was added to supports SPS Peripheral-to-Peripheral transfers between the USB and other peripheral. +config IPA + tristate "IPA support" + depends on SPS && NET + help + This driver supports the Internet Packet Accelerator (IPA) core. + IPA is a programmable protocol processor HW block. + It is designed to support generic HW processing of UL/DL IP packets + for various use cases independent of radio technology. + The driver support client connection and configuration + for the IPA core. + Kernel and user-space processes can call the IPA driver + to configure IPA core. + +config RMNET_IPA + tristate "IPA RMNET WWAN Network Device" + depends on IPA && MSM_QMI_INTERFACE + help + This WWAN Network Driver implements network stack class device. + It supports Embedded data transfer from A7 to Q6. Configures IPA HW + for RmNet Data Driver and also exchange of QMI messages between + A7 and Q6 IPA-driver. + +config GSI + bool "GSI support" + help + This driver provides the transport needed to talk to the + IPA core. It replaces the BAM transport used previously. 
+ + The GSI connects to a peripheral component via uniform TLV + interface, and allows it to interface with other peripherals + and CPUs over various types of interfaces such as MHI, xDCI, + xHCI, GPI, WDI, Ethernet, etc. + +config IPA3 + tristate "IPA3 support" + select GSI + depends on NET + help + This driver supports the Internet Packet Accelerator (IPA3) core. + IPA is a programmable protocol processor HW block. + It is designed to support generic HW processing of UL/DL IP packets + for various use cases independent of radio technology. + The driver support client connection and configuration + for the IPA core. + Kernel and user-space processes can call the IPA driver + to configure IPA core. + +config RMNET_IPA3 + tristate "IPA3 RMNET WWAN Network Device" + depends on IPA3 && MSM_QMI_INTERFACE + help + This WWAN Network Driver implements network stack class device. + It supports Embedded data transfer from A7 to Q6. Configures IPA HW + for RmNet Data Driver and also exchange of QMI messages between + A7 and Q6 IPA-driver. + +config ECM_IPA + tristate "STD ECM LAN Driver support" + depends on IPA || IPA3 + help + Enables LAN between applications processor and a tethered + host using the STD ECM protocol. + This Network interface is aimed to allow data path go through + IPA core while using STD ECM protocol. + +config RNDIS_IPA + tristate "RNDIS_IPA Network Interface Driver support" + depends on IPA || IPA3 + help + Enables LAN between applications processor and a tethered + host using the RNDIS protocol. + This Network interface is aimed to allow data path go through + IPA core while using RNDIS protocol. + +config IPA_UT + tristate "IPA Unit-Test Framework and Test Suites" + depends on IPA3 && DEBUG_FS + help + This Module implements IPA in-kernel test framework. + The framework supports defining and running tests, grouped + into suites according to the sub-unit of the IPA being tested. + The user interface to run and control the tests is debugfs file + system. + endmenu diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile index 3a3c174222a4..f409e015ad07 100644 --- a/drivers/platform/msm/Makefile +++ b/drivers/platform/msm/Makefile @@ -5,3 +5,6 @@ obj-$(CONFIG_MSM_EXT_DISPLAY) += msm_ext_display.o obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o obj-$(CONFIG_SPS) += sps/ obj-$(CONFIG_USB_BAM) += usb_bam.o +obj-$(CONFIG_GSI) += gsi/ +obj-$(CONFIG_IPA) += ipa/ +obj-$(CONFIG_IPA3) += ipa/ diff --git a/drivers/platform/msm/gsi/Makefile b/drivers/platform/msm/gsi/Makefile new file mode 100644 index 000000000000..82d6c6299bb8 --- /dev/null +++ b/drivers/platform/msm/gsi/Makefile @@ -0,0 +1,2 @@ +gsidbg-$(CONFIG_DEBUG_FS) += gsi_dbg.o +obj-$(CONFIG_GSI) += gsi.o gsidbg.o diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c new file mode 100644 index 000000000000..40f847120852 --- /dev/null +++ b/drivers/platform/msm/gsi/gsi.c @@ -0,0 +1,2996 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "gsi.h" +#include "gsi_reg.h" + +#define GSI_CMD_TIMEOUT (5*HZ) +#define GSI_STOP_CMD_TIMEOUT_MS 20 +#define GSI_MAX_CH_LOW_WEIGHT 15 +#define GSI_MHI_ER_START 10 +#define GSI_MHI_ER_END 16 + +#define GSI_RESET_WA_MIN_SLEEP 1000 +#define GSI_RESET_WA_MAX_SLEEP 2000 + +#define GSI_STTS_REG_BITS 32 + +#ifndef CONFIG_DEBUG_FS +void gsi_debugfs_init(void) +{ +} +#endif + +static const struct of_device_id msm_gsi_match[] = { + { .compatible = "qcom,msm_gsi", }, + { }, +}; + +struct gsi_ctx *gsi_ctx; + +static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee)); +} + +static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee)); +} + +static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee)); +} + +static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee)); +} + +static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee)); +} + +static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val) +{ + uint32_t curr; + + curr = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee)); + gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base + + GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee)); +} + +static void gsi_handle_ch_ctrl(int ee) +{ + uint32_t ch; + int i; + uint32_t val; + struct gsi_chan_ctx *ctx; + + ch = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee)); + gsi_writel(ch, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee)); + GSIDBG("ch %x\n", ch); + for (i = 0; i < GSI_STTS_REG_BITS; i++) { + if ((1 << i) & ch) { + if (i >= gsi_ctx->max_ch || i >= GSI_CHAN_MAX) { + GSIERR("invalid channel %d\n", i); + break; + } + + ctx = &gsi_ctx->chan[i]; + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(i, ee)); + ctx->state = (val & + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >> + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT; + GSIDBG("ch %u state updated to %u\n", i, ctx->state); + complete(&ctx->compl); + gsi_ctx->ch_dbg[i].cmd_completed++; + } + } +} + +static void gsi_handle_ev_ctrl(int ee) +{ + uint32_t ch; + int i; + uint32_t val; + struct gsi_evt_ctx *ctx; + + ch = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(ee)); + gsi_writel(ch, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(ee)); + GSIDBG("ev %x\n", ch); + for (i = 0; i < GSI_STTS_REG_BITS; i++) { + if ((1 << i) & ch) { + if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) { + GSIERR("invalid event %d\n", i); + break; + } + + ctx = 
&gsi_ctx->evtr[i]; + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(i, ee)); + ctx->state = (val & + GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >> + GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT; + GSIDBG("evt %u state updated to %u\n", i, ctx->state); + complete(&ctx->compl); + } + } +} + +static void gsi_handle_glob_err(uint32_t err) +{ + struct gsi_log_err *log; + struct gsi_chan_ctx *ch; + struct gsi_evt_ctx *ev; + struct gsi_chan_err_notify chan_notify; + struct gsi_evt_err_notify evt_notify; + struct gsi_per_notify per_notify; + uint32_t val; + enum gsi_err_type err_type; + + log = (struct gsi_log_err *)&err; + GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee, + log->virt_idx); + GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1, + log->arg2, log->arg3); + + err_type = log->err_type; + /* + * These are errors thrown by hardware. We need + * BUG_ON() to capture the hardware state right + * when it is unexpected. + */ + switch (err_type) { + case GSI_ERR_TYPE_GLOB: + per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR; + per_notify.user_data = gsi_ctx->per.user_data; + per_notify.data.err_desc = err & 0xFFFF; + gsi_ctx->per.notify_cb(&per_notify); + break; + case GSI_ERR_TYPE_CHAN: + if (WARN_ON(log->virt_idx >= gsi_ctx->max_ch)) { + GSIERR("Unexpected ch %d\n", log->virt_idx); + return; + } + + ch = &gsi_ctx->chan[log->virt_idx]; + chan_notify.chan_user_data = ch->props.chan_user_data; + chan_notify.err_desc = err & 0xFFFF; + if (log->code == GSI_INVALID_TRE_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + BUG(); + } + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(log->virt_idx, + gsi_ctx->per.ee)); + ch->state = (val & + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >> + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT; + GSIDBG("ch %u state updated to %u\n", log->virt_idx, + ch->state); + ch->stats.invalid_tre_error++; + if (ch->state != GSI_CHAN_STATE_ERROR) { + GSIERR("Unexpected channel state %d\n", + ch->state); + BUG(); + } + chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR; + } else if (log->code == GSI_OUT_OF_BUFFERS_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + BUG(); + } + chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR; + } else if (log->code == GSI_OUT_OF_RESOURCES_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + BUG(); + } + chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR; + complete(&ch->compl); + } else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) { + chan_notify.evt_id = + GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR; + } else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + BUG(); + } + chan_notify.evt_id = + GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR; + } else if (log->code == GSI_HWO_1_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + BUG(); + } + chan_notify.evt_id = GSI_CHAN_HWO_1_ERR; + } else { + GSIERR("unexpected event log code %d\n", log->code); + BUG(); + } + ch->props.err_cb(&chan_notify); + break; + case GSI_ERR_TYPE_EVT: + if (WARN_ON(log->virt_idx >= gsi_ctx->max_ev)) { + GSIERR("Unexpected ev %d\n", log->virt_idx); + return; + } + + ev = &gsi_ctx->evtr[log->virt_idx]; + evt_notify.user_data = ev->props.user_data; + evt_notify.err_desc = err & 0xFFFF; + if (log->code == GSI_OUT_OF_BUFFERS_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in 
event %d\n", log->ee); + BUG(); + } + evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR; + } else if (log->code == GSI_OUT_OF_RESOURCES_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + BUG(); + } + evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR; + complete(&ev->compl); + } else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) { + evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR; + } else if (log->code == GSI_EVT_RING_EMPTY_ERR) { + if (log->ee != gsi_ctx->per.ee) { + GSIERR("unexpected EE in event %d\n", log->ee); + BUG(); + } + evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR; + } else { + GSIERR("unexpected event log code %d\n", log->code); + BUG(); + } + ev->props.err_cb(&evt_notify); + break; + } +} + +static void gsi_handle_gp_int1(void) +{ + complete(&gsi_ctx->gen_ee_cmd_compl); +} + +static void gsi_handle_glob_ee(int ee) +{ + uint32_t val; + uint32_t err; + struct gsi_per_notify notify; + uint32_t clr = ~0; + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(ee)); + + notify.user_data = gsi_ctx->per.user_data; + + if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) { + err = gsi_readl(gsi_ctx->base + + GSI_EE_n_ERROR_LOG_OFFS(ee)); + if (gsi_ctx->per.ver >= GSI_VER_1_2) + gsi_writel(0, gsi_ctx->base + + GSI_EE_n_ERROR_LOG_OFFS(ee)); + gsi_writel(clr, gsi_ctx->base + + GSI_EE_n_ERROR_LOG_CLR_OFFS(ee)); + gsi_handle_glob_err(err); + } + + if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK) + gsi_handle_gp_int1(); + + if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) { + notify.evt_id = GSI_PER_EVT_GLOB_GP2; + gsi_ctx->per.notify_cb(¬ify); + } + + if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK) { + notify.evt_id = GSI_PER_EVT_GLOB_GP3; + gsi_ctx->per.notify_cb(¬ify); + } + + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(ee)); +} + +static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx) +{ + ctx->wp_local += ctx->elem_sz; + if (ctx->wp_local == ctx->end) + ctx->wp_local = ctx->base; +} + +static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx) +{ + ctx->rp_local += ctx->elem_sz; + if (ctx->rp_local == ctx->end) + ctx->rp_local = ctx->base; +} + +uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr) +{ + WARN_ON(addr < ctx->base || addr >= ctx->end); + return (uint32_t)(addr - ctx->base) / ctx->elem_sz; +} + +static void gsi_process_chan(struct gsi_xfer_compl_evt *evt, + struct gsi_chan_xfer_notify *notify, bool callback) +{ + uint32_t ch_id; + struct gsi_chan_ctx *ch_ctx; + uint16_t rp_idx; + uint64_t rp; + + ch_id = evt->chid; + if (WARN_ON(ch_id >= gsi_ctx->max_ch)) { + GSIERR("Unexpected ch %d\n", ch_id); + return; + } + + ch_ctx = &gsi_ctx->chan[ch_id]; + if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI)) + return; + + rp = evt->xfer_ptr; + + while (ch_ctx->ring.rp_local != rp) { + gsi_incr_ring_rp(&ch_ctx->ring); + ch_ctx->stats.completed++; + } + + /* the element at RP is also processed */ + gsi_incr_ring_rp(&ch_ctx->ring); + ch_ctx->stats.completed++; + + ch_ctx->ring.rp = ch_ctx->ring.rp_local; + rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp); + notify->xfer_user_data = ch_ctx->user_data[rp_idx]; + notify->chan_user_data = ch_ctx->props.chan_user_data; + notify->evt_id = evt->code; + notify->bytes_xfered = evt->len; + if (callback) { + if (atomic_read(&ch_ctx->poll_mode)) { + /* + * This is not expected, HW triggers an interrupt + * while interrupts are masked. 
+ */ + GSIERR("Calling client callback in polling mode\n"); + BUG(); + } + ch_ctx->props.xfer_cb(notify); + } +} + +static void gsi_process_evt_re(struct gsi_evt_ctx *ctx, + struct gsi_chan_xfer_notify *notify, bool callback) +{ + struct gsi_xfer_compl_evt *evt; + uint16_t idx; + + idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.rp_local); + evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va + + idx * ctx->ring.elem_sz); + gsi_process_chan(evt, notify, callback); + gsi_incr_ring_rp(&ctx->ring); + /* recycle this element */ + gsi_incr_ring_wp(&ctx->ring); + ctx->stats.completed++; +} + +static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx) +{ + uint32_t val; + + /* write order MUST be MSB followed by LSB */ + val = ((ctx->ring.wp_local >> 32) & + GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) << + GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(ctx->id, + gsi_ctx->per.ee)); + + val = (ctx->ring.wp_local & + GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) << + GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(ctx->id, + gsi_ctx->per.ee)); +} + +static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx) +{ + uint32_t val; + + /* + * allocate new events for this channel first + * before submitting the new TREs. + * for TO_GSI channels the event ring doorbell is rang as part of + * interrupt handling. + */ + if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI) + gsi_ring_evt_doorbell(ctx->evtr); + ctx->ring.wp = ctx->ring.wp_local; + + /* write order MUST be MSB followed by LSB */ + val = ((ctx->ring.wp_local >> 32) & + GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) << + GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id, + gsi_ctx->per.ee)); + + val = (ctx->ring.wp_local & + GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) << + GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(ctx->props.ch_id, + gsi_ctx->per.ee)); +} + +static void gsi_handle_ieob(int ee) +{ + uint32_t ch; + int i; + uint64_t rp; + struct gsi_evt_ctx *ctx; + struct gsi_chan_xfer_notify notify; + unsigned long flags; + unsigned long cntr; + uint32_t msk; + + ch = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(ee)); + msk = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee)); + gsi_writel(ch & msk, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee)); + + for (i = 0; i < GSI_STTS_REG_BITS; i++) { + if ((1 << i) & ch & msk) { + if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) { + GSIERR("invalid event %d\n", i); + break; + } + ctx = &gsi_ctx->evtr[i]; + + /* + * Don't handle MSI interrupts, only handle IEOB + * IRQs + */ + if (ctx->props.intr == GSI_INTR_MSI) + continue; + + if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) { + GSIERR("Unexpected irq intf %d\n", + ctx->props.intf); + BUG(); + } + spin_lock_irqsave(&ctx->ring.slock, flags); +check_again: + cntr = 0; + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee)); + rp |= ctx->ring.rp & 0xFFFFFFFF00000000; + + ctx->ring.rp = rp; + while (ctx->ring.rp_local != rp) { + ++cntr; + if (ctx->props.exclusive && + atomic_read(&ctx->chan->poll_mode)) { + cntr = 0; + break; + } + gsi_process_evt_re(ctx, ¬ify, true); + } + gsi_ring_evt_doorbell(ctx); + if (cntr != 0) + goto check_again; + 
spin_unlock_irqrestore(&ctx->ring.slock, flags); + } + } +} + +static void gsi_handle_inter_ee_ch_ctrl(int ee) +{ + uint32_t ch; + int i; + + ch = gsi_readl(gsi_ctx->base + + GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(ee)); + gsi_writel(ch, gsi_ctx->base + + GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(ee)); + for (i = 0; i < GSI_STTS_REG_BITS; i++) { + if ((1 << i) & ch) { + /* not currently expected */ + GSIERR("ch %u was inter-EE changed\n", i); + } + } +} + +static void gsi_handle_inter_ee_ev_ctrl(int ee) +{ + uint32_t ch; + int i; + + ch = gsi_readl(gsi_ctx->base + + GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(ee)); + gsi_writel(ch, gsi_ctx->base + + GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(ee)); + for (i = 0; i < GSI_STTS_REG_BITS; i++) { + if ((1 << i) & ch) { + /* not currently expected */ + GSIERR("evt %u was inter-EE changed\n", i); + } + } +} + +static void gsi_handle_general(int ee) +{ + uint32_t val; + struct gsi_per_notify notify; + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(ee)); + + notify.user_data = gsi_ctx->per.user_data; + + if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK) + notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW; + + if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK) + notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW; + + if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK) + notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR; + + if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK) + notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT; + + if (gsi_ctx->per.notify_cb) + gsi_ctx->per.notify_cb(¬ify); + + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(ee)); +} + +#define GSI_ISR_MAX_ITER 50 + +static void gsi_handle_irq(void) +{ + uint32_t type; + int ee = gsi_ctx->per.ee; + unsigned long cnt = 0; + + while (1) { + type = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(ee)); + + if (!type) + break; + + GSIDBG_LOW("type %x\n", type); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK) + gsi_handle_ch_ctrl(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK) + gsi_handle_ev_ctrl(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK) + gsi_handle_glob_ee(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK) + gsi_handle_ieob(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK) + gsi_handle_inter_ee_ch_ctrl(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK) + gsi_handle_inter_ee_ev_ctrl(ee); + + if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK) + gsi_handle_general(ee); + + if (++cnt > GSI_ISR_MAX_ITER) + /* + * Max number of spurious interrupts from hardware. + * Unexpected hardware state. 
+ */ + GSIERR("Too many spurious interrupt from GSI HW\n"); + BUG(); + } +} + +static irqreturn_t gsi_isr(int irq, void *ctxt) +{ + if (gsi_ctx->per.req_clk_cb) { + bool granted = false; + + gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted); + if (granted) { + gsi_handle_irq(); + gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data); + } + } else { + gsi_handle_irq(); + } + return IRQ_HANDLED; + +} + +static uint32_t gsi_get_max_channels(enum gsi_ver ver) +{ + uint32_t reg; + + switch (ver) { + case GSI_VER_ERR: + case GSI_VER_MAX: + GSIERR("GSI version is not supported %d\n", ver); + WARN_ON(1); + break; + case GSI_VER_1_0: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >> + GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT; + break; + case GSI_VER_1_2: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >> + GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT; + break; + case GSI_VER_1_3: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; + break; + case GSI_VER_2_0: + reg = gsi_readl(gsi_ctx->base + + GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> + GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; + break; + } + + GSIDBG("max channels %d\n", reg); + + return reg; +} + +static uint32_t gsi_get_max_event_rings(enum gsi_ver ver) +{ + uint32_t reg; + + switch (ver) { + case GSI_VER_ERR: + case GSI_VER_MAX: + GSIERR("GSI version is not supported %d\n", ver); + WARN_ON(1); + break; + case GSI_VER_1_0: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >> + GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT; + break; + case GSI_VER_1_2: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >> + GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT; + break; + case GSI_VER_1_3: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; + break; + case GSI_VER_2_0: + reg = gsi_readl(gsi_ctx->base + + GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> + GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; + break; + } + + GSIDBG("max event rings %d\n", reg); + + return reg; +} +int gsi_complete_clk_grant(unsigned long dev_hdl) +{ + unsigned long flags; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!gsi_ctx->per_registered) { + GSIERR("no client registered\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (dev_hdl != (uintptr_t)gsi_ctx) { + GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl, + gsi_ctx); + return -GSI_STATUS_INVALID_PARAMS; + } + + spin_lock_irqsave(&gsi_ctx->slock, flags); + gsi_handle_irq(); + gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data); + spin_unlock_irqrestore(&gsi_ctx->slock, flags); + + return GSI_STATUS_SUCCESS; +} 
+EXPORT_SYMBOL(gsi_complete_clk_grant); + +int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl) +{ + int res; + uint32_t val; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || !dev_hdl) { + GSIERR("bad params props=%pK dev_hdl=%pK\n", props, dev_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) { + GSIERR("bad params gsi_ver=%d\n", props->ver); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (!props->notify_cb) { + GSIERR("notify callback must be provided\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->req_clk_cb && !props->rel_clk_cb) { + GSIERR("rel callback must be provided\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_ctx->per_registered) { + GSIERR("per already registered\n"); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + spin_lock_init(&gsi_ctx->slock); + if (props->intr == GSI_INTR_IRQ) { + if (!props->irq) { + GSIERR("bad irq specified %u\n", props->irq); + return -GSI_STATUS_INVALID_PARAMS; + } + + res = devm_request_irq(gsi_ctx->dev, props->irq, + gsi_isr, + props->req_clk_cb ? IRQF_TRIGGER_RISING : + IRQF_TRIGGER_HIGH, + "gsi", + gsi_ctx); + if (res) { + GSIERR("failed to register isr for %u\n", props->irq); + return -GSI_STATUS_ERROR; + } + + res = enable_irq_wake(props->irq); + if (res) + GSIERR("failed to enable wake irq %u\n", props->irq); + else + GSIERR("GSI irq is wake enabled %u\n", props->irq); + + } else { + GSIERR("do not support interrupt type %u\n", props->intr); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + gsi_ctx->base = devm_ioremap_nocache(gsi_ctx->dev, props->phys_addr, + props->size); + if (!gsi_ctx->base) { + GSIERR("failed to remap GSI HW\n"); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + + gsi_ctx->per = *props; + gsi_ctx->per_registered = true; + mutex_init(&gsi_ctx->mlock); + atomic_set(&gsi_ctx->num_chan, 0); + atomic_set(&gsi_ctx->num_evt_ring, 0); + gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver); + if (gsi_ctx->max_ch == 0) { + GSIERR("failed to get max channels\n"); + return -GSI_STATUS_ERROR; + } + gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver); + if (gsi_ctx->max_ev == 0) { + GSIERR("failed to get max event rings\n"); + return -GSI_STATUS_ERROR; + } + + /* bitmap is max events excludes reserved events */ + gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1); + gsi_ctx->evt_bmap |= ((1 << (GSI_MHI_ER_END + 1)) - 1) ^ + ((1 << GSI_MHI_ER_START) - 1); + + /* + * enable all interrupts but GSI_BREAK_POINT. + * Inter EE commands / interrupt are no supported. 
+ */ + __gsi_config_type_irq(props->ee, ~0, ~0); + __gsi_config_ch_irq(props->ee, ~0, ~0); + __gsi_config_evt_irq(props->ee, ~0, ~0); + __gsi_config_ieob_irq(props->ee, ~0, ~0); + __gsi_config_glob_irq(props->ee, ~0, ~0); + __gsi_config_gen_irq(props->ee, ~0, + ~GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK); + + gsi_writel(props->intr, gsi_ctx->base + + GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee)); + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee)); + if (val & GSI_EE_n_GSI_STATUS_ENABLED_BMSK) + gsi_ctx->enabled = true; + else + GSIERR("Manager EE has not enabled GSI, GSI un-usable\n"); + + if (gsi_ctx->per.ver >= GSI_VER_1_2) + gsi_writel(0, gsi_ctx->base + + GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee)); + + *dev_hdl = (uintptr_t)gsi_ctx; + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_register_device); + +int gsi_write_device_scratch(unsigned long dev_hdl, + struct gsi_device_scratch *val) +{ + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!gsi_ctx->per_registered) { + GSIERR("no client registered\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (dev_hdl != (uintptr_t)gsi_ctx) { + GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl, + gsi_ctx); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (val->max_usb_pkt_size_valid && + val->max_usb_pkt_size != 1024 && + val->max_usb_pkt_size != 512) { + GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl, + val->max_usb_pkt_size); + return -GSI_STATUS_INVALID_PARAMS; + } + + mutex_lock(&gsi_ctx->mlock); + if (val->mhi_base_chan_idx_valid) + gsi_ctx->scratch.word0.s.mhi_base_chan_idx = + val->mhi_base_chan_idx; + if (val->max_usb_pkt_size_valid) + gsi_ctx->scratch.word0.s.max_usb_pkt_size = + (val->max_usb_pkt_size == 1024) ? 
1 : 0; + gsi_writel(gsi_ctx->scratch.word0.val, + gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + mutex_unlock(&gsi_ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_write_device_scratch); + +int gsi_deregister_device(unsigned long dev_hdl, bool force) +{ + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!gsi_ctx->per_registered) { + GSIERR("no client registered\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (dev_hdl != (uintptr_t)gsi_ctx) { + GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl, + gsi_ctx); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (!force && atomic_read(&gsi_ctx->num_chan)) { + GSIERR("cannot deregister %u channels are still connected\n", + atomic_read(&gsi_ctx->num_chan)); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (!force && atomic_read(&gsi_ctx->num_evt_ring)) { + GSIERR("cannot deregister %u events are still connected\n", + atomic_read(&gsi_ctx->num_evt_ring)); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + /* disable all interrupts */ + __gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0); + __gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0); + __gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0); + __gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0); + __gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0); + __gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0); + + devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx); + devm_iounmap(gsi_ctx->dev, gsi_ctx->base); + memset(gsi_ctx, 0, sizeof(*gsi_ctx)); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_deregister_device); + +static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props, + uint8_t evt_id, unsigned int ee) +{ + uint32_t val; + + GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr, + props->re_size); + + val = (((props->intf << GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT) & + GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK) | + ((props->intr << GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT) & + GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK) | + ((props->re_size << GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT) + & GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK)); + + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(evt_id, ee)); + + val = (props->ring_len & GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee)); + + val = (props->ring_base_addr & + GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(evt_id, ee)); + + val = ((props->ring_base_addr >> 32) & + GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(evt_id, ee)); + + val = (((props->int_modt << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT) & + GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK) | + ((props->int_modc << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT) & + GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(evt_id, ee)); + + val = (props->intvec & GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(evt_id, ee)); + + val = (props->msi_addr & GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT; + gsi_writel(val, gsi_ctx->base + + 
GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_id, ee)); + + val = ((props->msi_addr >> 32) & + GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_id, ee)); + + val = (props->rp_update_addr & + GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_id, ee)); + + val = ((props->rp_update_addr >> 32) & + GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK) << + GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_id, ee)); +} + +static void gsi_init_evt_ring(struct gsi_evt_ring_props *props, + struct gsi_ring_ctx *ctx) +{ + ctx->base_va = (uintptr_t)props->ring_base_vaddr; + ctx->base = props->ring_base_addr; + ctx->wp = ctx->base; + ctx->rp = ctx->base; + ctx->wp_local = ctx->base; + ctx->rp_local = ctx->base; + ctx->len = props->ring_len; + ctx->elem_sz = props->re_size; + ctx->max_num_elem = ctx->len / ctx->elem_sz - 1; + ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz; +} + +static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx) +{ + unsigned long flags; + + spin_lock_irqsave(&ctx->ring.slock, flags); + memset((void *)ctx->ring.base_va, 0, ctx->ring.len); + ctx->ring.wp_local = ctx->ring.base + + ctx->ring.max_num_elem * ctx->ring.elem_sz; + gsi_ring_evt_doorbell(ctx); + spin_unlock_irqrestore(&ctx->ring.slock, flags); +} + +static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props) +{ + uint64_t ra; + + if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B && + props->ring_len % 4) || + (props->re_size == GSI_EVT_RING_RE_SIZE_16B && + props->ring_len % 16)) { + GSIERR("bad params ring_len %u not a multiple of RE size %u\n", + props->ring_len, props->re_size); + return -GSI_STATUS_INVALID_PARAMS; + } + + ra = props->ring_base_addr; + do_div(ra, roundup_pow_of_two(props->ring_len)); + + if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) { + GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n", + props->ring_base_addr, + roundup_pow_of_two(props->ring_len)); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->intf == GSI_EVT_CHTYPE_GPI_EV && + !props->ring_base_vaddr) { + GSIERR("protocol %u requires ring base VA\n", props->intf); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->intf == GSI_EVT_CHTYPE_MHI_EV && + (!props->evchid_valid || + props->evchid > GSI_MHI_ER_END || + props->evchid < GSI_MHI_ER_START)) { + GSIERR("MHI requires evchid valid=%d val=%u\n", + props->evchid_valid, props->evchid); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->intf != GSI_EVT_CHTYPE_MHI_EV && + props->evchid_valid) { + GSIERR("protocol %u cannot specify evchid\n", props->intf); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (!props->err_cb) { + GSIERR("err callback must be provided\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + return GSI_STATUS_SUCCESS; +} + +int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl, + unsigned long *evt_ring_hdl) +{ + unsigned long evt_id; + enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE; + uint32_t val; + struct gsi_evt_ctx *ctx; + int res; + int ee; + unsigned long flags; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) { + GSIERR("bad params 
props=%pK dev_hdl=0x%lx evt_ring_hdl=%pK\n", + props, dev_hdl, evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_validate_evt_ring_props(props)) { + GSIERR("invalid params\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (!props->evchid_valid) { + mutex_lock(&gsi_ctx->mlock); + evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap, + sizeof(unsigned long) * BITS_PER_BYTE); + if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) { + GSIERR("failed to alloc event ID\n"); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + set_bit(evt_id, &gsi_ctx->evt_bmap); + mutex_unlock(&gsi_ctx->mlock); + } else { + evt_id = props->evchid; + } + GSIDBG("Using %lu as virt evt id\n", evt_id); + + ctx = &gsi_ctx->evtr[evt_id]; + memset(ctx, 0, sizeof(*ctx)); + mutex_init(&ctx->mlock); + init_completion(&ctx->compl); + atomic_set(&ctx->chan_ref_cnt, 0); + ctx->props = *props; + + mutex_lock(&gsi_ctx->mlock); + val = (((evt_id << GSI_EE_n_EV_CH_CMD_CHID_SHFT) & + GSI_EE_n_EV_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_EV_CH_CMD_OPCODE_BMSK)); + ee = gsi_ctx->per.ee; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_CMD_OFFS(ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("evt_id=%lu timed out\n", evt_id); + if (!props->evchid_valid) + clear_bit(evt_id, &gsi_ctx->evt_bmap); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("evt_id=%lu allocation failed state=%u\n", + evt_id, ctx->state); + if (!props->evchid_valid) + clear_bit(evt_id, &gsi_ctx->evt_bmap); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + + gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee); + + spin_lock_init(&ctx->ring.slock); + gsi_init_evt_ring(props, &ctx->ring); + + ctx->id = evt_id; + *evt_ring_hdl = evt_id; + atomic_inc(&gsi_ctx->num_evt_ring); + if (props->intf == GSI_EVT_CHTYPE_GPI_EV) + gsi_prime_evt_ring(ctx); + mutex_unlock(&gsi_ctx->mlock); + + spin_lock_irqsave(&gsi_ctx->slock, flags); + gsi_writel(1 << evt_id, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee)); + + /* enable ieob interrupts for GPI, enable MSI interrupts */ + if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) && + (props->intr != GSI_INTR_MSI)) + __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0); + else + __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0); + spin_unlock_irqrestore(&gsi_ctx->slock, flags); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_alloc_evt_ring); + +static void __gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl, + union __packed gsi_evt_scratch val) +{ + gsi_writel(val.data.word1, gsi_ctx->base + + GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(evt_ring_hdl, + gsi_ctx->per.ee)); + gsi_writel(val.data.word2, gsi_ctx->base + + GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(evt_ring_hdl, + gsi_ctx->per.ee)); +} + +int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl, + union __packed gsi_evt_scratch val) +{ + struct gsi_evt_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", + gsi_ctx->evtr[evt_ring_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + 
mutex_lock(&ctx->mlock); + ctx->scratch = val; + __gsi_write_evt_ring_scratch(evt_ring_hdl, val); + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_write_evt_ring_scratch); + +int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl) +{ + uint32_t val; + enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC; + struct gsi_evt_ctx *ctx; + int res; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (atomic_read(&ctx->chan_ref_cnt)) { + GSIERR("%d channels still using this event ring\n", + atomic_read(&ctx->chan_ref_cnt)); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) & + GSI_EE_n_EV_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_EV_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("evt_id=%lu timed out\n", evt_ring_hdl); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + + if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) { + GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl, + ctx->state); + /* + * IPA Hardware returned GSI RING not allocated, which is + * unexpected hardware state. + */ + BUG(); + } + mutex_unlock(&gsi_ctx->mlock); + + if (!ctx->props.evchid_valid) { + mutex_lock(&gsi_ctx->mlock); + clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap); + mutex_unlock(&gsi_ctx->mlock); + } + atomic_dec(&gsi_ctx->num_evt_ring); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_dealloc_evt_ring); + +int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl, + uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb) +{ + struct gsi_evt_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!db_addr_wp_msb || !db_addr_wp_lsb) { + GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb, + db_addr_wp_lsb); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", + gsi_ctx->evtr[evt_ring_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + *db_addr_wp_lsb = gsi_ctx->per.phys_addr + + GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(evt_ring_hdl, gsi_ctx->per.ee); + *db_addr_wp_msb = gsi_ctx->per.phys_addr + + GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(evt_ring_hdl, gsi_ctx->per.ee); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_query_evt_ring_db_addr); + +int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value) +{ + struct gsi_evt_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state != 
GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", + gsi_ctx->evtr[evt_ring_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + ctx->ring.wp_local = value; + gsi_ring_evt_doorbell(ctx); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_ring_evt_ring_db); + +int gsi_reset_evt_ring(unsigned long evt_ring_hdl) +{ + uint32_t val; + enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET; + struct gsi_evt_ctx *ctx; + int res; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) & + GSI_EE_n_EV_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_EV_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("evt_id=%lu timed out\n", evt_ring_hdl); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl, + ctx->state); + /* + * IPA Hardware returned GSI RING not allocated, which is + * unexpected. Indicates hardware instability. + */ + BUG(); + } + + gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee); + gsi_init_evt_ring(&ctx->props, &ctx->ring); + + /* restore scratch */ + __gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch); + + if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV) + gsi_prime_evt_ring(ctx); + mutex_unlock(&gsi_ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_reset_evt_ring); + +int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl, + struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr) +{ + struct gsi_evt_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || !scr) { + GSIERR("bad params props=%pK scr=%pK\n", props, scr); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&ctx->mlock); + *props = ctx->props; + *scr = ctx->scratch; + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_get_evt_ring_cfg); + +int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl, + struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr) +{ + struct gsi_evt_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || gsi_validate_evt_ring_props(props)) { + GSIERR("bad params props=%pK\n", props); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state != 
GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->props.exclusive != props->exclusive) { + GSIERR("changing immutable fields not supported\n"); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&ctx->mlock); + ctx->props = *props; + if (scr) + ctx->scratch = *scr; + mutex_unlock(&ctx->mlock); + + return gsi_reset_evt_ring(evt_ring_hdl); +} +EXPORT_SYMBOL(gsi_set_evt_ring_cfg); + +static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee, + uint8_t erindex) +{ + uint32_t val; + + val = (((props->prot << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT) + & GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK) | + ((props->dir << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT) & + GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK) | + ((erindex << GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT) & + GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK) | + ((props->re_size << GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT) + & GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(props->ch_id, ee)); + + val = (props->ring_len & GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK) << + GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(props->ch_id, ee)); + + val = (props->ring_base_addr & + GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) << + GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(props->ch_id, ee)); + + val = ((props->ring_base_addr >> 32) & + GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) << + GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT; + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(props->ch_id, ee)); + + val = (((props->low_weight << GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) & + GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) | + ((props->max_prefetch << + GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) & + GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) | + ((props->use_db_eng << GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) & + GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee)); +} + +static void gsi_init_chan_ring(struct gsi_chan_props *props, + struct gsi_ring_ctx *ctx) +{ + ctx->base_va = (uintptr_t)props->ring_base_vaddr; + ctx->base = props->ring_base_addr; + ctx->wp = ctx->base; + ctx->rp = ctx->base; + ctx->wp_local = ctx->base; + ctx->rp_local = ctx->base; + ctx->len = props->ring_len; + ctx->elem_sz = props->re_size; + ctx->max_num_elem = ctx->len / ctx->elem_sz - 1; + ctx->end = ctx->base + (ctx->max_num_elem + 1) * + ctx->elem_sz; +} + +static int gsi_validate_channel_props(struct gsi_chan_props *props) +{ + uint64_t ra; + uint64_t last; + + if (props->ch_id >= gsi_ctx->max_ch) { + GSIERR("ch_id %u invalid\n", props->ch_id); + return -GSI_STATUS_INVALID_PARAMS; + } + + if ((props->re_size == GSI_CHAN_RE_SIZE_4B && + props->ring_len % 4) || + (props->re_size == GSI_CHAN_RE_SIZE_16B && + props->ring_len % 16) || + (props->re_size == GSI_CHAN_RE_SIZE_32B && + props->ring_len % 32)) { + GSIERR("bad params ring_len %u not a multiple of re size %u\n", + props->ring_len, props->re_size); + return -GSI_STATUS_INVALID_PARAMS; + } + + ra = props->ring_base_addr; + do_div(ra, roundup_pow_of_two(props->ring_len)); + + if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) { + GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n", + props->ring_base_addr, + 
roundup_pow_of_two(props->ring_len)); + return -GSI_STATUS_INVALID_PARAMS; + } + + last = props->ring_base_addr + props->ring_len - props->re_size; + + /* MSB should stay same within the ring */ + if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) != + (last & 0xFFFFFFFF00000000ULL)) { + GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n", + props->ring_base_addr, + props->ring_len); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->prot == GSI_CHAN_PROT_GPI && + !props->ring_base_vaddr) { + GSIERR("protocol %u requires ring base VA\n", props->prot); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) { + GSIERR("invalid channel low weight %u\n", props->low_weight); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) { + GSIERR("xfer callback must be provided\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (!props->err_cb) { + GSIERR("err callback must be provided\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + return GSI_STATUS_SUCCESS; +} + +int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl, + unsigned long *chan_hdl) +{ + struct gsi_chan_ctx *ctx; + uint32_t val; + int res; + int ee; + enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE; + uint8_t erindex; + void **user_data; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) { + GSIERR("bad params props=%pK dev_hdl=0x%lx chan_hdl=%pK\n", + props, dev_hdl, chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_validate_channel_props(props)) { + GSIERR("bad params\n"); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (props->evt_ring_hdl != ~0) { + if (props->evt_ring_hdl >= GSI_EVT_RING_MAX) { + GSIERR("invalid evt ring=%lu\n", props->evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (atomic_read( + &gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) && + gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive) { + GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n", + props->evt_ring_hdl, chan_hdl); + return -GSI_STATUS_UNSUPPORTED_OP; + } + } + + ctx = &gsi_ctx->chan[props->ch_id]; + if (ctx->allocated) { + GSIERR("chan %d already allocated\n", props->ch_id); + return -GSI_STATUS_NODEV; + } + + memset(ctx, 0, sizeof(*ctx)); + user_data = devm_kzalloc(gsi_ctx->dev, + (props->ring_len / props->re_size) * sizeof(void *), + GFP_KERNEL); + if (user_data == NULL) { + GSIERR("context not allocated\n"); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + + mutex_init(&ctx->mlock); + init_completion(&ctx->compl); + atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK); + ctx->props = *props; + + mutex_lock(&gsi_ctx->mlock); + ee = gsi_ctx->per.ee; + gsi_ctx->ch_dbg[props->ch_id].ch_allocate++; + val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("chan_hdl=%u timed out\n", props->ch_id); + mutex_unlock(&gsi_ctx->mlock); + devm_kfree(gsi_ctx->dev, user_data); + return -GSI_STATUS_TIMED_OUT; + } + if (ctx->state != GSI_CHAN_STATE_ALLOCATED) { + GSIERR("chan_hdl=%u allocation failed state=%d\n", + props->ch_id, ctx->state); + mutex_unlock(&gsi_ctx->mlock); + devm_kfree(gsi_ctx->dev, user_data); + return 
-GSI_STATUS_RES_ALLOC_FAILURE; + } + mutex_unlock(&gsi_ctx->mlock); + + erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl : + GSI_NO_EVT_ERINDEX; + if (erindex != GSI_NO_EVT_ERINDEX) { + ctx->evtr = &gsi_ctx->evtr[erindex]; + atomic_inc(&ctx->evtr->chan_ref_cnt); + if (ctx->evtr->props.exclusive) + ctx->evtr->chan = ctx; + } + + gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex); + + spin_lock_init(&ctx->ring.slock); + gsi_init_chan_ring(props, &ctx->ring); + if (!props->max_re_expected) + ctx->props.max_re_expected = ctx->ring.max_num_elem; + ctx->user_data = user_data; + *chan_hdl = props->ch_id; + ctx->allocated = true; + ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies); + atomic_inc(&gsi_ctx->num_chan); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_alloc_channel); + +static void __gsi_write_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch val) +{ + uint32_t reg; + + gsi_writel(val.data.word1, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl, + gsi_ctx->per.ee)); + gsi_writel(val.data.word2, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl, + gsi_ctx->per.ee)); + gsi_writel(val.data.word3, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl, + gsi_ctx->per.ee)); + /* below sequence is not atomic. assumption is sequencer specific fields + * will remain unchanged across this sequence + */ + reg = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl, + gsi_ctx->per.ee)); + reg &= 0xFFFF; + reg |= (val.data.word4 & 0xFFFF0000); + gsi_writel(reg, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl, + gsi_ctx->per.ee)); +} + +int gsi_write_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch val) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED && + gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) { + GSIERR("bad state %d\n", + gsi_ctx->chan[chan_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + mutex_lock(&ctx->mlock); + ctx->scratch = val; + __gsi_write_channel_scratch(chan_hdl, val); + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_write_channel_scratch); + +int gsi_query_channel_db_addr(unsigned long chan_hdl, + uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb) +{ + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!db_addr_wp_msb || !db_addr_wp_lsb) { + GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb, + db_addr_wp_lsb); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) { + GSIERR("bad state %d\n", + gsi_ctx->chan[chan_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + *db_addr_wp_lsb = gsi_ctx->per.phys_addr + + GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(chan_hdl, gsi_ctx->per.ee); + *db_addr_wp_msb = gsi_ctx->per.phys_addr + + GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(chan_hdl, gsi_ctx->per.ee); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_query_channel_db_addr); + +int gsi_start_channel(unsigned long chan_hdl) +{ + 
enum gsi_ch_cmd_opcode op = GSI_CH_START; + int res; + uint32_t val; + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state != GSI_CHAN_STATE_ALLOCATED && + ctx->state != GSI_CHAN_STATE_STOP_IN_PROC && + ctx->state != GSI_CHAN_STATE_STOPPED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + + gsi_ctx->ch_dbg[chan_hdl].ch_start++; + val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("chan_hdl=%lu timed out\n", chan_hdl); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + if (ctx->state != GSI_CHAN_STATE_STARTED) { + GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state); + /* + * Hardware returned unexpected status, unexpected + * hardware state. + */ + BUG(); + } + + mutex_unlock(&gsi_ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_start_channel); + +int gsi_stop_channel(unsigned long chan_hdl) +{ + enum gsi_ch_cmd_opcode op = GSI_CH_STOP; + int res; + uint32_t val; + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state == GSI_CHAN_STATE_STOPPED) { + GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl); + return GSI_STATUS_SUCCESS; + } + + if (ctx->state != GSI_CHAN_STATE_STARTED && + ctx->state != GSI_CHAN_STATE_STOP_IN_PROC && + ctx->state != GSI_CHAN_STATE_ERROR) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + + gsi_ctx->ch_dbg[chan_hdl].ch_stop++; + val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, + msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS)); + if (res == 0) { + GSIDBG("chan_hdl=%lu timed out\n", chan_hdl); + res = -GSI_STATUS_TIMED_OUT; + goto free_lock; + } + + if (ctx->state != GSI_CHAN_STATE_STOPPED && + ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) { + GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state); + res = -GSI_STATUS_BAD_STATE; + goto free_lock; + } + + if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) { + GSIERR("chan=%lu busy try again\n", chan_hdl); + res = -GSI_STATUS_AGAIN; + goto free_lock; + } + + res = GSI_STATUS_SUCCESS; + +free_lock: + mutex_unlock(&gsi_ctx->mlock); + return res; +} +EXPORT_SYMBOL(gsi_stop_channel); + +int gsi_stop_db_channel(unsigned long chan_hdl) +{ + enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP; + int res; + uint32_t val; + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", 
__func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state == GSI_CHAN_STATE_STOPPED) { + GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl); + return GSI_STATUS_SUCCESS; + } + + if (ctx->state != GSI_CHAN_STATE_STARTED && + ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + + gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++; + val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, + msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS)); + if (res == 0) { + GSIERR("chan_hdl=%lu timed out\n", chan_hdl); + res = -GSI_STATUS_TIMED_OUT; + goto free_lock; + } + + if (ctx->state != GSI_CHAN_STATE_STOPPED && + ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) { + GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state); + res = -GSI_STATUS_BAD_STATE; + goto free_lock; + } + + if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) { + GSIERR("chan=%lu busy try again\n", chan_hdl); + res = -GSI_STATUS_AGAIN; + goto free_lock; + } + + res = GSI_STATUS_SUCCESS; + +free_lock: + mutex_unlock(&gsi_ctx->mlock); + return res; +} +EXPORT_SYMBOL(gsi_stop_db_channel); + +int gsi_reset_channel(unsigned long chan_hdl) +{ + enum gsi_ch_cmd_opcode op = GSI_CH_RESET; + int res; + uint32_t val; + struct gsi_chan_ctx *ctx; + bool reset_done = false; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state != GSI_CHAN_STATE_STOPPED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + +reset: + reinit_completion(&ctx->compl); + gsi_ctx->ch_dbg[chan_hdl].ch_reset++; + val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("chan_hdl=%lu timed out\n", chan_hdl); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + + if (ctx->state != GSI_CHAN_STATE_ALLOCATED) { + GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl, + ctx->state); + /* + * Hardware returned incorrect state, unexpected + * hardware state. + */ + BUG(); + } + + /* workaround: reset GSI producers again */ + if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) { + usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP); + reset_done = true; + goto reset; + } + + gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee, + ctx->evtr ? 
ctx->evtr->id : GSI_NO_EVT_ERINDEX); + gsi_init_chan_ring(&ctx->props, &ctx->ring); + + /* restore scratch */ + __gsi_write_channel_scratch(chan_hdl, ctx->scratch); + + mutex_unlock(&gsi_ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_reset_channel); + +int gsi_dealloc_channel(unsigned long chan_hdl) +{ + enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC; + int res; + uint32_t val; + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state != GSI_CHAN_STATE_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&ctx->compl); + + gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++; + val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) & + GSI_EE_n_GSI_CH_CMD_CHID_BMSK) | + ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee)); + res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT); + if (res == 0) { + GSIERR("chan_hdl=%lu timed out\n", chan_hdl); + mutex_unlock(&gsi_ctx->mlock); + return -GSI_STATUS_TIMED_OUT; + } + if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) { + GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl, + ctx->state); + /* Hardware returned incorrect value */ + BUG(); + } + + mutex_unlock(&gsi_ctx->mlock); + + devm_kfree(gsi_ctx->dev, ctx->user_data); + ctx->allocated = false; + if (ctx->evtr) + atomic_dec(&ctx->evtr->chan_ref_cnt); + atomic_dec(&gsi_ctx->num_chan); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_dealloc_channel); + +void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used) +{ + unsigned long now = jiffies_to_msecs(jiffies); + unsigned long elapsed; + + if (used == 0) { + elapsed = now - ctx->stats.dp.last_timestamp; + if (ctx->stats.dp.empty_time < elapsed) + ctx->stats.dp.empty_time = elapsed; + } + + if (used <= ctx->props.max_re_expected / 3) + ++ctx->stats.dp.ch_below_lo; + else if (used <= 2 * ctx->props.max_re_expected / 3) + ++ctx->stats.dp.ch_below_hi; + else + ++ctx->stats.dp.ch_above_hi; + ctx->stats.dp.last_timestamp = now; +} + +static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx, + uint16_t *num_free_re) +{ + uint16_t start; + uint16_t end; + uint64_t rp; + int ee = gsi_ctx->per.ee; + uint16_t used; + + if (!ctx->evtr) { + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee)); + rp |= ctx->ring.rp & 0xFFFFFFFF00000000; + + ctx->ring.rp = rp; + } else { + rp = ctx->ring.rp_local; + } + + start = gsi_find_idx_from_addr(&ctx->ring, rp); + end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local); + + if (end >= start) + used = end - start; + else + used = ctx->ring.max_num_elem + 1 - (start - end); + + *num_free_re = ctx->ring.max_num_elem - used; +} + +int gsi_query_channel_info(unsigned long chan_hdl, + struct gsi_chan_info *info) +{ + struct gsi_chan_ctx *ctx; + spinlock_t *slock; + unsigned long flags; + uint64_t rp; + uint64_t wp; + int ee; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch || !info) { + GSIERR("bad params chan_hdl=%lu info=%pK\n", chan_hdl, info); + return -GSI_STATUS_INVALID_PARAMS; + 
} + + ctx = &gsi_ctx->chan[chan_hdl]; + if (ctx->evtr) { + slock = &ctx->evtr->ring.slock; + info->evt_valid = true; + } else { + slock = &ctx->ring.slock; + info->evt_valid = false; + } + + spin_lock_irqsave(slock, flags); + + ee = gsi_ctx->per.ee; + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee)); + rp |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32; + ctx->ring.rp = rp; + info->rp = rp; + + wp = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee)); + wp |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32; + ctx->ring.wp = wp; + info->wp = wp; + + if (info->evt_valid) { + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee)); + rp |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee))) + << 32; + info->evt_rp = rp; + + wp = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee)); + wp |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(ctx->evtr->id, ee))) + << 32; + info->evt_wp = wp; + } + + spin_unlock_irqrestore(slock, flags); + + GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n", + chan_hdl, info->rp, info->wp, + info->evt_valid, info->evt_rp, info->evt_wp); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_query_channel_info); + +int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty) +{ + struct gsi_chan_ctx *ctx; + spinlock_t *slock; + unsigned long flags; + uint64_t rp; + uint64_t wp; + int ee; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch || !is_empty) { + GSIERR("bad params chan_hdl=%lu is_empty=%pK\n", + chan_hdl, is_empty); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + ee = gsi_ctx->per.ee; + + if (ctx->props.prot != GSI_CHAN_PROT_GPI) { + GSIERR("op not supported for protocol %u\n", ctx->props.prot); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->evtr) + slock = &ctx->evtr->ring.slock; + else + slock = &ctx->ring.slock; + + spin_lock_irqsave(slock, flags); + + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee)); + rp |= ctx->ring.rp & 0xFFFFFFFF00000000; + ctx->ring.rp = rp; + + wp = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee)); + wp |= ctx->ring.wp & 0xFFFFFFFF00000000; + ctx->ring.wp = wp; + + if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI) + *is_empty = (ctx->ring.rp_local == rp) ? true : false; + else + *is_empty = (wp == rp) ? 
true : false; + + spin_unlock_irqrestore(slock, flags); + + GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n", + chan_hdl, rp, wp, ctx->ring.rp_local); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_is_channel_empty); + +int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers, + struct gsi_xfer_elem *xfer, bool ring_db) +{ + struct gsi_chan_ctx *ctx; + uint16_t free; + struct gsi_tre tre; + struct gsi_tre *tre_ptr; + uint16_t idx; + uint64_t wp_rollback; + int i; + spinlock_t *slock; + unsigned long flags; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) { + GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n", + chan_hdl, num_xfers, xfer); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->props.prot != GSI_CHAN_PROT_GPI) { + GSIERR("op not supported for protocol %u\n", ctx->props.prot); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->evtr) + slock = &ctx->evtr->ring.slock; + else + slock = &ctx->ring.slock; + + spin_lock_irqsave(slock, flags); + __gsi_query_channel_free_re(ctx, &free); + + if (num_xfers > free) { + GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n", + chan_hdl, num_xfers, free); + spin_unlock_irqrestore(slock, flags); + return -GSI_STATUS_RING_INSUFFICIENT_SPACE; + } + + wp_rollback = ctx->ring.wp_local; + for (i = 0; i < num_xfers; i++) { + memset(&tre, 0, sizeof(tre)); + tre.buffer_ptr = xfer[i].addr; + tre.buf_len = xfer[i].len; + if (xfer[i].type == GSI_XFER_ELEM_DATA) { + tre.re_type = GSI_RE_XFER; + } else if (xfer[i].type == GSI_XFER_ELEM_IMME_CMD) { + tre.re_type = GSI_RE_IMMD_CMD; + } else if (xfer[i].type == GSI_XFER_ELEM_NOP) { + tre.re_type = GSI_RE_NOP; + } else { + GSIERR("chan_hdl=%lu bad RE type=%u\n", chan_hdl, + xfer[i].type); + break; + } + tre.bei = (xfer[i].flags & GSI_XFER_FLAG_BEI) ? 1 : 0; + tre.ieot = (xfer[i].flags & GSI_XFER_FLAG_EOT) ? 1 : 0; + tre.ieob = (xfer[i].flags & GSI_XFER_FLAG_EOB) ? 1 : 0; + tre.chain = (xfer[i].flags & GSI_XFER_FLAG_CHAIN) ? 
1 : 0; + + idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local); + tre_ptr = (struct gsi_tre *)(ctx->ring.base_va + + idx * ctx->ring.elem_sz); + + /* write the TRE to ring */ + *tre_ptr = tre; + ctx->user_data[idx] = xfer[i].xfer_user_data; + gsi_incr_ring_wp(&ctx->ring); + } + + if (i != num_xfers) { + /* reject all the xfers */ + ctx->ring.wp_local = wp_rollback; + spin_unlock_irqrestore(slock, flags); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx->stats.queued += num_xfers; + + /* ensure TRE is set before ringing doorbell */ + wmb(); + + if (ring_db) + gsi_ring_chan_doorbell(ctx); + + spin_unlock_irqrestore(slock, flags); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_queue_xfer); + +int gsi_start_xfer(unsigned long chan_hdl) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->props.prot != GSI_CHAN_PROT_GPI) { + GSIERR("op not supported for protocol %u\n", ctx->props.prot); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->state != GSI_CHAN_STATE_STARTED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->ring.wp == ctx->ring.wp_local) + return GSI_STATUS_SUCCESS; + + gsi_ring_chan_doorbell(ctx); + + return GSI_STATUS_SUCCESS; +}; +EXPORT_SYMBOL(gsi_start_xfer); + +int gsi_poll_channel(unsigned long chan_hdl, + struct gsi_chan_xfer_notify *notify) +{ + struct gsi_chan_ctx *ctx; + uint64_t rp; + int ee; + unsigned long flags; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch || !notify) { + GSIERR("bad param chan_hdl=%lu notify=%pK\n", chan_hdl, notify); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + ee = gsi_ctx->per.ee; + + if (ctx->props.prot != GSI_CHAN_PROT_GPI) { + GSIERR("op not supported for protocol %u\n", ctx->props.prot); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (!ctx->evtr) { + GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + spin_lock_irqsave(&ctx->evtr->ring.slock, flags); + if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) { + /* update rp to see of we have anything new to process */ + rp = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee)); + rp |= ctx->ring.rp & 0xFFFFFFFF00000000; + + ctx->evtr->ring.rp = rp; + } + + if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) { + spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags); + ctx->stats.poll_empty++; + return GSI_STATUS_POLL_EMPTY; + } + + gsi_process_evt_re(ctx->evtr, notify, false); + spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags); + ctx->stats.poll_ok++; + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_poll_channel); + +int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode) +{ + struct gsi_chan_ctx *ctx; + enum gsi_chan_mode curr; + unsigned long flags; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->props.prot != GSI_CHAN_PROT_GPI) { + GSIERR("op not supported 
for protocol %u\n", ctx->props.prot); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (!ctx->evtr || !ctx->evtr->props.exclusive) { + GSIERR("cannot configure mode on chan_hdl=%lu\n", + chan_hdl); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (atomic_read(&ctx->poll_mode)) + curr = GSI_CHAN_MODE_POLL; + else + curr = GSI_CHAN_MODE_CALLBACK; + + if (mode == curr) { + GSIERR("already in requested mode %u chan_hdl=%lu\n", + curr, chan_hdl); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + spin_lock_irqsave(&gsi_ctx->slock, flags); + if (curr == GSI_CHAN_MODE_CALLBACK && + mode == GSI_CHAN_MODE_POLL) { + __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0); + ctx->stats.callback_to_poll++; + } + + if (curr == GSI_CHAN_MODE_POLL && + mode == GSI_CHAN_MODE_CALLBACK) { + __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0); + ctx->stats.poll_to_callback++; + } + atomic_set(&ctx->poll_mode, mode); + spin_unlock_irqrestore(&gsi_ctx->slock, flags); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_config_channel_mode); + +int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props, + union gsi_channel_scratch *scr) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || !scr) { + GSIERR("bad params props=%pK scr=%pK\n", props, scr); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&ctx->mlock); + *props = ctx->props; + *scr = ctx->scratch; + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_get_channel_cfg); + +int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props, + union gsi_channel_scratch *scr) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (!props || gsi_validate_channel_props(props)) { + GSIERR("bad params props=%pK\n", props); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + if (ctx->state != GSI_CHAN_STATE_ALLOCATED) { + GSIERR("bad state %d\n", ctx->state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + if (ctx->props.ch_id != props->ch_id || + ctx->props.evt_ring_hdl != props->evt_ring_hdl) { + GSIERR("changing immutable fields not supported\n"); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + mutex_lock(&ctx->mlock); + ctx->props = *props; + if (scr) + ctx->scratch = *scr; + gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee, + ctx->evtr ? 
ctx->evtr->id : GSI_NO_EVT_ERINDEX); + gsi_init_chan_ring(&ctx->props, &ctx->ring); + + /* restore scratch */ + __gsi_write_channel_scratch(chan_hdl, ctx->scratch); + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_set_channel_cfg); + +static void gsi_configure_ieps(void *base) +{ + void __iomem *gsi_base = base; + + gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS); + gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS); + gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS); + gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS); + gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS); + gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS); + gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS); + gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS); + gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS); + gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS); + gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS); + gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS); + gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS); +} + +static void gsi_configure_bck_prs_matrix(void *base) +{ + void __iomem *gsi_base = base; + /* + * For now, these are default values. In the future, GSI FW image will + * produce optimized back-pressure values based on the FW image. + */ + gsi_writel(0xfffffffe, + gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, + gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffffbf, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffefff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffefff, + gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, + gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS); + gsi_writel(0x00000000, + gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS); + gsi_writel(0x00000000, + gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS); + gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS); + gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS); + gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_READ_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffefff, gsi_base + GSI_IC_READ_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffffff, gsi_base + GSI_IC_WRITE_BCK_PRS_LSB_OFFS); + gsi_writel(0xffffdfff, gsi_base + GSI_IC_WRITE_BCK_PRS_MSB_OFFS); + gsi_writel(0xffffffff, + gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS); + gsi_writel(0xff03ffff, + gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS); +} + +int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size, + phys_addr_t per_base_addr) +{ + void __iomem *gsi_base; + + gsi_base = ioremap_nocache(gsi_base_addr, gsi_size); + if (!gsi_base) { + GSIERR("ioremap failed\n"); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + gsi_writel(0, gsi_base + GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS); + gsi_writel(per_base_addr, + gsi_base + GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS); + gsi_configure_bck_prs_matrix((void *)gsi_base); + gsi_configure_ieps(gsi_base); + 
iounmap(gsi_base); + + return 0; +} +EXPORT_SYMBOL(gsi_configure_regs); + +int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver) +{ + void __iomem *gsi_base; + uint32_t value; + + if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) { + GSIERR("Incorrect version %d\n", ver); + return -GSI_STATUS_ERROR; + } + + gsi_base = ioremap_nocache(gsi_base_addr, gsi_size); + if (!gsi_base) { + GSIERR("ioremap failed\n"); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + + /* Enable the MCS and set to x2 clocks */ + if (ver >= GSI_VER_1_2) { + value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) & + GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK); + gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS); + + value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) & + GSI_GSI_CFG_GSI_ENABLE_BMSK) | + ((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) & + GSI_GSI_CFG_MCS_ENABLE_BMSK) | + ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) & + GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) | + ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) & + GSI_GSI_CFG_UC_IS_MCS_BMSK) | + ((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) & + GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) | + ((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) & + GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK)); + gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS); + } else { + value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) & + GSI_GSI_CFG_GSI_ENABLE_BMSK) | + ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) & + GSI_GSI_CFG_MCS_ENABLE_BMSK) | + ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) & + GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) | + ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) & + GSI_GSI_CFG_UC_IS_MCS_BMSK)); + gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS); + } + + iounmap(gsi_base); + + return 0; + +} +EXPORT_SYMBOL(gsi_enable_fw); + +void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset, + unsigned long *size) +{ + if (base_offset) + *base_offset = GSI_GSI_INST_RAM_n_OFFS(0); + if (size) + *size = GSI_GSI_INST_RAM_n_WORD_SZ * + (GSI_GSI_INST_RAM_n_MAXn + 1); +} +EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size); + +int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code) +{ + enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL; + uint32_t val; + int res; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_idx >= gsi_ctx->max_ch || !code) { + GSIERR("bad params chan_idx=%d\n", chan_idx); + return -GSI_STATUS_INVALID_PARAMS; + } + + mutex_lock(&gsi_ctx->mlock); + reinit_completion(&gsi_ctx->gen_ee_cmd_compl); + + /* invalidate the response */ + gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0; + gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + + gsi_ctx->gen_ee_cmd_dbg.halt_channel++; + val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) & + GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) | + ((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) & + GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) | + ((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) & + GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK)); + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee)); + + res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl, + msecs_to_jiffies(GSI_CMD_TIMEOUT)); + if (res == 0) { + GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee); + res = -GSI_STATUS_TIMED_OUT; + goto free_lock; + } + + gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base + + 
GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) { + GSIERR("No response received\n"); + res = -GSI_STATUS_ERROR; + goto free_lock; + } + + res = GSI_STATUS_SUCCESS; + *code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code; +free_lock: + mutex_unlock(&gsi_ctx->mlock); + + return res; +} +EXPORT_SYMBOL(gsi_halt_channel_ee); + +static int msm_gsi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + pr_debug("gsi_probe\n"); + gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL); + if (!gsi_ctx) { + dev_err(dev, "failed to allocated gsi context\n"); + return -ENOMEM; + } + + gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES, + "gsi", 0); + if (gsi_ctx->ipc_logbuf == NULL) + GSIERR("failed to create IPC log, continue...\n"); + + gsi_ctx->dev = dev; + init_completion(&gsi_ctx->gen_ee_cmd_compl); + gsi_debugfs_init(); + + return 0; +} + +static struct platform_driver msm_gsi_driver = { + .probe = msm_gsi_probe, + .driver = { + .owner = THIS_MODULE, + .name = "gsi", + .of_match_table = msm_gsi_match, + }, +}; + +/** + * Module Init. + */ +static int __init gsi_init(void) +{ + pr_debug("%s\n", __func__); + return platform_driver_register(&msm_gsi_driver); +} + +arch_initcall(gsi_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Generic Software Interface (GSI)"); diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h new file mode 100644 index 000000000000..e0171dca6aba --- /dev/null +++ b/drivers/platform/msm/gsi/gsi.h @@ -0,0 +1,298 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef GSI_H +#define GSI_H + +#include +#include +#include +#include +#include +#include +#include + +#define GSI_CHAN_MAX 31 +#define GSI_EVT_RING_MAX 23 +#define GSI_NO_EVT_ERINDEX 31 + +#define gsi_readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +#define gsi_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); }) + +#define GSI_IPC_LOGGING(buf, fmt, args...) \ + do { \ + if (buf) \ + ipc_log_string((buf), fmt, __func__, __LINE__, \ + ## args); \ + } while (0) + +#define GSIDBG(fmt, args...) \ + do { \ + dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \ + ## args);\ + if (gsi_ctx) { \ + GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \ + "%s:%d " fmt, ## args); \ + GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \ + "%s:%d " fmt, ## args); \ + } \ + } while (0) + +#define GSIDBG_LOW(fmt, args...) \ + do { \ + dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \ + ## args);\ + if (gsi_ctx) { \ + GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \ + "%s:%d " fmt, ## args); \ + } \ + } while (0) + +#define GSIERR(fmt, args...) 
\ + do { \ + dev_err(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \ + ## args);\ + if (gsi_ctx) { \ + GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \ + "%s:%d " fmt, ## args); \ + GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \ + "%s:%d " fmt, ## args); \ + } \ + } while (0) + +#define GSI_IPC_LOG_PAGES 50 + +enum gsi_evt_ring_state { + GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0, + GSI_EVT_RING_STATE_ALLOCATED = 0x1, + GSI_EVT_RING_STATE_ERROR = 0xf +}; + +enum gsi_chan_state { + GSI_CHAN_STATE_NOT_ALLOCATED = 0x0, + GSI_CHAN_STATE_ALLOCATED = 0x1, + GSI_CHAN_STATE_STARTED = 0x2, + GSI_CHAN_STATE_STOPPED = 0x3, + GSI_CHAN_STATE_STOP_IN_PROC = 0x4, + GSI_CHAN_STATE_ERROR = 0xf +}; + +struct gsi_ring_ctx { + spinlock_t slock; + unsigned long base_va; + uint64_t base; + uint64_t wp; + uint64_t rp; + uint64_t wp_local; + uint64_t rp_local; + uint16_t len; + uint8_t elem_sz; + uint16_t max_num_elem; + uint64_t end; +}; + +struct gsi_chan_dp_stats { + unsigned long ch_below_lo; + unsigned long ch_below_hi; + unsigned long ch_above_hi; + unsigned long empty_time; + unsigned long last_timestamp; +}; + +struct gsi_chan_stats { + unsigned long queued; + unsigned long completed; + unsigned long callback_to_poll; + unsigned long poll_to_callback; + unsigned long invalid_tre_error; + unsigned long poll_ok; + unsigned long poll_empty; + struct gsi_chan_dp_stats dp; +}; + +struct gsi_chan_ctx { + struct gsi_chan_props props; + enum gsi_chan_state state; + struct gsi_ring_ctx ring; + void **user_data; + struct gsi_evt_ctx *evtr; + struct mutex mlock; + struct completion compl; + bool allocated; + atomic_t poll_mode; + union __packed gsi_channel_scratch scratch; + struct gsi_chan_stats stats; + bool enable_dp_stats; + bool print_dp_stats; +}; + +struct gsi_evt_stats { + unsigned long completed; +}; + +struct gsi_evt_ctx { + struct gsi_evt_ring_props props; + enum gsi_evt_ring_state state; + uint8_t id; + struct gsi_ring_ctx ring; + struct mutex mlock; + struct completion compl; + struct gsi_chan_ctx *chan; + atomic_t chan_ref_cnt; + union __packed gsi_evt_scratch scratch; + struct gsi_evt_stats stats; +}; + +struct gsi_ee_scratch { + union __packed { + struct { + uint32_t inter_ee_cmd_return_code:3; + uint32_t resvd1:2; + uint32_t generic_ee_cmd_return_code:3; + uint32_t resvd2:7; + uint32_t max_usb_pkt_size:1; + uint32_t resvd3:8; + uint32_t mhi_base_chan_idx:8; + } s; + uint32_t val; + } word0; + uint32_t word1; +}; + +struct ch_debug_stats { + unsigned long ch_allocate; + unsigned long ch_start; + unsigned long ch_stop; + unsigned long ch_reset; + unsigned long ch_de_alloc; + unsigned long ch_db_stop; + unsigned long cmd_completed; +}; + +struct gsi_generic_ee_cmd_debug_stats { + unsigned long halt_channel; +}; + +struct gsi_ctx { + void __iomem *base; + struct device *dev; + struct gsi_per_props per; + bool per_registered; + struct gsi_chan_ctx chan[GSI_CHAN_MAX]; + struct ch_debug_stats ch_dbg[GSI_CHAN_MAX]; + struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX]; + struct gsi_generic_ee_cmd_debug_stats gen_ee_cmd_dbg; + struct mutex mlock; + spinlock_t slock; + unsigned long evt_bmap; + bool enabled; + atomic_t num_chan; + atomic_t num_evt_ring; + struct gsi_ee_scratch scratch; + int num_ch_dp_stats; + struct workqueue_struct *dp_stat_wq; + u32 max_ch; + u32 max_ev; + struct completion gen_ee_cmd_compl; + void *ipc_logbuf; + void *ipc_logbuf_low; +}; + +enum gsi_re_type { + GSI_RE_XFER = 0x2, + GSI_RE_IMMD_CMD = 0x3, + GSI_RE_NOP = 0x4, +}; + +struct __packed gsi_tre { + uint64_t buffer_ptr; + uint16_t buf_len; + uint16_t 
resvd1; + uint16_t chain:1; + uint16_t resvd4:7; + uint16_t ieob:1; + uint16_t ieot:1; + uint16_t bei:1; + uint16_t resvd3:5; + uint8_t re_type; + uint8_t resvd2; +}; + +struct __packed gsi_xfer_compl_evt { + uint64_t xfer_ptr; + uint16_t len; + uint8_t resvd1; + uint8_t code; /* see gsi_chan_evt */ + uint16_t resvd; + uint8_t type; + uint8_t chid; +}; + +enum gsi_err_type { + GSI_ERR_TYPE_GLOB = 0x1, + GSI_ERR_TYPE_CHAN = 0x2, + GSI_ERR_TYPE_EVT = 0x3, +}; + +enum gsi_err_code { + GSI_INVALID_TRE_ERR = 0x1, + GSI_OUT_OF_BUFFERS_ERR = 0x2, + GSI_OUT_OF_RESOURCES_ERR = 0x3, + GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4, + GSI_EVT_RING_EMPTY_ERR = 0x5, + GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6, + GSI_HWO_1_ERR = 0x8 +}; + +struct __packed gsi_log_err { + uint32_t arg3:4; + uint32_t arg2:4; + uint32_t arg1:4; + uint32_t code:4; + uint32_t resvd:3; + uint32_t virt_idx:5; + uint32_t err_type:4; + uint32_t ee:4; +}; + +enum gsi_ch_cmd_opcode { + GSI_CH_ALLOCATE = 0x0, + GSI_CH_START = 0x1, + GSI_CH_STOP = 0x2, + GSI_CH_RESET = 0x9, + GSI_CH_DE_ALLOC = 0xa, + GSI_CH_DB_STOP = 0xb, +}; + +enum gsi_evt_ch_cmd_opcode { + GSI_EVT_ALLOCATE = 0x0, + GSI_EVT_RESET = 0x9, + GSI_EVT_DE_ALLOC = 0xa, +}; + +enum gsi_generic_ee_cmd_opcode { + GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1, +}; + +enum gsi_generic_ee_cmd_return_code { + GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS = 0x1, + GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING = 0x2, + GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_DIRECTION = 0x3, + GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4, + GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5, +}; + +extern struct gsi_ctx *gsi_ctx; +void gsi_debugfs_init(void); +uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr); +void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used); + +#endif diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c new file mode 100644 index 000000000000..b2dd88523fcf --- /dev/null +++ b/drivers/platform/msm/gsi/gsi_dbg.c @@ -0,0 +1,916 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include "gsi_reg.h" +#include "gsi.h" + +#define TERR(fmt, args...) \ + pr_err("%s:%d " fmt, __func__, __LINE__, ## args) +#define TDBG(fmt, args...) \ + pr_debug("%s:%d " fmt, __func__, __LINE__, ## args) +#define PRT_STAT(fmt, args...) 
\ + pr_err(fmt, ## args) + +static struct dentry *dent; +static char dbg_buff[4096]; +static void *gsi_ipc_logbuf_low; + +static void gsi_wq_print_dp_stats(struct work_struct *work); +static DECLARE_DELAYED_WORK(gsi_print_dp_stats_work, gsi_wq_print_dp_stats); +static void gsi_wq_update_dp_stats(struct work_struct *work); +static DECLARE_DELAYED_WORK(gsi_update_dp_stats_work, gsi_wq_update_dp_stats); + +static ssize_t gsi_dump_evt(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + u32 arg1; + u32 arg2; + unsigned long missing; + char *sptr, *token; + uint32_t val; + struct gsi_evt_ctx *ctx; + uint16_t i; + + if (sizeof(dbg_buff) < count + 1) + return -EINVAL; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + + sptr = dbg_buff; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &arg1)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &arg2)) + return -EINVAL; + + TDBG("arg1=%u arg2=%u\n", arg1, arg2); + + if (arg1 >= gsi_ctx->max_ev) { + TERR("invalid evt ring id %u\n", arg1); + return -EINVAL; + } + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX0 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX1 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX2 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX3 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX4 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX5 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX6 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX7 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX8 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX9 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX10 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX11 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX12 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d CTX13 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d SCR0 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(arg1, gsi_ctx->per.ee)); + TERR("EV%2d SCR1 0x%x\n", arg1, val); + + if (arg2) { + ctx = &gsi_ctx->evtr[arg1]; + + if (ctx->props.ring_base_vaddr) { + for (i = 0; i < ctx->props.ring_len / 16; i++) + TERR("EV%2d (0x%08llx) %08x %08x %08x %08x\n", + arg1, ctx->props.ring_base_addr + i * 16, + *(u32 *)((u8 
*)ctx->props.ring_base_vaddr + + i * 16 + 0), + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 4), + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 8), + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 12)); + } else { + TERR("No VA supplied for event ring id %u\n", arg1); + } + } + + return count; +} + +static ssize_t gsi_dump_ch(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + u32 arg1; + u32 arg2; + unsigned long missing; + char *sptr, *token; + uint32_t val; + struct gsi_chan_ctx *ctx; + uint16_t i; + + if (sizeof(dbg_buff) < count + 1) + return -EINVAL; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + + sptr = dbg_buff; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &arg1)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &arg2)) + return -EINVAL; + + TDBG("arg1=%u arg2=%u\n", arg1, arg2); + + if (arg1 >= gsi_ctx->max_ch) { + TERR("invalid chan id %u\n", arg1); + return -EINVAL; + } + + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX0 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX1 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX2 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX3 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX4 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX5 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX6 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d CTX7 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(arg1, + gsi_ctx->per.ee)); + TERR("CH%2d REFRP 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(arg1, + gsi_ctx->per.ee)); + TERR("CH%2d REFWP 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_QOS_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d QOS 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d SCR0 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d SCR1 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d SCR2 0x%x\n", arg1, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(arg1, gsi_ctx->per.ee)); + TERR("CH%2d SCR3 0x%x\n", arg1, val); + + if (arg2) { + ctx = &gsi_ctx->chan[arg1]; + + if (ctx->props.ring_base_vaddr) { + for (i = 0; i < ctx->props.ring_len / 16; i++) + TERR("CH%2d (0x%08llx) %08x %08x %08x %08x\n", + arg1, ctx->props.ring_base_addr + i * 16, + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 0), + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 4), + *(u32 *)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 8), + *(u32 
*)((u8 *)ctx->props.ring_base_vaddr + + i * 16 + 12)); + } else { + TERR("No VA supplied for chan id %u\n", arg1); + } + } + + return count; +} + +static ssize_t gsi_dump_ee(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + uint32_t val; + + val = gsi_readl(gsi_ctx->base + + GSI_GSI_MANAGER_EE_QOS_n_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d QOS 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d STATUS 0x%x\n", gsi_ctx->per.ee, val); + if (gsi_ctx->per.ver == GSI_VER_1_0) { + val = gsi_readl(gsi_ctx->base + + GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val); + } else if (gsi_ctx->per.ver == GSI_VER_1_2) { + val = gsi_readl(gsi_ctx->base + + GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val); + } else if (gsi_ctx->per.ver == GSI_VER_1_3) { + val = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val); + } else if (gsi_ctx->per.ver == GSI_VER_2_0) { + val = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val); + } else { + TERR("EE%2d Invalid Version 0x%x\n", + gsi_ctx->per.ee, + gsi_ctx->per.ver); + } + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_SW_VERSION_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d SW_VERSION 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_MCS_CODE_VER_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d MCS_CODE_VER 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d TYPE_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d CH_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d EV_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d IEOB_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d GLOB_IRQ_EN 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d GSI_IRQ_EN 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d INTSET 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_MSI_BASE_LSB_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d MSI_BASE_LSB 
0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_MSI_BASE_MSB_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d MSI_BASE_MSB 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_INT_VEC_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d INT_VEC 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d SCR0 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SCRATCH_1_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d SCR1 0x%x\n", gsi_ctx->per.ee, val); + + return count; +} + +static ssize_t gsi_dump_map(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct gsi_chan_ctx *ctx; + uint32_t val1; + uint32_t val2; + int i; + + TERR("EVT bitmap 0x%lx\n", gsi_ctx->evt_bmap); + for (i = 0; i < gsi_ctx->max_ch; i++) { + ctx = &gsi_ctx->chan[i]; + + if (ctx->allocated) { + TERR("VIRT CH%2d -> VIRT EV%2d\n", ctx->props.ch_id, + ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX); + val1 = gsi_readl(gsi_ctx->base + + GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(i, + gsi_ctx->per.ee)); + TERR("VIRT CH%2d -> PHYS CH%2d\n", ctx->props.ch_id, + val1 & + GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK); + if (ctx->evtr) { + val2 = gsi_readl(gsi_ctx->base + + GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS( + ctx->evtr->id, gsi_ctx->per.ee)); + TERR("VRT EV%2d -> PHYS EV%2d\n", ctx->evtr->id, + val2 & + GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK); + } + TERR("\n"); + } + } + + return count; +} + +static void gsi_dump_ch_stats(struct gsi_chan_ctx *ctx) +{ + if (!ctx->allocated) + return; + + PRT_STAT("CH%2d:\n", ctx->props.ch_id); + PRT_STAT("queued=%lu compl=%lu\n", + ctx->stats.queued, + ctx->stats.completed); + PRT_STAT("cb->poll=%lu poll->cb=%lu\n", + ctx->stats.callback_to_poll, + ctx->stats.poll_to_callback); + PRT_STAT("invalid_tre_error=%lu\n", + ctx->stats.invalid_tre_error); + PRT_STAT("poll_ok=%lu poll_empty=%lu\n", + ctx->stats.poll_ok, ctx->stats.poll_empty); + if (ctx->evtr) + PRT_STAT("compl_evt=%lu\n", + ctx->evtr->stats.completed); + + PRT_STAT("ch_below_lo=%lu\n", ctx->stats.dp.ch_below_lo); + PRT_STAT("ch_below_hi=%lu\n", ctx->stats.dp.ch_below_hi); + PRT_STAT("ch_above_hi=%lu\n", ctx->stats.dp.ch_above_hi); + PRT_STAT("time_empty=%lums\n", ctx->stats.dp.empty_time); + PRT_STAT("\n"); +} + +static ssize_t gsi_dump_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + int ch_id; + int min, max; + + if (sizeof(dbg_buff) < count + 1) + goto error; + + if (copy_from_user(dbg_buff, buf, count)) + goto error; + + dbg_buff[count] = '\0'; + + if (kstrtos32(dbg_buff, 0, &ch_id)) + goto error; + + if (ch_id == -1) { + min = 0; + max = gsi_ctx->max_ch; + } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || + !gsi_ctx->chan[ch_id].allocated) { + goto error; + } else { + min = ch_id; + max = ch_id + 1; + } + + for (ch_id = min; ch_id < max; ch_id++) + gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]); + + return count; +error: + TERR("Usage: echo ch_id > stats. 
Use -1 for all\n"); + return -EINVAL; +} + +static int gsi_dbg_create_stats_wq(void) +{ + gsi_ctx->dp_stat_wq = + create_singlethread_workqueue("gsi_stat"); + if (!gsi_ctx->dp_stat_wq) { + TERR("failed create workqueue\n"); + return -ENOMEM; + } + + return 0; +} + +static void gsi_dbg_destroy_stats_wq(void) +{ + cancel_delayed_work_sync(&gsi_update_dp_stats_work); + cancel_delayed_work_sync(&gsi_print_dp_stats_work); + flush_workqueue(gsi_ctx->dp_stat_wq); + destroy_workqueue(gsi_ctx->dp_stat_wq); + gsi_ctx->dp_stat_wq = NULL; +} + +static ssize_t gsi_enable_dp_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + int ch_id; + bool enable; + int ret; + + if (sizeof(dbg_buff) < count + 1) + goto error; + + if (copy_from_user(dbg_buff, buf, count)) + goto error; + + dbg_buff[count] = '\0'; + + if (dbg_buff[0] != '+' && dbg_buff[0] != '-') + goto error; + + enable = (dbg_buff[0] == '+'); + + if (kstrtos32(dbg_buff + 1, 0, &ch_id)) + goto error; + + if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || + !gsi_ctx->chan[ch_id].allocated) { + goto error; + } + + if (gsi_ctx->chan[ch_id].enable_dp_stats == enable) { + TERR("ch_%d: already enabled/disabled\n", ch_id); + return -EINVAL; + } + gsi_ctx->chan[ch_id].enable_dp_stats = enable; + + if (enable) + gsi_ctx->num_ch_dp_stats++; + else + gsi_ctx->num_ch_dp_stats--; + + if (enable) { + if (gsi_ctx->num_ch_dp_stats == 1) { + ret = gsi_dbg_create_stats_wq(); + if (ret) + return ret; + } + cancel_delayed_work_sync(&gsi_update_dp_stats_work); + queue_delayed_work(gsi_ctx->dp_stat_wq, + &gsi_update_dp_stats_work, msecs_to_jiffies(10)); + } else if (!enable && gsi_ctx->num_ch_dp_stats == 0) { + gsi_dbg_destroy_stats_wq(); + } + + return count; +error: + TERR("Usage: echo [+-]ch_id > enable_dp_stats\n"); + return -EINVAL; +} + +static ssize_t gsi_set_max_elem_dp_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + u32 ch_id; + u32 max_elem; + unsigned long missing; + char *sptr, *token; + + + if (sizeof(dbg_buff) < count + 1) + goto error; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + goto error; + + dbg_buff[count] = '\0'; + + sptr = dbg_buff; + + token = strsep(&sptr, " "); + if (!token) { + TERR("\n"); + goto error; + } + + if (kstrtou32(token, 0, &ch_id)) { + TERR("\n"); + goto error; + } + + token = strsep(&sptr, " "); + if (!token) { + /* get */ + if (kstrtou32(dbg_buff, 0, &ch_id)) + goto error; + if (ch_id >= gsi_ctx->max_ch) + goto error; + PRT_STAT("ch %d: max_re_expected=%d\n", ch_id, + gsi_ctx->chan[ch_id].props.max_re_expected); + return count; + } + if (kstrtou32(token, 0, &max_elem)) { + TERR("\n"); + goto error; + } + + TDBG("ch_id=%u max_elem=%u\n", ch_id, max_elem); + + if (ch_id >= gsi_ctx->max_ch) { + TERR("invalid chan id %u\n", ch_id); + goto error; + } + + gsi_ctx->chan[ch_id].props.max_re_expected = max_elem; + + return count; + +error: + TERR("Usage: (set) echo <ch_id> <max_elem> > max_elem_dp_stats\n"); + TERR("Usage: (get) echo <ch_id> > max_elem_dp_stats\n"); + return -EINVAL; +} + +static void gsi_wq_print_dp_stats(struct work_struct *work) +{ + int ch_id; + + for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) { + if (gsi_ctx->chan[ch_id].print_dp_stats) + gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]); + } + + queue_delayed_work(gsi_ctx->dp_stat_wq, &gsi_print_dp_stats_work, + msecs_to_jiffies(1000)); +} + +static void gsi_dbg_update_ch_dp_stats(struct gsi_chan_ctx *ctx) +{ + uint16_t start_hw; + uint16_t end_hw; + uint64_t rp_hw; + uint64_t wp_hw; + int ee = gsi_ctx->per.ee; +
uint16_t used_hw; + + rp_hw = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee)); + rp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) + << 32; + + wp_hw = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee)); + wp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) + << 32; + + start_hw = gsi_find_idx_from_addr(&ctx->ring, rp_hw); + end_hw = gsi_find_idx_from_addr(&ctx->ring, wp_hw); + + if (end_hw >= start_hw) + used_hw = end_hw - start_hw; + else + used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end_hw); + + TDBG("ch %d used %d\n", ctx->props.ch_id, used_hw); + gsi_update_ch_dp_stats(ctx, used_hw); +} + +static void gsi_wq_update_dp_stats(struct work_struct *work) +{ + int ch_id; + + for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) { + if (gsi_ctx->chan[ch_id].allocated && + gsi_ctx->chan[ch_id].enable_dp_stats) + gsi_dbg_update_ch_dp_stats(&gsi_ctx->chan[ch_id]); + } + + queue_delayed_work(gsi_ctx->dp_stat_wq, &gsi_update_dp_stats_work, + msecs_to_jiffies(10)); +} + + +static ssize_t gsi_rst_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + int ch_id; + int min, max; + + if (sizeof(dbg_buff) < count + 1) + goto error; + + if (copy_from_user(dbg_buff, buf, count)) + goto error; + + dbg_buff[count] = '\0'; + + if (kstrtos32(dbg_buff, 0, &ch_id)) + goto error; + + if (ch_id == -1) { + min = 0; + max = gsi_ctx->max_ch; + } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || + !gsi_ctx->chan[ch_id].allocated) { + goto error; + } else { + min = ch_id; + max = ch_id + 1; + } + + for (ch_id = min; ch_id < max; ch_id++) + memset(&gsi_ctx->chan[ch_id].stats, 0, + sizeof(gsi_ctx->chan[ch_id].stats)); + + return count; +error: + TERR("Usage: echo ch_id > rst_stats. 
Use -1 for all\n"); + return -EINVAL; +} + +static ssize_t gsi_print_dp_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + int ch_id; + bool enable; + int ret; + + if (sizeof(dbg_buff) < count + 1) + goto error; + + if (copy_from_user(dbg_buff, buf, count)) + goto error; + + dbg_buff[count] = '\0'; + + if (dbg_buff[0] != '+' && dbg_buff[0] != '-') + goto error; + + enable = (dbg_buff[0] == '+'); + + if (kstrtos32(dbg_buff + 1, 0, &ch_id)) + goto error; + + if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || + !gsi_ctx->chan[ch_id].allocated) { + goto error; + } + + if (gsi_ctx->chan[ch_id].print_dp_stats == enable) { + TERR("ch_%d: already enabled/disabled\n", ch_id); + return -EINVAL; + } + gsi_ctx->chan[ch_id].print_dp_stats = enable; + + if (enable) + gsi_ctx->num_ch_dp_stats++; + else + gsi_ctx->num_ch_dp_stats--; + + if (enable) { + if (gsi_ctx->num_ch_dp_stats == 1) { + ret = gsi_dbg_create_stats_wq(); + if (ret) + return ret; + } + cancel_delayed_work_sync(&gsi_print_dp_stats_work); + queue_delayed_work(gsi_ctx->dp_stat_wq, + &gsi_print_dp_stats_work, msecs_to_jiffies(10)); + } else if (!enable && gsi_ctx->num_ch_dp_stats == 0) { + gsi_dbg_destroy_stats_wq(); + } + + return count; +error: + TERR("Usage: echo [+-]ch_id > print_dp_stats\n"); + return -EINVAL; +} + +static ssize_t gsi_enable_ipc_low(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EINVAL; + + mutex_lock(&gsi_ctx->mlock); + if (option) { + if (!gsi_ipc_logbuf_low) { + gsi_ipc_logbuf_low = + ipc_log_context_create(GSI_IPC_LOG_PAGES, + "gsi_low", 0); + if (gsi_ipc_logbuf_low == NULL) + TERR("failed to get ipc_logbuf_low\n"); + } + gsi_ctx->ipc_logbuf_low = gsi_ipc_logbuf_low; + } else { + gsi_ctx->ipc_logbuf_low = NULL; + } + mutex_unlock(&gsi_ctx->mlock); + + return count; +} + +const struct file_operations gsi_ev_dump_ops = { + .write = gsi_dump_evt, +}; + +const struct file_operations gsi_ch_dump_ops = { + .write = gsi_dump_ch, +}; + +const struct file_operations gsi_ee_dump_ops = { + .write = gsi_dump_ee, +}; + +const struct file_operations gsi_map_ops = { + .write = gsi_dump_map, +}; + +const struct file_operations gsi_stats_ops = { + .write = gsi_dump_stats, +}; + +const struct file_operations gsi_enable_dp_stats_ops = { + .write = gsi_enable_dp_stats, +}; + +const struct file_operations gsi_max_elem_dp_stats_ops = { + .write = gsi_set_max_elem_dp_stats, +}; + +const struct file_operations gsi_rst_stats_ops = { + .write = gsi_rst_stats, +}; + +const struct file_operations gsi_print_dp_stats_ops = { + .write = gsi_print_dp_stats, +}; + +const struct file_operations gsi_ipc_low_ops = { + .write = gsi_enable_ipc_low, +}; + +void gsi_debugfs_init(void) +{ + static struct dentry *dfile; + const mode_t read_only_mode = 0444; + const mode_t write_only_mode = 0220; + + dent = debugfs_create_dir("gsi", 0); + if (IS_ERR(dent)) { + TERR("fail to create dir\n"); + return; + } + + dfile = debugfs_create_file("ev_dump", write_only_mode, + dent, 0, &gsi_ev_dump_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create ev_dump file\n"); + goto fail; + } + + dfile = debugfs_create_file("ch_dump", write_only_mode, + dent, 0, &gsi_ch_dump_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create ch_dump file\n"); + 
goto fail; + } + + dfile = debugfs_create_file("ee_dump", read_only_mode, dent, + 0, &gsi_ee_dump_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create ee_dump file\n"); + goto fail; + } + + dfile = debugfs_create_file("map", read_only_mode, dent, + 0, &gsi_map_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create map file\n"); + goto fail; + } + + dfile = debugfs_create_file("stats", write_only_mode, dent, + 0, &gsi_stats_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create stats file\n"); + goto fail; + } + + dfile = debugfs_create_file("enable_dp_stats", write_only_mode, dent, + 0, &gsi_enable_dp_stats_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create stats file\n"); + goto fail; + } + + dfile = debugfs_create_file("max_elem_dp_stats", write_only_mode, + dent, 0, &gsi_max_elem_dp_stats_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create stats file\n"); + goto fail; + } + + dfile = debugfs_create_file("rst_stats", write_only_mode, + dent, 0, &gsi_rst_stats_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create stats file\n"); + goto fail; + } + + dfile = debugfs_create_file("print_dp_stats", + write_only_mode, dent, 0, &gsi_print_dp_stats_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("fail to create stats file\n"); + goto fail; + } + + dfile = debugfs_create_file("ipc_low", write_only_mode, + dent, 0, &gsi_ipc_low_ops); + if (!dfile || IS_ERR(dfile)) { + TERR("could not create ipc_low\n"); + goto fail; + } + + return; +fail: + debugfs_remove_recursive(dent); +} + diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h new file mode 100644 index 000000000000..3c35b02486da --- /dev/null +++ b/drivers/platform/msm/gsi/gsi_reg.h @@ -0,0 +1,1875 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __GSI_REG_H__ +#define __GSI_REG_H__ + +#define GSI_GSI_REG_BASE_OFFS 0 + +#define GSI_GSI_CFG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000000) +#define GSI_GSI_CFG_RMSK 0xf +#define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20 +#define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5 +#define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10 +#define GSI_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4 +#define GSI_GSI_CFG_UC_IS_MCS_BMSK 0x8 +#define GSI_GSI_CFG_UC_IS_MCS_SHFT 0x3 +#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4 +#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2 +#define GSI_GSI_CFG_MCS_ENABLE_BMSK 0x2 +#define GSI_GSI_CFG_MCS_ENABLE_SHFT 0x1 +#define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1 +#define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0 + +#define GSI_GSI_MCS_CFG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000B000) +#define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1 +#define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0 + +#define GSI_GSI_MANAGER_MCS_CODE_VER_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000008) +#define GSI_GSI_MANAGER_MCS_CODE_VER_RMSK 0xffffffff +#define GSI_GSI_MANAGER_MCS_CODE_VER_VER_BMSK 0xffffffff +#define GSI_GSI_MANAGER_MCS_CODE_VER_VER_SHFT 0x0 + +#define GSI_GSI_ZEROS_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000010) +#define GSI_GSI_ZEROS_RMSK 0xffffffff +#define GSI_GSI_ZEROS_ZEROS_BMSK 0xffffffff +#define GSI_GSI_ZEROS_ZEROS_SHFT 0x0 + +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000018) +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_RMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_BMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_SHFT 0x0 + +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000001c) +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_RMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_BMSK 0xffffffff +#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_SHFT 0x0 + +#define GSI_GSI_MOQA_CFG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000030) +#define GSI_GSI_MOQA_CFG_RMSK 0xffffff +#define GSI_GSI_MOQA_CFG_CLIENT_OOWR_BMSK 0xff0000 +#define GSI_GSI_MOQA_CFG_CLIENT_OOWR_SHFT 0x10 +#define GSI_GSI_MOQA_CFG_CLIENT_OORD_BMSK 0xff00 +#define GSI_GSI_MOQA_CFG_CLIENT_OORD_SHFT 0x8 +#define GSI_GSI_MOQA_CFG_CLIENT_REQ_PRIO_BMSK 0xff +#define GSI_GSI_MOQA_CFG_CLIENT_REQ_PRIO_SHFT 0x0 + +#define GSI_GSI_REE_CFG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000038) +#define GSI_GSI_REE_CFG_RMSK 0xff01 +#define GSI_GSI_REE_CFG_MAX_BURST_SIZE_BMSK 0xff00 +#define GSI_GSI_REE_CFG_MAX_BURST_SIZE_SHFT 0x8 +#define GSI_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_BMSK 0x1 +#define GSI_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_SHFT 0x0 + +#define GSI_GSI_SHRAM_WR_WRR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000050) +#define GSI_GSI_SHRAM_WR_WRR_RMSK 0xffff +#define GSI_GSI_SHRAM_WR_WRR_CLIENT3_WR_WEIGHT_BMSK 0xf000 +#define GSI_GSI_SHRAM_WR_WRR_CLIENT3_WR_WEIGHT_SHFT 0xc +#define GSI_GSI_SHRAM_WR_WRR_CLIENT2_WR_WEIGHT_BMSK 0xf00 +#define GSI_GSI_SHRAM_WR_WRR_CLIENT2_WR_WEIGHT_SHFT 0x8 +#define GSI_GSI_SHRAM_WR_WRR_CLIENT1_WR_WEIGHT_BMSK 0xf0 +#define GSI_GSI_SHRAM_WR_WRR_CLIENT1_WR_WEIGHT_SHFT 0x4 +#define GSI_GSI_SHRAM_WR_WRR_CLIENT0_WR_WEIGHT_BMSK 0xf +#define GSI_GSI_SHRAM_WR_WRR_CLIENT0_WR_WEIGHT_SHFT 0x0 + +#define GSI_GSI_SHRAM_RD_WRR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000058) +#define GSI_GSI_SHRAM_RD_WRR_RMSK 0xffffff +#define GSI_GSI_SHRAM_RD_WRR_ACH_SHRAM_RD_WEIGHT_BMSK 0xf00000 +#define GSI_GSI_SHRAM_RD_WRR_ACH_SHRAM_RD_WEIGHT_SHFT 0x14 +#define GSI_GSI_SHRAM_RD_WRR_IE_SHRAM_RD_WEIGHT_BMSK 0xf0000 +#define GSI_GSI_SHRAM_RD_WRR_IE_SHRAM_RD_WEIGHT_SHFT 0x10 +#define 
GSI_GSI_SHRAM_RD_WRR_CSR_SHRAM_RD_WEIGHT_BMSK 0xf000 +#define GSI_GSI_SHRAM_RD_WRR_CSR_SHRAM_RD_WEIGHT_SHFT 0xc +#define GSI_GSI_SHRAM_RD_WRR_RE_CNTXT_SHRAM_RD_WEIGHT_BMSK 0xf00 +#define GSI_GSI_SHRAM_RD_WRR_RE_CNTXT_SHRAM_RD_WEIGHT_SHFT 0x8 +#define GSI_GSI_SHRAM_RD_WRR_MCS_LD_SHRAM_RD_WEIGHT_BMSK 0xf0 +#define GSI_GSI_SHRAM_RD_WRR_MCS_LD_SHRAM_RD_WEIGHT_SHFT 0x4 +#define GSI_GSI_SHRAM_RD_WRR_EV_ENG_SHRAM_RD_WEIGHT_BMSK 0xf +#define GSI_GSI_SHRAM_RD_WRR_EV_ENG_SHRAM_RD_WEIGHT_SHFT 0x0 + +#define GSI_GSI_CGC_CTRL_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000060) +#define GSI_GSI_CGC_CTRL_RMSK 0x3f +#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_BMSK 0x800 +#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_SHFT 0xb +#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_BMSK 0x400 +#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_SHFT 0xa +#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_BMSK 0x200 +#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_SHFT 0x9 +#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_BMSK 0x100 +#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_SHFT 0x8 +#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_BMSK 0x80 +#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_SHFT 0x7 +#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_BMSK 0x40 +#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_SHFT 0x6 +#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_BMSK 0x20 +#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_SHFT 0x5 +#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_BMSK 0x10 +#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_SHFT 0x4 +#define GSI_GSI_CGC_CTRL_REGION_4_HW_CGC_EN_BMSK 0x8 +#define GSI_GSI_CGC_CTRL_REGION_4_HW_CGC_EN_SHFT 0x3 +#define GSI_GSI_CGC_CTRL_REGION_3_HW_CGC_EN_BMSK 0x4 +#define GSI_GSI_CGC_CTRL_REGION_3_HW_CGC_EN_SHFT 0x2 +#define GSI_GSI_CGC_CTRL_REGION_2_HW_CGC_EN_BMSK 0x2 +#define GSI_GSI_CGC_CTRL_REGION_2_HW_CGC_EN_SHFT 0x1 +#define GSI_GSI_CGC_CTRL_REGION_1_HW_CGC_EN_BMSK 0x1 +#define GSI_GSI_CGC_CTRL_REGION_1_HW_CGC_EN_SHFT 0x0 + +#define GSI_GSI_MSI_CACHEATTR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000080) +#define GSI_GSI_MSI_CACHEATTR_RMSK 0x3f +#define GSI_GSI_MSI_CACHEATTR_AREQPRIORITY_BMSK 0x30 +#define GSI_GSI_MSI_CACHEATTR_AREQPRIORITY_SHFT 0x4 +#define GSI_GSI_MSI_CACHEATTR_ATRANSIENT_BMSK 0x8 +#define GSI_GSI_MSI_CACHEATTR_ATRANSIENT_SHFT 0x3 +#define GSI_GSI_MSI_CACHEATTR_ANOALLOCATE_BMSK 0x4 +#define GSI_GSI_MSI_CACHEATTR_ANOALLOCATE_SHFT 0x2 +#define GSI_GSI_MSI_CACHEATTR_AINNERSHARED_BMSK 0x2 +#define GSI_GSI_MSI_CACHEATTR_AINNERSHARED_SHFT 0x1 +#define GSI_GSI_MSI_CACHEATTR_ASHARED_BMSK 0x1 +#define GSI_GSI_MSI_CACHEATTR_ASHARED_SHFT 0x0 + +#define GSI_GSI_EVENT_CACHEATTR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000084) +#define GSI_GSI_EVENT_CACHEATTR_RMSK 0x3f +#define GSI_GSI_EVENT_CACHEATTR_AREQPRIORITY_BMSK 0x30 +#define GSI_GSI_EVENT_CACHEATTR_AREQPRIORITY_SHFT 0x4 +#define GSI_GSI_EVENT_CACHEATTR_ATRANSIENT_BMSK 0x8 +#define GSI_GSI_EVENT_CACHEATTR_ATRANSIENT_SHFT 0x3 +#define GSI_GSI_EVENT_CACHEATTR_ANOALLOCATE_BMSK 0x4 +#define GSI_GSI_EVENT_CACHEATTR_ANOALLOCATE_SHFT 0x2 +#define GSI_GSI_EVENT_CACHEATTR_AINNERSHARED_BMSK 0x2 +#define GSI_GSI_EVENT_CACHEATTR_AINNERSHARED_SHFT 0x1 +#define GSI_GSI_EVENT_CACHEATTR_ASHARED_BMSK 0x1 +#define GSI_GSI_EVENT_CACHEATTR_ASHARED_SHFT 0x0 + +#define GSI_GSI_DATA_CACHEATTR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000088) +#define GSI_GSI_DATA_CACHEATTR_RMSK 0x3f +#define GSI_GSI_DATA_CACHEATTR_AREQPRIORITY_BMSK 0x30 +#define GSI_GSI_DATA_CACHEATTR_AREQPRIORITY_SHFT 0x4 +#define GSI_GSI_DATA_CACHEATTR_ATRANSIENT_BMSK 0x8 +#define GSI_GSI_DATA_CACHEATTR_ATRANSIENT_SHFT 0x3 +#define
GSI_GSI_DATA_CACHEATTR_ANOALLOCATE_BMSK 0x4 +#define GSI_GSI_DATA_CACHEATTR_ANOALLOCATE_SHFT 0x2 +#define GSI_GSI_DATA_CACHEATTR_AINNERSHARED_BMSK 0x2 +#define GSI_GSI_DATA_CACHEATTR_AINNERSHARED_SHFT 0x1 +#define GSI_GSI_DATA_CACHEATTR_ASHARED_BMSK 0x1 +#define GSI_GSI_DATA_CACHEATTR_ASHARED_SHFT 0x0 + +#define GSI_GSI_TRE_CACHEATTR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000090) +#define GSI_GSI_TRE_CACHEATTR_RMSK 0x3f +#define GSI_GSI_TRE_CACHEATTR_AREQPRIORITY_BMSK 0x30 +#define GSI_GSI_TRE_CACHEATTR_AREQPRIORITY_SHFT 0x4 +#define GSI_GSI_TRE_CACHEATTR_ATRANSIENT_BMSK 0x8 +#define GSI_GSI_TRE_CACHEATTR_ATRANSIENT_SHFT 0x3 +#define GSI_GSI_TRE_CACHEATTR_ANOALLOCATE_BMSK 0x4 +#define GSI_GSI_TRE_CACHEATTR_ANOALLOCATE_SHFT 0x2 +#define GSI_GSI_TRE_CACHEATTR_AINNERSHARED_BMSK 0x2 +#define GSI_GSI_TRE_CACHEATTR_AINNERSHARED_SHFT 0x1 +#define GSI_GSI_TRE_CACHEATTR_ASHARED_BMSK 0x1 +#define GSI_GSI_TRE_CACHEATTR_ASHARED_SHFT 0x0 + +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000a0) +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000a4) +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000a8) +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000ac) +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000b0) +#define GSI_IC_GEN_INT_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000b4) +#define GSI_IC_GEN_INT_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000b8) +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000bc) +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000c0) +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define 
GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000c4) +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000c8) +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000cc) +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000d0) +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000d4) +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000d8) +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_BMSK 
0x3f000000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000dc) +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_READ_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000e0) +#define GSI_IC_READ_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_READ_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000e4) +#define GSI_IC_READ_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_WRITE_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000e8) +#define GSI_IC_WRITE_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_WRITE_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000ec) +#define GSI_IC_WRITE_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define 
GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000f0) +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_RMSK 0x3ffc1047 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_SHFT 0x18 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_SHFT 0x12 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_SHFT 0xc +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_BMSK 0x7 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_SHFT 0x0 + +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x000000f4) +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RMSK 0xfc3041 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 +#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 + +#define GSI_IC_INT_WEIGHT_REE_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000100) +#define GSI_IC_INT_WEIGHT_REE_RMSK 0xfff +#define GSI_IC_INT_WEIGHT_REE_CH_EMPTY_INT_WEIGHT_BMSK 0xf00 +#define GSI_IC_INT_WEIGHT_REE_CH_EMPTY_INT_WEIGHT_SHFT 0x8 +#define GSI_IC_INT_WEIGHT_REE_NEW_RE_INT_WEIGHT_BMSK 0xf0 +#define GSI_IC_INT_WEIGHT_REE_NEW_RE_INT_WEIGHT_SHFT 0x4 +#define GSI_IC_INT_WEIGHT_REE_STOP_CH_COMP_INT_WEIGHT_BMSK 0xf +#define GSI_IC_INT_WEIGHT_REE_STOP_CH_COMP_INT_WEIGHT_SHFT 0x0 + +#define GSI_IC_INT_WEIGHT_EVT_ENG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000104) +#define GSI_IC_INT_WEIGHT_EVT_ENG_RMSK 0xf +#define GSI_IC_INT_WEIGHT_EVT_ENG_EVNT_ENG_INT_WEIGHT_BMSK 0xf +#define GSI_IC_INT_WEIGHT_EVT_ENG_EVNT_ENG_INT_WEIGHT_SHFT 0x0 + +#define GSI_IC_INT_WEIGHT_INT_ENG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000108) +#define GSI_IC_INT_WEIGHT_INT_ENG_RMSK 0xf +#define GSI_IC_INT_WEIGHT_INT_ENG_INT_ENG_INT_WEIGHT_BMSK 0xf +#define GSI_IC_INT_WEIGHT_INT_ENG_INT_ENG_INT_WEIGHT_SHFT 0x0 + +#define GSI_IC_INT_WEIGHT_CSR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000010c) +#define GSI_IC_INT_WEIGHT_CSR_RMSK 0xffffff +#define GSI_IC_INT_WEIGHT_CSR_CH_START_CMD_INT_WEIGHT_BMSK 0xf00000 +#define GSI_IC_INT_WEIGHT_CSR_CH_START_CMD_INT_WEIGHT_SHFT 0x14 +#define GSI_IC_INT_WEIGHT_CSR_CH_STOP_CMD_INT_WEIGHT_BMSK 0xf0000 +#define GSI_IC_INT_WEIGHT_CSR_CH_STOP_CMD_INT_WEIGHT_SHFT 0x10 +#define GSI_IC_INT_WEIGHT_CSR_CH_RESET_CMD_INT_WEIGHT_BMSK 0xf000 +#define GSI_IC_INT_WEIGHT_CSR_CH_RESET_CMD_INT_WEIGHT_SHFT 0xc +#define GSI_IC_INT_WEIGHT_CSR_CH_ALLOC_CMD_INT_WEIGHT_BMSK 0xf00 +#define GSI_IC_INT_WEIGHT_CSR_CH_ALLOC_CMD_INT_WEIGHT_SHFT 0x8 +#define GSI_IC_INT_WEIGHT_CSR_EV_RESET_CMD_INT_WEIGHT_BMSK 0xf0 +#define GSI_IC_INT_WEIGHT_CSR_EV_RESET_CMD_INT_WEIGHT_SHFT 0x4 +#define GSI_IC_INT_WEIGHT_CSR_EV_ALLOC_CMD_INT_WEIGHT_BMSK 0xf +#define GSI_IC_INT_WEIGHT_CSR_EV_ALLOC_CMD_INT_WEIGHT_SHFT 0x0 + +#define 
GSI_IC_INT_WEIGHT_TLV_ENG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000110) +#define GSI_IC_INT_WEIGHT_TLV_ENG_RMSK 0xf +#define GSI_IC_INT_WEIGHT_TLV_ENG_TLV_INT_WEIGHT_BMSK 0xf +#define GSI_IC_INT_WEIGHT_TLV_ENG_TLV_INT_WEIGHT_SHFT 0x0 + +#define GSI_IC_INT_WEIGHT_TIMER_ENG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000114) +#define GSI_IC_INT_WEIGHT_TIMER_ENG_RMSK 0xf +#define GSI_IC_INT_WEIGHT_TIMER_ENG_TIMER_INT_WEIGHT_BMSK 0xf +#define GSI_IC_INT_WEIGHT_TIMER_ENG_TIMER_INT_WEIGHT_SHFT 0x0 + +#define GSI_IC_INT_WEIGHT_DB_ENG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000118) +#define GSI_IC_INT_WEIGHT_DB_ENG_RMSK 0xf +#define GSI_IC_INT_WEIGHT_DB_ENG_NEW_DB_INT_WEIGHT_BMSK 0xf +#define GSI_IC_INT_WEIGHT_DB_ENG_NEW_DB_INT_WEIGHT_SHFT 0x0 + +#define GSI_IC_INT_WEIGHT_RD_WR_ENG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000011c) +#define GSI_IC_INT_WEIGHT_RD_WR_ENG_RMSK 0xff +#define GSI_IC_INT_WEIGHT_RD_WR_ENG_WRITE_INT_WEIGHT_BMSK 0xf0 +#define GSI_IC_INT_WEIGHT_RD_WR_ENG_WRITE_INT_WEIGHT_SHFT 0x4 +#define GSI_IC_INT_WEIGHT_RD_WR_ENG_READ_INT_WEIGHT_BMSK 0xf +#define GSI_IC_INT_WEIGHT_RD_WR_ENG_READ_INT_WEIGHT_SHFT 0x0 + +#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000120) +#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_RMSK 0xf +#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_GP_INT_WEIGHT_BMSK 0xf +#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_GP_INT_WEIGHT_SHFT 0x0 + +#define GSI_GSI_MANAGER_EE_QOS_n_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00000300 + 0x4 * (n)) +#define GSI_GSI_MANAGER_EE_QOS_n_RMSK 0x1f1f03 +#define GSI_GSI_MANAGER_EE_QOS_n_MAXn 3 +#define GSI_GSI_MANAGER_EE_QOS_n_MAX_EV_ALLOC_BMSK 0x1f0000 +#define GSI_GSI_MANAGER_EE_QOS_n_MAX_EV_ALLOC_SHFT 0x10 +#define GSI_GSI_MANAGER_EE_QOS_n_MAX_CH_ALLOC_BMSK 0x1f00 +#define GSI_GSI_MANAGER_EE_QOS_n_MAX_CH_ALLOC_SHFT 0x8 +#define GSI_GSI_MANAGER_EE_QOS_n_EE_PRIO_BMSK 0x3 +#define GSI_GSI_MANAGER_EE_QOS_n_EE_PRIO_SHFT 0x0 + +#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000200) +#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_RMSK 0xffff +#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK 0xffff +#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT 0x0 + +#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000204) +#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_RMSK 0xffff +#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK 0xffff +#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT 0x0 + +#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000208) +#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_RMSK 0xffff +#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_BMSK 0xffff +#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_SHFT 0x0 + +#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000020c) +#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_RMSK 0xffff +#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_BMSK 0xffff +#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_SHFT 0x0 + +#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000240) +#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_RMSK 0xffff +#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_BMSK 0xffff +#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_SHFT 0x0 + +#define GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000244) +#define GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_RMSK 0xffff +#define 
GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_BMSK 0xffff +#define GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_CMD_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000400) +#define GSI_GSI_IRAM_PTR_CH_CMD_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000404) +#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_DB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000418) +#define GSI_GSI_IRAM_PTR_CH_DB_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_EV_DB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000041c) +#define GSI_GSI_IRAM_PTR_EV_DB_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_NEW_RE_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000420) +#define GSI_GSI_IRAM_PTR_NEW_RE_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000424) +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000428) +#define GSI_GSI_IRAM_PTR_CH_EMPTY_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000042c) +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000430) +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000434) +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000438) +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000043c) +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000440) +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000444) +#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_RMSK 0xfff +#define 
GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000448) +#define GSI_GSI_IRAM_PTR_UC_GP_INT_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000044c) +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_INST_RAM_n_WORD_SZ 0x4 +#define GSI_GSI_INST_RAM_n_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00004000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n)) +#define GSI_GSI_INST_RAM_n_RMSK 0xffffffff +#define GSI_GSI_INST_RAM_n_MAXn 4095 +#define GSI_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000 +#define GSI_GSI_INST_RAM_n_INST_BYTE_3_SHFT 0x18 +#define GSI_GSI_INST_RAM_n_INST_BYTE_2_BMSK 0xff0000 +#define GSI_GSI_INST_RAM_n_INST_BYTE_2_SHFT 0x10 +#define GSI_GSI_INST_RAM_n_INST_BYTE_1_BMSK 0xff00 +#define GSI_GSI_INST_RAM_n_INST_BYTE_1_SHFT 0x8 +#define GSI_GSI_INST_RAM_n_INST_BYTE_0_BMSK 0xff +#define GSI_GSI_INST_RAM_n_INST_BYTE_0_SHFT 0x0 + +#define GSI_GSI_SHRAM_n_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00002000 + 0x4 * (n)) +#define GSI_GSI_SHRAM_n_RMSK 0xffffffff +#define GSI_GSI_SHRAM_n_MAXn 1023 +#define GSI_GSI_SHRAM_n_SHRAM_BMSK 0xffffffff +#define GSI_GSI_SHRAM_n_SHRAM_SHFT 0x0 + +#define GSI_GSI_TEST_BUS_SEL_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00001000) +#define GSI_GSI_TEST_BUS_SEL_RMSK 0xff +#define GSI_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK 0xff +#define GSI_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT 0x0 + +#define GSI_GSI_TEST_BUS_REG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00001008) +#define GSI_GSI_TEST_BUS_REG_RMSK 0xffffffff +#define GSI_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_BMSK 0xffffffff +#define GSI_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_SHFT 0x0 + +#define GSI_GSI_DEBUG_BUSY_REG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00001010) +#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0xff +#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_BMSK 0x80 +#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_SHFT 0x7 +#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_BMSK 0x40 +#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_SHFT 0x6 +#define GSI_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_BMSK 0x20 +#define GSI_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_SHFT 0x5 +#define GSI_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_BMSK 0x10 +#define GSI_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_SHFT 0x4 +#define GSI_GSI_DEBUG_BUSY_REG_TIMER_BUSY_BMSK 0x8 +#define GSI_GSI_DEBUG_BUSY_REG_TIMER_BUSY_SHFT 0x3 +#define GSI_GSI_DEBUG_BUSY_REG_MCS_BUSY_BMSK 0x4 +#define GSI_GSI_DEBUG_BUSY_REG_MCS_BUSY_SHFT 0x2 +#define GSI_GSI_DEBUG_BUSY_REG_REE_BUSY_BMSK 0x2 +#define GSI_GSI_DEBUG_BUSY_REG_REE_BUSY_SHFT 0x1 +#define GSI_GSI_DEBUG_BUSY_REG_CSR_BUSY_BMSK 0x1 +#define GSI_GSI_DEBUG_BUSY_REG_CSR_BUSY_SHFT 0x0 + +#define GSI_GSI_DEBUG_COUNTER_CFGn_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00001200 + 0x4 * (n)) +#define GSI_GSI_DEBUG_COUNTER_CFGn_RMSK 0x3ffffff7 +#define GSI_GSI_DEBUG_COUNTER_CFGn_MAXn 7 +#define GSI_GSI_DEBUG_COUNTER_CFGn_TIMER_VALUE_BMSK 0x3ff80000 +#define GSI_GSI_DEBUG_COUNTER_CFGn_TIMER_VALUE_SHFT 0x13 +#define GSI_GSI_DEBUG_COUNTER_CFGn_VIRTUAL_CHNL_BMSK 0x7f000 +#define GSI_GSI_DEBUG_COUNTER_CFGn_VIRTUAL_CHNL_SHFT 0xc +#define GSI_GSI_DEBUG_COUNTER_CFGn_EE_BMSK 0xf00 +#define GSI_GSI_DEBUG_COUNTER_CFGn_EE_SHFT 0x8 +#define GSI_GSI_DEBUG_COUNTER_CFGn_EVNT_TYPE_BMSK 0xf0 +#define 
GSI_GSI_DEBUG_COUNTER_CFGn_EVNT_TYPE_SHFT 0x4 +#define GSI_GSI_DEBUG_COUNTER_CFGn_CLR_AT_READ_BMSK 0x4 +#define GSI_GSI_DEBUG_COUNTER_CFGn_CLR_AT_READ_SHFT 0x2 +#define GSI_GSI_DEBUG_COUNTER_CFGn_STOP_AT_WRAP_ARND_BMSK 0x2 +#define GSI_GSI_DEBUG_COUNTER_CFGn_STOP_AT_WRAP_ARND_SHFT 0x1 +#define GSI_GSI_DEBUG_COUNTER_CFGn_ENABLE_BMSK 0x1 +#define GSI_GSI_DEBUG_COUNTER_CFGn_ENABLE_SHFT 0x0 + +#define GSI_GSI_DEBUG_COUNTERn_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00001240 + 0x4 * (n)) +#define GSI_GSI_DEBUG_COUNTERn_RMSK 0xffff +#define GSI_GSI_DEBUG_COUNTERn_MAXn 7 +#define GSI_GSI_DEBUG_COUNTERn_COUNTER_VALUE_BMSK 0xffff +#define GSI_GSI_DEBUG_COUNTERn_COUNTER_VALUE_SHFT 0x0 + +#define GSI_GSI_DEBUG_PC_FROM_SW_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00001040) +#define GSI_GSI_DEBUG_PC_FROM_SW_RMSK 0xfff +#define GSI_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_DEBUG_SW_STALL_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00001044) +#define GSI_GSI_DEBUG_SW_STALL_RMSK 0x1 +#define GSI_GSI_DEBUG_SW_STALL_MCS_STALL_BMSK 0x1 +#define GSI_GSI_DEBUG_SW_STALL_MCS_STALL_SHFT 0x0 + +#define GSI_GSI_DEBUG_PC_FOR_DEBUG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00001048) +#define GSI_GSI_DEBUG_PC_FOR_DEBUG_RMSK 0xfff +#define GSI_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_SHFT 0x0 + +#define GSI_GSI_DEBUG_QSB_LOG_SEL_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00001050) +#define GSI_GSI_DEBUG_QSB_LOG_SEL_RMSK 0xffff01 +#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_MID_BMSK 0xff0000 +#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_MID_SHFT 0x10 +#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_TID_BMSK 0xff00 +#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_TID_SHFT 0x8 +#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_WRITE_BMSK 0x1 +#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_WRITE_SHFT 0x0 + +#define GSI_GSI_DEBUG_QSB_LOG_CLR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00001058) +#define GSI_GSI_DEBUG_QSB_LOG_CLR_RMSK 0x1 +#define GSI_GSI_DEBUG_QSB_LOG_CLR_LOG_CLR_BMSK 0x1 +#define GSI_GSI_DEBUG_QSB_LOG_CLR_LOG_CLR_SHFT 0x0 + +#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00001060) +#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_RMSK 0x1ffff01 +#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_BMSK 0x1000000 +#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_SHFT 0x18 +#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_BMSK 0xff0000 +#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_SHFT 0x10 +#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_BMSK 0xff00 +#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_SHFT 0x8 +#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_BMSK 0x1 +#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_SHFT 0x0 + +#define GSI_GSI_DEBUG_QSB_LOG_0_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00001064) +#define GSI_GSI_DEBUG_QSB_LOG_0_RMSK 0xffffffff +#define GSI_GSI_DEBUG_QSB_LOG_0_ADDR_31_0_BMSK 0xffffffff +#define GSI_GSI_DEBUG_QSB_LOG_0_ADDR_31_0_SHFT 0x0 + +#define GSI_GSI_DEBUG_QSB_LOG_1_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00001068) +#define GSI_GSI_DEBUG_QSB_LOG_1_RMSK 0xfff7ffff +#define GSI_GSI_DEBUG_QSB_LOG_1_AREQPRIORITY_BMSK 0xf0000000 +#define GSI_GSI_DEBUG_QSB_LOG_1_AREQPRIORITY_SHFT 0x1c +#define GSI_GSI_DEBUG_QSB_LOG_1_ASIZE_BMSK 0xf000000 +#define GSI_GSI_DEBUG_QSB_LOG_1_ASIZE_SHFT 0x18 +#define GSI_GSI_DEBUG_QSB_LOG_1_ALEN_BMSK 0xf00000 +#define GSI_GSI_DEBUG_QSB_LOG_1_ALEN_SHFT 0x14 +#define GSI_GSI_DEBUG_QSB_LOG_1_AOOOWR_BMSK 0x40000 +#define GSI_GSI_DEBUG_QSB_LOG_1_AOOOWR_SHFT 0x12 +#define 
GSI_GSI_DEBUG_QSB_LOG_1_AOOORD_BMSK 0x20000 +#define GSI_GSI_DEBUG_QSB_LOG_1_AOOORD_SHFT 0x11 +#define GSI_GSI_DEBUG_QSB_LOG_1_ATRANSIENT_BMSK 0x10000 +#define GSI_GSI_DEBUG_QSB_LOG_1_ATRANSIENT_SHFT 0x10 +#define GSI_GSI_DEBUG_QSB_LOG_1_ACACHEABLE_BMSK 0x8000 +#define GSI_GSI_DEBUG_QSB_LOG_1_ACACHEABLE_SHFT 0xf +#define GSI_GSI_DEBUG_QSB_LOG_1_ASHARED_BMSK 0x4000 +#define GSI_GSI_DEBUG_QSB_LOG_1_ASHARED_SHFT 0xe +#define GSI_GSI_DEBUG_QSB_LOG_1_ANOALLOCATE_BMSK 0x2000 +#define GSI_GSI_DEBUG_QSB_LOG_1_ANOALLOCATE_SHFT 0xd +#define GSI_GSI_DEBUG_QSB_LOG_1_AINNERSHARED_BMSK 0x1000 +#define GSI_GSI_DEBUG_QSB_LOG_1_AINNERSHARED_SHFT 0xc +#define GSI_GSI_DEBUG_QSB_LOG_1_ADDR_43_32_BMSK 0xfff +#define GSI_GSI_DEBUG_QSB_LOG_1_ADDR_43_32_SHFT 0x0 + +#define GSI_GSI_DEBUG_QSB_LOG_2_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000106c) +#define GSI_GSI_DEBUG_QSB_LOG_2_RMSK 0xffff +#define GSI_GSI_DEBUG_QSB_LOG_2_AMEMTYPE_BMSK 0xf000 +#define GSI_GSI_DEBUG_QSB_LOG_2_AMEMTYPE_SHFT 0xc +#define GSI_GSI_DEBUG_QSB_LOG_2_AMMUSID_BMSK 0xfff +#define GSI_GSI_DEBUG_QSB_LOG_2_AMMUSID_SHFT 0x0 + +#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00001070 + 0x4 * (n)) +#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_RMSK 0xffffffff +#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_MAXn 3 +#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_MID_BMSK 0xf8000000 +#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_MID_SHFT 0x1b +#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_TID_BMSK 0x7c00000 +#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_TID_SHFT 0x16 +#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_WRITE_BMSK 0x200000 +#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_WRITE_SHFT 0x15 +#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR_20_0_BMSK 0x1fffff +#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR_20_0_SHFT 0x0 + +#define GSI_GSI_DEBUG_SW_RF_n_WRITE_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00001080 + 0x4 * (n)) +#define GSI_GSI_DEBUG_SW_RF_n_WRITE_RMSK 0xffffffff +#define GSI_GSI_DEBUG_SW_RF_n_WRITE_MAXn 31 +#define GSI_GSI_DEBUG_SW_RF_n_WRITE_DATA_IN_BMSK 0xffffffff +#define GSI_GSI_DEBUG_SW_RF_n_WRITE_DATA_IN_SHFT 0x0 + +#define GSI_GSI_DEBUG_SW_RF_n_READ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00001100 + 0x4 * (n)) +#define GSI_GSI_DEBUG_SW_RF_n_READ_RMSK 0xffffffff +#define GSI_GSI_DEBUG_SW_RF_n_READ_MAXn 31 +#define GSI_GSI_DEBUG_SW_RF_n_READ_RF_REG_BMSK 0xffffffff +#define GSI_GSI_DEBUG_SW_RF_n_READ_RF_REG_SHFT 0x0 + +#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00001400 + 0x80 * (n) + 0x4 * (k)) +#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_RMSK 0x3f +#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_MAXk 30 +#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_MAXn 3 +#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20 +#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5 +#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f +#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0 + +#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00001600 + 0x80 * (n) + 0x4 * (k)) +#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK 0x3f +#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXk 15 +#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXn 3 +#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_BMSK 0x20 +#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_SHFT 0x5 +#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_BMSK 0x1f +#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_SHFT 0x0 + +#define GSI_GSI_UC_SRC_IRQ_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000500) +#define GSI_GSI_UC_SRC_IRQ_RMSK 0xf 
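(Editorial aside, not part of the snapshot: a minimal sketch of how the *_OFFS/_BMSK/_SHFT triplets in this register map are typically combined, using the per-EE channel VP-table debug register defined just above as the example. It assumes readl_relaxed() from <linux/io.h> and an already ioremap()ed GSI register base; the helper name and the "base" parameter are illustrative only.)

/*
 * Sketch: return the physical channel backing virtual channel @vch of
 * execution environment @ee, or ~0U if the VP-table entry is not valid.
 * "base" stands in for the driver's mapped GSI register base.
 */
static inline u32 gsi_debug_read_phys_ch(void __iomem *base, u32 ee, u32 vch)
{
	u32 val;

	val = readl_relaxed(base +
		GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(vch, ee));
	if (!(val & GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_VALID_BMSK))
		return ~0U;	/* no physical channel mapped */
	return (val & GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK) >>
		GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT;
}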
+#define GSI_GSI_UC_SRC_IRQ_IC_2_UC_MCS_INT_VLD_BMSK 0x8 +#define GSI_GSI_UC_SRC_IRQ_IC_2_UC_MCS_INT_VLD_SHFT 0x3 +#define GSI_GSI_UC_SRC_IRQ_ACC_2_UC_MCS_GO_ACK_BMSK 0x4 +#define GSI_GSI_UC_SRC_IRQ_ACC_2_UC_MCS_GO_ACK_SHFT 0x2 +#define GSI_GSI_UC_SRC_IRQ_UC_ACC_CMPLT_BMSK 0x2 +#define GSI_GSI_UC_SRC_IRQ_UC_ACC_CMPLT_SHFT 0x1 +#define GSI_GSI_UC_SRC_IRQ_UC_ACC_GO_BMSK 0x1 +#define GSI_GSI_UC_SRC_IRQ_UC_ACC_GO_SHFT 0x0 + +#define GSI_GSI_UC_SRC_IRQ_MSK_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000504) +#define GSI_GSI_UC_SRC_IRQ_MSK_RMSK 0xf +#define GSI_GSI_UC_SRC_IRQ_MSK_IC_2_UC_MCS_INT_VLD_BMSK 0x8 +#define GSI_GSI_UC_SRC_IRQ_MSK_IC_2_UC_MCS_INT_VLD_SHFT 0x3 +#define GSI_GSI_UC_SRC_IRQ_MSK_ACC_2_UC_MCS_GO_ACK_BMSK 0x4 +#define GSI_GSI_UC_SRC_IRQ_MSK_ACC_2_UC_MCS_GO_ACK_SHFT 0x2 +#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_CMPLT_BMSK 0x2 +#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_CMPLT_SHFT 0x1 +#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_GO_BMSK 0x1 +#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_GO_SHFT 0x0 + +#define GSI_GSI_UC_SRC_IRQ_CLR_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000508) +#define GSI_GSI_UC_SRC_IRQ_CLR_RMSK 0xf +#define GSI_GSI_UC_SRC_IRQ_CLR_IC_2_UC_MCS_INT_VLD_BMSK 0x8 +#define GSI_GSI_UC_SRC_IRQ_CLR_IC_2_UC_MCS_INT_VLD_SHFT 0x3 +#define GSI_GSI_UC_SRC_IRQ_CLR_ACC_2_UC_MCS_GO_ACK_BMSK 0x4 +#define GSI_GSI_UC_SRC_IRQ_CLR_ACC_2_UC_MCS_GO_ACK_SHFT 0x2 +#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_CMPLT_BMSK 0x2 +#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_CMPLT_SHFT 0x1 +#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_GO_BMSK 0x1 +#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_GO_SHFT 0x0 + +#define GSI_GSI_ACC_ARGS_n_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000050c + 0x4 * (n)) +#define GSI_GSI_ACC_ARGS_n_RMSK 0xffffffff +#define GSI_GSI_ACC_ARGS_n_MAXn 5 +#define GSI_GSI_ACC_ARGS_n_GSI_ACC_ARGS_BMSK 0xffffffff +#define GSI_GSI_ACC_ARGS_n_GSI_ACC_ARGS_SHFT 0x0 + +#define GSI_GSI_ACC_ROUTINE_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000524) +#define GSI_GSI_ACC_ROUTINE_RMSK 0xffffffff +#define GSI_GSI_ACC_ROUTINE_GSI_ACC_ROUTINE_BMSK 0xffffffff +#define GSI_GSI_ACC_ROUTINE_GSI_ACC_ROUTINE_SHFT 0x0 + +#define GSI_GSI_ACC_GO_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000528) +#define GSI_GSI_ACC_GO_RMSK 0x7f +#define GSI_GSI_ACC_GO_TIMER_GO_BMSK 0x40 +#define GSI_GSI_ACC_GO_TIMER_GO_SHFT 0x6 +#define GSI_GSI_ACC_GO_RW_ENG_GO_BMSK 0x20 +#define GSI_GSI_ACC_GO_RW_ENG_GO_SHFT 0x5 +#define GSI_GSI_ACC_GO_INT_ENG_GO_BMSK 0x10 +#define GSI_GSI_ACC_GO_INT_ENG_GO_SHFT 0x4 +#define GSI_GSI_ACC_GO_TLV_OUT_GO_BMSK 0x8 +#define GSI_GSI_ACC_GO_TLV_OUT_GO_SHFT 0x3 +#define GSI_GSI_ACC_GO_CSR_GO_BMSK 0x4 +#define GSI_GSI_ACC_GO_CSR_GO_SHFT 0x2 +#define GSI_GSI_ACC_GO_RE_ENG_GO_BMSK 0x2 +#define GSI_GSI_ACC_GO_RE_ENG_GO_SHFT 0x1 +#define GSI_GSI_ACC_GO_EV_ENG_GO_BMSK 0x1 +#define GSI_GSI_ACC_GO_EV_ENG_GO_SHFT 0x0 + +#define GSI_GSI_ACC_2_UC_MCS_STTS_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000052c) +#define GSI_GSI_ACC_2_UC_MCS_STTS_RMSK 0xffffffff +#define GSI_GSI_ACC_2_UC_MCS_STTS_GSI_ACC_2_UC_MCS_STTS_BMSK 0xffffffff +#define GSI_GSI_ACC_2_UC_MCS_STTS_GSI_ACC_2_UC_MCS_STTS_SHFT 0x0 + +#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000530) +#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_RMSK 0xffffffff +#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_GSI_ACC_2_UC_MCS_RET_VAL_BMSK \ + 0xffffffff +#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_GSI_ACC_2_UC_MCS_RET_VAL_SHFT \ + 0x0 + +#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000534) +#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_RMSK 0xffffffff +#define 
GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_GSI_ACC_2_UC_MCS_RET_VAL_BMSK \ + 0xffffffff +#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_GSI_ACC_2_UC_MCS_RET_VAL_SHFT \ + 0x0 + +#define GSI_GSI_IC_2_UC_MCS_VLD_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000538) +#define GSI_GSI_IC_2_UC_MCS_VLD_RMSK 0xffffffff +#define GSI_GSI_IC_2_UC_MCS_VLD_GSI_IC_2_UC_MCS_VLD_BMSK 0xffffffff +#define GSI_GSI_IC_2_UC_MCS_VLD_GSI_IC_2_UC_MCS_VLD_SHFT 0x0 + +#define GSI_GSI_IC_2_UC_MCS_PC_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000053c) +#define GSI_GSI_IC_2_UC_MCS_PC_RMSK 0xffffffff +#define GSI_GSI_IC_2_UC_MCS_PC_GSI_IC_2_UC_MCS_PC_BMSK 0xffffffff +#define GSI_GSI_IC_2_UC_MCS_PC_GSI_IC_2_UC_MCS_PC_SHFT 0x0 + +#define GSI_GSI_IC_2_UC_MCS_ARGS_n_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00000540 + 0x4 * (n)) +#define GSI_GSI_IC_2_UC_MCS_ARGS_n_RMSK 0xffffffff +#define GSI_GSI_IC_2_UC_MCS_ARGS_n_MAXn 5 +#define GSI_GSI_IC_2_UC_MCS_ARGS_n_GSI_IC_2_UC_MCS_ARGS_BMSK 0xffffffff +#define GSI_GSI_IC_2_UC_MCS_ARGS_n_GSI_IC_2_UC_MCS_ARGS_SHFT 0x0 + +#define GSI_GSI_UC_TLV_IN_VLD_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000558) +#define GSI_GSI_UC_TLV_IN_VLD_RMSK 0x1 +#define GSI_GSI_UC_TLV_IN_VLD_GSI_UC_TLV_IN_VLD_BMSK 0x1 +#define GSI_GSI_UC_TLV_IN_VLD_GSI_UC_TLV_IN_VLD_SHFT 0x0 + +#define GSI_GSI_UC_TLV_IN_ROUTINE_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000055c) +#define GSI_GSI_UC_TLV_IN_ROUTINE_RMSK 0xffffffff +#define GSI_GSI_UC_TLV_IN_ROUTINE_GSI_UC_TLV_IN_ROUTINE_BMSK 0xffffffff +#define GSI_GSI_UC_TLV_IN_ROUTINE_GSI_UC_TLV_IN_ROUTINE_SHFT 0x0 + +#define GSI_GSI_UC_TLV_IN_ARGS_n_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00000560 + 0x4 * (n)) +#define GSI_GSI_UC_TLV_IN_ARGS_n_RMSK 0xffffffff +#define GSI_GSI_UC_TLV_IN_ARGS_n_MAXn 5 +#define GSI_GSI_UC_TLV_IN_ARGS_n_GSI_UC_TLV_IN_ARGS_BMSK 0xffffffff +#define GSI_GSI_UC_TLV_IN_ARGS_n_GSI_UC_TLV_IN_ARGS_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c000 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_0_RMSK 0xfff7dfff +#define GSI_EE_n_GSI_CH_k_CNTXT_0_MAXk 30 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_MAXn 3 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7 +#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c004 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_1_RMSK 0xffff +#define GSI_EE_n_GSI_CH_k_CNTXT_1_MAXk 30 +#define GSI_EE_n_GSI_CH_k_CNTXT_1_MAXn 3 +#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff +#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c008 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_2_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_2_MAXk 30 +#define GSI_EE_n_GSI_CH_k_CNTXT_2_MAXn 3 +#define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff +#define 
GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c00c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_3_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_3_MAXk 30 +#define GSI_EE_n_GSI_CH_k_CNTXT_3_MAXn 3 +#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c010 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_4_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_4_MAXk 30 +#define GSI_EE_n_GSI_CH_k_CNTXT_4_MAXn 3 +#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c014 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_5_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_5_MAXk 30 +#define GSI_EE_n_GSI_CH_k_CNTXT_5_MAXn 3 +#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c018 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_6_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_6_MAXk 30 +#define GSI_EE_n_GSI_CH_k_CNTXT_6_MAXn 3 +#define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c01c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_CNTXT_7_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_7_MAXk 30 +#define GSI_EE_n_GSI_CH_k_CNTXT_7_MAXn 3 +#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c054 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 30 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 3 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c058 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 30 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 3 +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK 0xffff +#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_QOS_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c05c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_QOS_RMSK 0x303 +#define GSI_EE_n_GSI_CH_k_QOS_MAXk 30 +#define GSI_EE_n_GSI_CH_k_QOS_MAXn 3 +#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200 +#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9 +#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100 +#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8 +#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf +#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c060 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_0_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_0_MAXk 30 +#define 
GSI_EE_n_GSI_CH_k_SCRATCH_0_MAXn 3 +#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c064 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_MAXk 30 +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_MAXn 3 +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c068 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_MAXk 30 +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_MAXn 3 +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001c06c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_GSI_CH_k_SCRATCH_3_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_3_MAXk 30 +#define GSI_EE_n_GSI_CH_k_SCRATCH_3_MAXn 3 +#define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d000 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_0_RMSK 0xfff1ffff +#define GSI_EE_n_EV_CH_k_CNTXT_0_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_0_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000 +#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18 +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000 +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14 +#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000 +#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0 +#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4 +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf +#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d004 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_1_RMSK 0xffff +#define GSI_EE_n_EV_CH_k_CNTXT_1_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_1_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff +#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d008 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_2_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_2_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_2_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d00c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_3_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_3_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_3_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d010 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_4_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_4_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_4_MAXn 3 +#define 
GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d014 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_5_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_5_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_5_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d018 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_6_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_6_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_6_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d01c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_7_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_7_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_7_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d020 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_8_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_8_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_8_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10 +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff +#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d024 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_9_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_9_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_9_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d028 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_10_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_10_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_10_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d02c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_11_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_11_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_11_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d030 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_12_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_12_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_12_MAXn 3 +#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d034 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_CNTXT_13_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_13_MAXk 15 +#define GSI_EE_n_EV_CH_k_CNTXT_13_MAXn 3 +#define 
GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d048 + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_SCRATCH_0_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_SCRATCH_0_MAXk 15 +#define GSI_EE_n_EV_CH_k_SCRATCH_0_MAXn 3 +#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001d04c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_EE_n_EV_CH_k_SCRATCH_1_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_SCRATCH_1_MAXk 15 +#define GSI_EE_n_EV_CH_k_SCRATCH_1_MAXn 3 +#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001e000 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_MAXk 30 +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_MAXn 3 +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001e004 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_RMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_MAXk 30 +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_MAXn 3 +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001e100 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_EV_CH_k_DOORBELL_0_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_DOORBELL_0_MAXk 15 +#define GSI_EE_n_EV_CH_k_DOORBELL_0_MAXn 3 +#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0 + +#define GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001e104 + 0x4000 * (n) + 0x8 * (k)) +#define GSI_EE_n_EV_CH_k_DOORBELL_1_RMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_DOORBELL_1_MAXk 15 +#define GSI_EE_n_EV_CH_k_DOORBELL_1_MAXn 3 +#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff +#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0 + +#define GSI_EE_n_GSI_STATUS_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f000 + 0x4000 * (n)) +#define GSI_EE_n_GSI_STATUS_RMSK 0x1 +#define GSI_EE_n_GSI_STATUS_MAXn 3 +#define GSI_EE_n_GSI_STATUS_ENABLED_BMSK 0x1 +#define GSI_EE_n_GSI_STATUS_ENABLED_SHFT 0x0 + +#define GSI_EE_n_GSI_CH_CMD_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f008 + 0x4000 * (n)) +#define GSI_EE_n_GSI_CH_CMD_RMSK 0xff0000ff +#define GSI_EE_n_GSI_CH_CMD_MAXn 3 +#define GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK 0xff000000 +#define GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT 0x18 +#define GSI_EE_n_GSI_CH_CMD_CHID_BMSK 0xff +#define GSI_EE_n_GSI_CH_CMD_CHID_SHFT 0x0 + +#define GSI_EE_n_EV_CH_CMD_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f010 + 0x4000 * (n)) +#define GSI_EE_n_EV_CH_CMD_RMSK 0xff0000ff +#define GSI_EE_n_EV_CH_CMD_MAXn 3 +#define GSI_EE_n_EV_CH_CMD_OPCODE_BMSK 0xff000000 +#define GSI_EE_n_EV_CH_CMD_OPCODE_SHFT 0x18 +#define GSI_EE_n_EV_CH_CMD_CHID_BMSK 0xff +#define GSI_EE_n_EV_CH_CMD_CHID_SHFT 0x0 + +#define GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f018 + 0x4000 * (n)) +#define GSI_EE_n_GSI_EE_GENERIC_CMD_RMSK 
0xffffffff +#define GSI_EE_n_GSI_EE_GENERIC_CMD_MAXn 3 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0x1f +#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK 0x3e0 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT 0x5 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK 0x3c00 +#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT 0xa + +/* v1.0 */ +#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V1_0_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff +#define GSI_V1_0_EE_n_GSI_HW_PARAM_MAXn 3 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a +#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0 + +/* v1.2 */ +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n)) +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_MAXn 2 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0 + +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_MAXn 2 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \ + 0x80000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \ + 0x40000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19 +#define 
GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0 + +/* v1.3 */ +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n)) +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_MAXn 2 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0 + +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f03c + 0x4000 * (n)) +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_MAXn 2 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \ + 0x80000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \ + 0x40000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12 
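(Editorial aside, not part of the snapshot: the HW_PARAM register moved and was renamed across GSI revisions, so a caller has to pick the macro set matching the detected core version before reading a field. The sketch below shows one way to read the advertised channel count; the GSI_VER_* constants are assumed to follow enum gsi_ver from msm_gsi.h, readl_relaxed() needs <linux/io.h>, and the function name and fallback for post-v1.3 cores are assumptions made for this illustration.)

/*
 * Sketch: read GSI_CH_NUM from HW_PARAM(_0) for execution environment @ee,
 * selecting the offset/mask set by GSI core version.
 */
static inline u32 gsi_hw_param_ch_num(void __iomem *base, int ver, u32 ee)
{
	u32 val;

	if (ver == GSI_VER_1_0) {
		val = readl_relaxed(base +
			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(ee));
		return (val & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
			GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
	}
	if (ver == GSI_VER_1_2) {
		val = readl_relaxed(base +
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(ee));
		return (val & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
	}
	/* assume the v1.3 HW_PARAM_0 layout for v1.3 and later in this sketch */
	val = readl_relaxed(base + GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(ee));
	return (val & GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
		GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
}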
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0 + +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 + +/* v2.0 */ +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 +#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 + +#define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n)) +#define GSI_EE_n_GSI_SW_VERSION_RMSK 0xffffffff +#define GSI_EE_n_GSI_SW_VERSION_MAXn 3 +#define GSI_EE_n_GSI_SW_VERSION_MAJOR_BMSK 0xf0000000 +#define 
GSI_EE_n_GSI_SW_VERSION_MAJOR_SHFT 0x1c +#define GSI_EE_n_GSI_SW_VERSION_MINOR_BMSK 0xfff0000 +#define GSI_EE_n_GSI_SW_VERSION_MINOR_SHFT 0x10 +#define GSI_EE_n_GSI_SW_VERSION_STEP_BMSK 0xffff +#define GSI_EE_n_GSI_SW_VERSION_STEP_SHFT 0x0 + +#define GSI_EE_n_GSI_MCS_CODE_VER_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f048 + 0x4000 * (n)) +#define GSI_EE_n_GSI_MCS_CODE_VER_RMSK 0xffffffff +#define GSI_EE_n_GSI_MCS_CODE_VER_MAXn 3 +#define GSI_EE_n_GSI_MCS_CODE_VER_VER_BMSK 0xffffffff +#define GSI_EE_n_GSI_MCS_CODE_VER_VER_SHFT 0x0 + +#define GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f080 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_TYPE_IRQ_RMSK 0x7f +#define GSI_EE_n_CNTXT_TYPE_IRQ_MAXn 3 +#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40 +#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10 +#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8 +#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3 +#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0 + +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f088 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK 0x7f +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_MAXn 3 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK 0x20 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK 0x10 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1 +#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f090 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MAXn 3 +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f094 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MAXn 3 +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f098 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_MAXn 3 +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \ + 0xffffffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) \ + 
(GSI_GSI_REG_BASE_OFFS + 0x0001f09c + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_MAXn 3 +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \ + 0xffffffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f0a0 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_MAXn 3 +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f0a4 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_MAXn 3 +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f0b0 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MAXn 3 +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f0b8 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_MAXn 3 +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \ + 0xffffffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f0c0 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_MAXn 3 +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f100 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK 0xf +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_MAXn 3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f108 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_RMSK 0xf +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_MAXn 3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK 0x8 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_SHFT 0x3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK 0x4 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_SHFT 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_SHFT 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f110 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_RMSK 0xf +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_MAXn 3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_BMSK 
0x8 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_SHFT 0x3 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_BMSK 0x4 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_SHFT 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_BMSK 0x2 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_SHFT 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f118 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_RMSK 0xf +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_MAXn 3 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f120 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_RMSK 0xf +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_MAXn 3 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_BMSK 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_SHFT 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f128 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_RMSK 0xf +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_MAXn 3 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK 0x2 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_SHFT 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK 0x1 +#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_SHFT 0x0 + +#define GSI_EE_n_CNTXT_INTSET_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f180 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_INTSET_RMSK 0x1 +#define GSI_EE_n_CNTXT_INTSET_MAXn 3 +#define GSI_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1 +#define GSI_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0 + +#define GSI_EE_n_CNTXT_MSI_BASE_LSB_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f188 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_MSI_BASE_LSB_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_MSI_BASE_LSB_MAXn 3 +#define GSI_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_SHFT 0x0 + +#define GSI_EE_n_CNTXT_MSI_BASE_MSB_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f18c + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_MSI_BASE_MSB_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_MSI_BASE_MSB_MAXn 3 +#define GSI_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_SHFT 0x0 + +#define GSI_EE_n_CNTXT_INT_VEC_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f190 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_INT_VEC_RMSK 0xffffffff +#define 
GSI_EE_n_CNTXT_INT_VEC_MAXn 3 +#define GSI_EE_n_CNTXT_INT_VEC_INT_VEC_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_INT_VEC_INT_VEC_SHFT 0x0 + +#define GSI_EE_n_ERROR_LOG_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f200 + 0x4000 * (n)) +#define GSI_EE_n_ERROR_LOG_RMSK 0xffffffff +#define GSI_EE_n_ERROR_LOG_MAXn 3 +#define GSI_EE_n_ERROR_LOG_TODO_BMSK 0xffffffff +#define GSI_EE_n_ERROR_LOG_TODO_SHFT 0x0 + +#define GSI_EE_n_ERROR_LOG_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f210 + 0x4000 * (n)) +#define GSI_EE_n_ERROR_LOG_CLR_RMSK 0xffffffff +#define GSI_EE_n_ERROR_LOG_CLR_MAXn 3 +#define GSI_EE_n_ERROR_LOG_CLR_TODO_BMSK 0xffffffff +#define GSI_EE_n_ERROR_LOG_CLR_TODO_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SCRATCH_0_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f400 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SCRATCH_0_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_SCRATCH_0_MAXn 3 +#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0 + +#define GSI_EE_n_CNTXT_SCRATCH_1_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f404 + 0x4000 * (n)) +#define GSI_EE_n_CNTXT_SCRATCH_1_RMSK 0xffffffff +#define GSI_EE_n_CNTXT_SCRATCH_1_MAXn 3 +#define GSI_EE_n_CNTXT_SCRATCH_1_SCRATCH_BMSK 0xffffffff +#define GSI_EE_n_CNTXT_SCRATCH_1_SCRATCH_SHFT 0x0 + +#define GSI_INTER_EE_n_ORIGINATOR_EE_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c000 + 0x1000 * (n)) +#define GSI_INTER_EE_n_ORIGINATOR_EE_RMSK 0xf +#define GSI_INTER_EE_n_ORIGINATOR_EE_MAXn 3 +#define GSI_INTER_EE_n_ORIGINATOR_EE_EE_NUMBER_BMSK 0xf +#define GSI_INTER_EE_n_ORIGINATOR_EE_EE_NUMBER_SHFT 0x0 + +#define GSI_INTER_EE_n_GSI_CH_CMD_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c008 + 0x1000 * (n)) +#define GSI_INTER_EE_n_GSI_CH_CMD_RMSK 0xff0000ff +#define GSI_INTER_EE_n_GSI_CH_CMD_MAXn 3 +#define GSI_INTER_EE_n_GSI_CH_CMD_OPCODE_BMSK 0xff000000 +#define GSI_INTER_EE_n_GSI_CH_CMD_OPCODE_SHFT 0x18 +#define GSI_INTER_EE_n_GSI_CH_CMD_CHID_BMSK 0xff +#define GSI_INTER_EE_n_GSI_CH_CMD_CHID_SHFT 0x0 + +#define GSI_INTER_EE_n_EV_CH_CMD_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c010 + 0x1000 * (n)) +#define GSI_INTER_EE_n_EV_CH_CMD_RMSK 0xff0000ff +#define GSI_INTER_EE_n_EV_CH_CMD_MAXn 3 +#define GSI_INTER_EE_n_EV_CH_CMD_OPCODE_BMSK 0xff000000 +#define GSI_INTER_EE_n_EV_CH_CMD_OPCODE_SHFT 0x18 +#define GSI_INTER_EE_n_EV_CH_CMD_CHID_BMSK 0xff +#define GSI_INTER_EE_n_EV_CH_CMD_CHID_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c018 + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_RMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MAXn 3 +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c01c + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_RMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MAXn 3 +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c020 + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_RMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_MAXn 3 +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \ + 0x00003fff +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c024 + 0x1000 * (n)) +#define 
GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_RMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_MAXn 3 +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \ + 0x000003ff +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c028 + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_MAXn 3 +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0 + +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000c02c + 0x1000 * (n)) +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_MAXn 3 +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff +#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 + + +#endif /* __GSI_REG_H__ */ diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile new file mode 100644 index 000000000000..15ed471f383c --- /dev/null +++ b/drivers/platform/msm/ipa/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ ipa_common +obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common +obj-$(CONFIG_IPA_UT) += test/ + +ipa_common += ipa_api.o ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c new file mode 100644 index 000000000000..c446c164fa7c --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -0,0 +1,2997 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_api.h" + +#define DRV_NAME "ipa" + +#define IPA_API_DISPATCH_RETURN(api, p...) \ + do { \ + if (!ipa_api_ctrl) { \ + pr_err("%s:%d IPA HW is not supported\n", \ + __func__, __LINE__); \ + ret = -EPERM; \ + } \ + else { \ + if (ipa_api_ctrl->api) { \ + ret = ipa_api_ctrl->api(p); \ + } else { \ + WARN(1, \ + "%s not implemented for IPA ver %d\n", \ + __func__, ipa_api_hw_type); \ + ret = -EPERM; \ + } \ + } \ + } while (0) + +#define IPA_API_DISPATCH(api, p...) \ + do { \ + if (!ipa_api_ctrl) \ + pr_err("%s:%d IPA HW is not supported\n", \ + __func__, __LINE__); \ + else { \ + if (ipa_api_ctrl->api) { \ + ipa_api_ctrl->api(p); \ + } else { \ + WARN(1, \ + "%s not implemented for IPA ver %d\n",\ + __func__, ipa_api_hw_type); \ + } \ + } \ + } while (0) + +#define IPA_API_DISPATCH_RETURN_PTR(api, p...) \ + do { \ + if (!ipa_api_ctrl) { \ + pr_err("%s:%d IPA HW is not supported\n", \ + __func__, __LINE__); \ + ret = NULL; \ + } \ + else { \ + if (ipa_api_ctrl->api) { \ + ret = ipa_api_ctrl->api(p); \ + } else { \ + WARN(1, "%s not implemented for IPA ver %d\n",\ + __func__, ipa_api_hw_type); \ + ret = NULL; \ + } \ + } \ + } while (0) + +#define IPA_API_DISPATCH_RETURN_BOOL(api, p...) 
\ + do { \ + if (!ipa_api_ctrl) { \ + pr_err("%s:%d IPA HW is not supported\n", \ + __func__, __LINE__); \ + ret = false; \ + } \ + else { \ + if (ipa_api_ctrl->api) { \ + ret = ipa_api_ctrl->api(p); \ + } else { \ + WARN(1, "%s not implemented for IPA ver %d\n",\ + __func__, ipa_api_hw_type); \ + ret = false; \ + } \ + } \ + } while (0) + +static enum ipa_hw_type ipa_api_hw_type; +static struct ipa_api_controller *ipa_api_ctrl; + +const char *ipa_clients_strings[IPA_CLIENT_MAX] = { + __stringify(IPA_CLIENT_HSIC1_PROD), + __stringify(IPA_CLIENT_HSIC1_CONS), + __stringify(IPA_CLIENT_HSIC2_PROD), + __stringify(IPA_CLIENT_HSIC2_CONS), + __stringify(IPA_CLIENT_HSIC3_PROD), + __stringify(IPA_CLIENT_HSIC3_CONS), + __stringify(IPA_CLIENT_HSIC4_PROD), + __stringify(IPA_CLIENT_HSIC4_CONS), + __stringify(IPA_CLIENT_HSIC5_PROD), + __stringify(IPA_CLIENT_HSIC5_CONS), + __stringify(IPA_CLIENT_WLAN1_PROD), + __stringify(IPA_CLIENT_WLAN1_CONS), + __stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD), + __stringify(IPA_CLIENT_WLAN2_CONS), + __stringify(RESERVERD_PROD_14), + __stringify(IPA_CLIENT_WLAN3_CONS), + __stringify(RESERVERD_PROD_16), + __stringify(IPA_CLIENT_WLAN4_CONS), + __stringify(IPA_CLIENT_USB_PROD), + __stringify(IPA_CLIENT_USB_CONS), + __stringify(IPA_CLIENT_USB2_PROD), + __stringify(IPA_CLIENT_USB2_CONS), + __stringify(IPA_CLIENT_USB3_PROD), + __stringify(IPA_CLIENT_USB3_CONS), + __stringify(IPA_CLIENT_USB4_PROD), + __stringify(IPA_CLIENT_USB4_CONS), + __stringify(IPA_CLIENT_UC_USB_PROD), + __stringify(IPA_CLIENT_USB_DPL_CONS), + __stringify(IPA_CLIENT_A2_EMBEDDED_PROD), + __stringify(IPA_CLIENT_A2_EMBEDDED_CONS), + __stringify(IPA_CLIENT_A2_TETHERED_PROD), + __stringify(IPA_CLIENT_A2_TETHERED_CONS), + __stringify(IPA_CLIENT_APPS_LAN_PROD), + __stringify(IPA_CLIENT_APPS_LAN_CONS), + __stringify(IPA_CLIENT_APPS_WAN_PROD), + __stringify(IPA_CLIENT_APPS_WAN_CONS), + __stringify(IPA_CLIENT_APPS_CMD_PROD), + __stringify(IPA_CLIENT_A5_LAN_WAN_CONS), + __stringify(IPA_CLIENT_ODU_PROD), + __stringify(IPA_CLIENT_ODU_EMB_CONS), + __stringify(RESERVERD_PROD_40), + __stringify(IPA_CLIENT_ODU_TETH_CONS), + __stringify(IPA_CLIENT_MHI_PROD), + __stringify(IPA_CLIENT_MHI_CONS), + __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD), + __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS), + __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD), + __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS), + __stringify(IPA_CLIENT_ETHERNET_PROD), + __stringify(IPA_CLIENT_ETHERNET_CONS), + __stringify(IPA_CLIENT_Q6_LAN_PROD), + __stringify(IPA_CLIENT_Q6_LAN_CONS), + __stringify(IPA_CLIENT_Q6_WAN_PROD), + __stringify(IPA_CLIENT_Q6_WAN_CONS), + __stringify(IPA_CLIENT_Q6_CMD_PROD), + __stringify(IPA_CLIENT_Q6_DUN_CONS), + __stringify(IPA_CLIENT_Q6_DECOMP_PROD), + __stringify(IPA_CLIENT_Q6_DECOMP_CONS), + __stringify(IPA_CLIENT_Q6_DECOMP2_PROD), + __stringify(IPA_CLIENT_Q6_DECOMP2_CONS), + __stringify(RESERVERD_PROD_60), + __stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS), + __stringify(IPA_CLIENT_TEST_PROD), + __stringify(IPA_CLIENT_TEST_CONS), + __stringify(IPA_CLIENT_TEST1_PROD), + __stringify(IPA_CLIENT_TEST1_CONS), + __stringify(IPA_CLIENT_TEST2_PROD), + __stringify(IPA_CLIENT_TEST2_CONS), + __stringify(IPA_CLIENT_TEST3_PROD), + __stringify(IPA_CLIENT_TEST3_CONS), + __stringify(IPA_CLIENT_TEST4_PROD), + __stringify(IPA_CLIENT_TEST4_CONS), + __stringify(IPA_CLIENT_DUMMY_CONS), +}; + +/** + * ipa_write_64() - convert 64 bit value to byte array + * @w: 64 bit integer + * @dest: byte array + * + * Return value: converted value + */ +u8 *ipa_write_64(u64 w, u8 
*dest) +{ + if (unlikely(dest == NULL)) { + pr_err("%s: NULL address\n", __func__); + return dest; + } + *dest++ = (u8)((w) & 0xFF); + *dest++ = (u8)((w >> 8) & 0xFF); + *dest++ = (u8)((w >> 16) & 0xFF); + *dest++ = (u8)((w >> 24) & 0xFF); + *dest++ = (u8)((w >> 32) & 0xFF); + *dest++ = (u8)((w >> 40) & 0xFF); + *dest++ = (u8)((w >> 48) & 0xFF); + *dest++ = (u8)((w >> 56) & 0xFF); + + return dest; +} + +/** + * ipa_write_32() - convert 32 bit value to byte array + * @w: 32 bit integer + * @dest: byte array + * + * Return value: converted value + */ +u8 *ipa_write_32(u32 w, u8 *dest) +{ + if (unlikely(dest == NULL)) { + pr_err("%s: NULL address\n", __func__); + return dest; + } + *dest++ = (u8)((w) & 0xFF); + *dest++ = (u8)((w >> 8) & 0xFF); + *dest++ = (u8)((w >> 16) & 0xFF); + *dest++ = (u8)((w >> 24) & 0xFF); + + return dest; +} + +/** + * ipa_write_16() - convert 16 bit value to byte array + * @hw: 16 bit integer + * @dest: byte array + * + * Return value: converted value + */ +u8 *ipa_write_16(u16 hw, u8 *dest) +{ + if (unlikely(dest == NULL)) { + pr_err("%s: NULL address\n", __func__); + return dest; + } + *dest++ = (u8)((hw) & 0xFF); + *dest++ = (u8)((hw >> 8) & 0xFF); + + return dest; +} + +/** + * ipa_write_8() - convert 8 bit value to byte array + * @hw: 8 bit integer + * @dest: byte array + * + * Return value: converted value + */ +u8 *ipa_write_8(u8 b, u8 *dest) +{ + if (unlikely(dest == NULL)) { + WARN(1, "%s: NULL address\n", __func__); + return dest; + } + *dest++ = (b) & 0xFF; + + return dest; +} + +/** + * ipa_pad_to_64() - pad byte array to 64 bit value + * @dest: byte array + * + * Return value: padded value + */ +u8 *ipa_pad_to_64(u8 *dest) +{ + int i; + int j; + + if (unlikely(dest == NULL)) { + WARN(1, "%s: NULL address\n", __func__); + return dest; + } + + i = (long)dest & 0x7; + + if (i) + for (j = 0; j < (8 - i); j++) + *dest++ = 0; + + return dest; +} + +/** + * ipa_pad_to_32() - pad byte array to 32 bit value + * @dest: byte array + * + * Return value: padded value + */ +u8 *ipa_pad_to_32(u8 *dest) +{ + int i; + int j; + + if (unlikely(dest == NULL)) { + WARN(1, "%s: NULL address\n", __func__); + return dest; + } + + i = (long)dest & 0x7; + + if (i) + for (j = 0; j < (4 - i); j++) + *dest++ = 0; + + return dest; +} + +/** + * ipa_clear_endpoint_delay() - Clear ep_delay. 
+ * @clnt_hdl: [in] IPA client handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_clear_endpoint_delay(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_clear_endpoint_delay, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_clear_endpoint_delay); + +/** + * ipa_reset_endpoint() - reset an endpoint from BAM perspective + * @clnt_hdl: [in] IPA client handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_reset_endpoint(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_reset_endpoint, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_reset_endpoint); + +/** + * ipa_disable_endpoint() - Disable an endpoint from IPA perspective + * @clnt_hdl: [in] IPA client handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_disable_endpoint(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disable_endpoint, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_disable_endpoint); + + +/** + * ipa_cfg_ep - IPA end-point configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * This includes nat, header, mode, aggregation and route settings and is a one + * shot API to configure the IPA end-point fully + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep, clnt_hdl, ipa_ep_cfg); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep); + +/** + * ipa_cfg_ep_nat() - IPA end-point NAT configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_nat: [in] IPA NAT end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_nat, clnt_hdl, ep_nat); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_nat); + +/** + * ipa_cfg_ep_conn_track() - IPA end-point IPv6CT configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_conn_track: [in] IPA IPv6CT end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_conn_track(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ep_conn_track) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_conn_track, clnt_hdl, + ep_conn_track); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_conn_track); + +/** + * ipa_cfg_ep_hdr() - IPA end-point header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_hdr, clnt_hdl, ep_hdr); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_hdr); + +/** + * ipa_cfg_ep_hdr_ext() - IPA end-point extended header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_hdr_ext: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * 
Note: Should not be called from atomic context + */ +int ipa_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_hdr_ext, clnt_hdl, ep_hdr_ext); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_hdr_ext); + +/** + * ipa_cfg_ep_mode() - IPA end-point mode configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_mode, clnt_hdl, ep_mode); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_mode); + +/** + * ipa_cfg_ep_aggr() - IPA end-point aggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_aggr, clnt_hdl, ep_aggr); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_aggr); + +/** + * ipa_cfg_ep_deaggr() - IPA end-point deaggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_deaggr: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ep_deaggr) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_deaggr, clnt_hdl, ep_deaggr); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_deaggr); + +/** + * ipa_cfg_ep_route() - IPA end-point routing configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_route, clnt_hdl, ep_route); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_route); + +/** + * ipa_cfg_ep_holb() - IPA end-point holb configuration + * + * If an IPA producer pipe is full, IPA HW by default will block + * indefinitely till space opens up. During this time no packets + * including those from unrelated pipes will be processed. Enabling + * HOLB means IPA HW will be allowed to drop packets as/when needed + * and indefinite blocking is avoided. 
+ * + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_holb, clnt_hdl, ep_holb); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_holb); + + +/** + * ipa_cfg_ep_cfg() - IPA end-point cfg configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_cfg, clnt_hdl, cfg); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_cfg); + +/** + * ipa_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_cfg_ep_metadata_mask(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask + *metadata_mask) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_metadata_mask, clnt_hdl, + metadata_mask); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_metadata_mask); + +/** + * ipa_cfg_ep_holb_by_client() - IPA end-point holb configuration + * + * Wrapper function for ipa_cfg_ep_holb() with client name instead of + * client handle. This function is used for clients that does not have + * client handle. + * + * @client: [in] client name + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ep_holb) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_holb_by_client, client, ep_holb); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_holb_by_client); + +/** + * ipa_cfg_ep_ctrl() - IPA end-point Control configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_cfg_ep_ctrl, clnt_hdl, ep_ctrl); + + return ret; +} +EXPORT_SYMBOL(ipa_cfg_ep_ctrl); + +/** + * ipa_add_hdr() - add the specified headers to SW and optionally commit them to + * IPA HW + * @hdrs: [inout] set of headers to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_hdr, hdrs); + + return ret; +} +EXPORT_SYMBOL(ipa_add_hdr); + +/** + * ipa_del_hdr() - Remove the specified headers from SW and optionally + * commit them to IPA HW + * @hdls: [inout] set of headers to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_del_hdr, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_del_hdr); + +/** + * ipa_commit_hdr() - commit to IPA HW the current header table in SW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic 
context + */ +int ipa_commit_hdr(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_commit_hdr); + + return ret; +} +EXPORT_SYMBOL(ipa_commit_hdr); + +/** + * ipa_reset_hdr() - reset the current header table in SW (does not commit to + * HW) + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_reset_hdr(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_reset_hdr); + + return ret; +} +EXPORT_SYMBOL(ipa_reset_hdr); + +/** + * ipa_get_hdr() - Lookup the specified header resource + * @lookup: [inout] header to lookup and its handle + * + * lookup the specified header resource and return handle if it exists + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + * Caller should call ipa_put_hdr later if this function succeeds + */ +int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_hdr, lookup); + + return ret; +} +EXPORT_SYMBOL(ipa_get_hdr); + +/** + * ipa_put_hdr() - Release the specified header handle + * @hdr_hdl: [in] the header handle to release + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_put_hdr(u32 hdr_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_put_hdr, hdr_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_put_hdr); + +/** + * ipa_copy_hdr() - Lookup the specified header resource and return a copy of it + * @copy: [inout] header to lookup and its copy + * + * lookup the specified header resource and return a copy of it (along with its + * attributes) if it exists, this would be called for partial headers + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_copy_hdr, copy); + + return ret; +} +EXPORT_SYMBOL(ipa_copy_hdr); + +/** + * ipa_add_hdr_proc_ctx() - add the specified headers to SW + * and optionally commit them to IPA HW + * @proc_ctxs: [inout] set of processing context headers to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_hdr_proc_ctx, proc_ctxs); + + return ret; +} +EXPORT_SYMBOL(ipa_add_hdr_proc_ctx); + +/** + * ipa_del_hdr_proc_ctx() - + * Remove the specified processing context headers from SW and + * optionally commit them to IPA HW. 
+ * @hdls: [inout] set of processing context headers to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_del_hdr_proc_ctx, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_del_hdr_proc_ctx); + +/** + * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_rt_rule, rules); + + return ret; +} +EXPORT_SYMBOL(ipa_add_rt_rule); + +/** + * ipa_del_rt_rule() - Remove the specified routing rules to SW and optionally + * commit to IPA HW + * @hdls: [inout] set of routing rules to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_del_rt_rule, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_del_rt_rule); + +/** + * ipa_commit_rt_rule() - Commit the current SW routing table of specified type + * to IPA HW + * @ip: The family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_commit_rt(enum ipa_ip_type ip) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_commit_rt, ip); + + return ret; +} +EXPORT_SYMBOL(ipa_commit_rt); + +/** + * ipa_reset_rt() - reset the current SW routing table of specified type + * (does not commit to HW) + * @ip: The family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_reset_rt(enum ipa_ip_type ip) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_reset_rt, ip); + + return ret; +} +EXPORT_SYMBOL(ipa_reset_rt); + +/** + * ipa_get_rt_tbl() - lookup the specified routing table and return handle if it + * exists, if lookup succeeds the routing table ref cnt is increased + * @lookup: [inout] routing table to lookup and its handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + * Caller should call ipa_put_rt_tbl later if this function succeeds + */ +int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_rt_tbl, lookup); + + return ret; +} +EXPORT_SYMBOL(ipa_get_rt_tbl); + +/** + * ipa_put_rt_tbl() - Release the specified routing table handle + * @rt_tbl_hdl: [in] the routing table handle to release + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_put_rt_tbl(u32 rt_tbl_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_put_rt_tbl, rt_tbl_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_put_rt_tbl); + +/** + * ipa_query_rt_index() - find the routing table index + * which name and ip type are given as parameters + * @in: [out] the index of the wanted routing table + * + * Returns: the routing table which name is given as parameter, or NULL if it + * doesn't exist + */ +int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_query_rt_index, in); + + return ret; +} +EXPORT_SYMBOL(ipa_query_rt_index); + +/** + * ipa_mdfy_rt_rule() - Modify the specified routing rules 
in SW and optionally + * commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mdfy_rt_rule, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_mdfy_rt_rule); + +/** + * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally + * commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_flt_rule, rules); + + return ret; +} +EXPORT_SYMBOL(ipa_add_flt_rule); + +/** + * ipa_del_flt_rule() - Remove the specified filtering rules from SW and + * optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_del_flt_rule, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_del_flt_rule); + +/** + * ipa_mdfy_flt_rule() - Modify the specified filtering rules in SW and + * optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mdfy_flt_rule, hdls); + + return ret; +} +EXPORT_SYMBOL(ipa_mdfy_flt_rule); + +/** + * ipa_commit_flt() - Commit the current SW filtering table of specified type to + * IPA HW + * @ip: [in] the family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_commit_flt(enum ipa_ip_type ip) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_commit_flt, ip); + + return ret; +} +EXPORT_SYMBOL(ipa_commit_flt); + +/** + * ipa_reset_flt() - Reset the current SW filtering table of specified type + * (does not commit to HW) + * @ip: [in] the family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_reset_flt(enum ipa_ip_type ip) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_reset_flt, ip); + + return ret; +} +EXPORT_SYMBOL(ipa_reset_flt); + +/** + * allocate_nat_device() - Allocates memory for the NAT device + * @mem: [in/out] memory parameters + * + * Called by NAT client driver to allocate memory for the NAT entries. Based on + * the request size either shared or system memory will be used. 
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(allocate_nat_device, mem);
+
+	return ret;
+}
+EXPORT_SYMBOL(allocate_nat_device);
+
+/**
+ * ipa_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_nat_init_cmd, init);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_nat_init_cmd);
+
+/**
+ * ipa_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_nat_dma_cmd, dma);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_nat_dma_cmd);
+
+/**
+ * ipa_nat_del_cmd() - Delete a NAT table
+ * @del: [in] delete table parameters
+ *
+ * Called by NAT client driver to delete the NAT table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_nat_del_cmd, del);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_nat_del_cmd);
+
+/**
+ * ipa_send_msg() - Send "message" from kernel client to IPA driver
+ * @meta: [in] message meta-data
+ * @buff: [in] the payload for message
+ * @callback: [in] free callback
+ *
+ * Client supplies the message meta-data and payload which IPA driver buffers
+ * till read by user-space. After read from user space IPA driver invokes the
+ * callback supplied to free the message payload. Client must not touch/free
+ * the message payload after calling this API.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_send_msg, meta, buff, callback);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_send_msg);
+
+/**
+ * ipa_register_pull_msg() - register pull message type
+ * @meta: [in] message meta-data
+ * @callback: [in] pull callback
+ *
+ * Register message callback by kernel client with IPA driver for IPA driver to
+ * pull message on-demand.
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_register_pull_msg, meta, callback); + + return ret; +} +EXPORT_SYMBOL(ipa_register_pull_msg); + +/** + * ipa_deregister_pull_msg() - De-register pull message type + * @meta: [in] message meta-data + * + * De-register "message" by kernel client from IPA driver + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_deregister_pull_msg(struct ipa_msg_meta *meta) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_deregister_pull_msg, meta); + + return ret; +} +EXPORT_SYMBOL(ipa_deregister_pull_msg); + +/** + * ipa_register_intf() - register "logical" interface + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * + * Register an interface and its tx and rx properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_register_intf, name, tx, rx); + + return ret; +} +EXPORT_SYMBOL(ipa_register_intf); + +/** + * ipa_register_intf_ext() - register "logical" interface which has only + * extended properties + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * @ext: [in] EXT properties of the interface + * + * Register an interface and its tx, rx and ext properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_register_intf_ext, name, tx, rx, ext); + + return ret; +} +EXPORT_SYMBOL(ipa_register_intf_ext); + +/** + * ipa_deregister_intf() - de-register previously registered logical interface + * @name: [in] interface name + * + * De-register a previously registered interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_deregister_intf(const char *name) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_deregister_intf, name); + + return ret; +} +EXPORT_SYMBOL(ipa_deregister_intf); + +/** + * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting + * @mode: [in] the desired aggregation mode for e.g. straight MBIM, QCNCM, + * etc + * + * Returns: 0 on success + */ +int ipa_set_aggr_mode(enum ipa_aggr_mode mode) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_set_aggr_mode, mode); + + return ret; +} +EXPORT_SYMBOL(ipa_set_aggr_mode); + + +/** + * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation + * mode + * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be + * "QND") + * + * Set the NDP signature used for QCNCM aggregation mode. 
The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_qcncm_ndp_sig, sig);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
+
+/**
+ * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable: [in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns: 0 on success
+ */
+int ipa_set_single_ndp_per_mbim(bool enable)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_single_ndp_per_mbim, enable);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);
+
+/**
+ * ipa_tx_dp() - Data-path tx handler
+ * @dst: [in] which IPA destination to route tx packets to
+ * @skb: [in] the packet to send
+ * @metadata: [in] TX packet meta-data
+ *
+ * Data-path tx handler, this is used for both SW data-path which by-passes most
+ * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If
+ * dst is a "valid" CONS type, then SW data-path is used. If dst is the
+ * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
+ * is an error. For errors, client needs to free the skb as needed. For success,
+ * IPA driver will later invoke client callback if one was supplied. That
+ * callback should free the skb. If no callback is supplied, IPA driver will free
+ * the skb internally
+ *
+ * The function will use two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
+ * the first descriptor will be used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once this send was done from SPS point-of-view the IPA driver will
+ * get notified by the supplied callback - ipa_sps_irq_tx_comp()
+ *
+ * ipa_sps_irq_tx_comp will call to the user supplied
+ * callback (from ipa_connect)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+	struct ipa_tx_meta *meta)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_tx_dp, dst, skb, meta);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_tx_dp);
+
+/**
+ * ipa_tx_dp_mul() - Data-path tx handler for multiple packets
+ * @src: [in] - Client that is sending data
+ * @ipa_tx_data_desc: [in] data descriptors from wlan
+ *
+ * this is used to transfer data descriptors received
+ * from the WLAN1_PROD pipe to IPA HW
+ *
+ * The function will send data descriptors from WLAN1_PROD (one
+ * at a time) using sps_transfer_one.
Will set EOT flag for last + * descriptor Once this send was done from SPS point-of-view the + * IPA driver will get notified by the supplied callback - + * ipa_sps_irq_tx_no_aggr_notify() + * + * ipa_sps_irq_tx_no_aggr_notify will call to the user supplied + * callback (from ipa_connect) + * + * Returns: 0 on success, negative on failure + */ +int ipa_tx_dp_mul(enum ipa_client_type src, + struct ipa_tx_data_desc *data_desc) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_tx_dp_mul, src, data_desc); + + return ret; +} +EXPORT_SYMBOL(ipa_tx_dp_mul); + +void ipa_free_skb(struct ipa_rx_data *data) +{ + IPA_API_DISPATCH(ipa_free_skb, data); +} +EXPORT_SYMBOL(ipa_free_skb); + +/** + * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform + * IPA EP configuration + * @sys_in: [in] input needed to setup BAM pipe and configure EP + * @clnt_hdl: [out] client handle + * + * - configure the end-point registers with the supplied + * parameters from the user. + * - call SPS APIs to create a system-to-bam connection with IPA. + * - allocate descriptor FIFO + * - register callback function(ipa_sps_irq_rx_notify or + * ipa_sps_irq_tx_notify - depends on client type) in case the driver is + * not configured to pulling mode + * + * Returns: 0 on success, negative on failure + */ +int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_setup_sys_pipe, sys_in, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_setup_sys_pipe); + +/** + * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP + * @clnt_hdl: [in] the handle obtained from ipa_setup_sys_pipe + * + * Returns: 0 on success, negative on failure + */ +int ipa_teardown_sys_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_teardown_sys_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_teardown_sys_pipe); + +int ipa_sys_setup(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_bam_or_gsi_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status) + +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_sys_setup, sys_in, ipa_bam_or_gsi_hdl, + ipa_pipe_num, clnt_hdl, en_status); + + return ret; +} +EXPORT_SYMBOL(ipa_sys_setup); + +int ipa_sys_teardown(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_sys_teardown, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_sys_teardown); + +int ipa_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_sys_update_gsi_hdls, clnt_hdl, + gsi_ch_hdl, gsi_ev_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_sys_update_gsi_hdls); + +/** + * ipa_connect_wdi_pipe() - WDI client connect + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_connect_wdi_pipe, in, out); + + return ret; +} +EXPORT_SYMBOL(ipa_connect_wdi_pipe); + +/** + * ipa_disconnect_wdi_pipe() - WDI client disconnect + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_disconnect_wdi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disconnect_wdi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_disconnect_wdi_pipe); + +/** + * 
ipa_enable_wdi_pipe() - WDI client enable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_enable_wdi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_enable_wdi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_enable_wdi_pipe); + +/** + * ipa_disable_wdi_pipe() - WDI client disable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_disable_wdi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disable_wdi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_disable_wdi_pipe); + +/** + * ipa_resume_wdi_pipe() - WDI client resume + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_resume_wdi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_resume_wdi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_resume_wdi_pipe); + +/** + * ipa_suspend_wdi_pipe() - WDI client suspend + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_suspend_wdi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_suspend_wdi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_suspend_wdi_pipe); + +/** + * ipa_get_wdi_stats() - Query WDI statistics from uc + * @stats: [inout] stats blob from client populated by driver + * + * Returns: 0 on success, negative on failure + * + * @note Cannot be called from atomic context + * + */ +int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_wdi_stats, stats); + + return ret; +} +EXPORT_SYMBOL(ipa_get_wdi_stats); + +/** + * ipa_get_smem_restr_bytes()- Return IPA smem restricted bytes + * + * Return value: u16 - number of IPA smem restricted bytes + */ +u16 ipa_get_smem_restr_bytes(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_smem_restr_bytes); + + return ret; +} +EXPORT_SYMBOL(ipa_get_smem_restr_bytes); + +/** + * ipa_broadcast_wdi_quota_reach_ind() - quota reach + * @uint32_t fid: [in] input netdev ID + * @uint64_t num_bytes: [in] used bytes + * + * Returns: 0 on success, negative on failure + */ +int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid, + uint64_t num_bytes) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_broadcast_wdi_quota_reach_ind, + fid, num_bytes); + + return ret; +} +EXPORT_SYMBOL(ipa_broadcast_wdi_quota_reach_ind); + +/** + * ipa_uc_wdi_get_dbpa() - To retrieve + * doorbell physical address of wlan pipes + * @param: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_uc_wdi_get_dbpa( + struct ipa_wdi_db_params *param) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_wdi_get_dbpa, param); + + return ret; +} +EXPORT_SYMBOL(ipa_uc_wdi_get_dbpa); + +/** + * ipa_uc_reg_rdyCB() - To register uC + * ready CB if uC not ready + * @inout: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_uc_reg_rdyCB( + struct ipa_wdi_uc_ready_params *inout) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_reg_rdyCB, inout); + + return ret; +} +EXPORT_SYMBOL(ipa_uc_reg_rdyCB); 
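
The WDI wrappers above (connect/enable/resume and their suspend/disable/disconnect counterparts, plus ipa_uc_reg_rdyCB()) are thin dispatch stubs, so the intended call ordering is only implied by their kernel-doc. As a minimal sketch, not part of the patch and assuming the prototypes come from the <linux/ipa.h> header this snapshot adds, a WLAN offload client that already holds a connected pipe handle might drive the power and teardown paths as follows; the handle is assumed to have been obtained from the out-params of ipa_connect_wdi_pipe().

#include <linux/ipa.h>

/*
 * Illustrative sketch only, not part of this patch: runtime suspend/resume
 * and teardown of an already-connected WDI pipe using the wrappers above.
 * 'hdl' is assumed to be the opaque client handle reported by the connect
 * step; error handling is reduced to early returns.
 */
static int example_wdi_power_cycle(u32 hdl)
{
	int ret;

	/* quiesce the pipe before entering a low-power period */
	ret = ipa_suspend_wdi_pipe(hdl);
	if (ret)
		return ret;

	/* ... low-power period ... */

	/* bring the pipe back when traffic is expected again */
	return ipa_resume_wdi_pipe(hdl);
}

static void example_wdi_teardown(u32 hdl)
{
	/* disable first, then disconnect, mirroring the connect/enable order */
	ipa_disable_wdi_pipe(hdl);
	ipa_disconnect_wdi_pipe(hdl);
}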
+ +/** + * ipa_uc_dereg_rdyCB() - To de-register uC ready CB + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_uc_dereg_rdyCB(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_dereg_rdyCB); + + return ret; +} +EXPORT_SYMBOL(ipa_uc_dereg_rdyCB); + +/** + * teth_bridge_init() - Initialize the Tethering bridge driver + * @params - in/out params for USB initialization API (please look at struct + * definition for more info) + * + * USB driver gets a pointer to a callback function (usb_notify_cb) and an + * associated data. USB driver installs this callback function in the call to + * ipa_connect(). + * + * Builds IPA resource manager dependency graph. + * + * Return codes: 0: success, + * -EINVAL - Bad parameter + * Other negative value - Failure + */ +int teth_bridge_init(struct teth_bridge_init_params *params) +{ + int ret; + + IPA_API_DISPATCH_RETURN(teth_bridge_init, params); + + return ret; +} +EXPORT_SYMBOL(teth_bridge_init); + +/** + * teth_bridge_disconnect() - Disconnect tethering bridge module + */ +int teth_bridge_disconnect(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(teth_bridge_disconnect, client); + + return ret; +} +EXPORT_SYMBOL(teth_bridge_disconnect); + +/** + * teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call + * @connect_params: Connection info + * + * Return codes: 0: success + * -EINVAL: invalid parameters + * -EPERM: Operation not permitted as the bridge is already + * connected + */ +int teth_bridge_connect(struct teth_bridge_connect_params *connect_params) +{ + int ret; + + IPA_API_DISPATCH_RETURN(teth_bridge_connect, connect_params); + + return ret; +} +EXPORT_SYMBOL(teth_bridge_connect); + +/* ipa_set_client() - provide client mapping + * @client: client type + * + * Return value: none + */ + +void ipa_set_client(int index, enum ipacm_client_enum client, bool uplink) +{ + IPA_API_DISPATCH(ipa_set_client, index, client, uplink); +} +EXPORT_SYMBOL(ipa_set_client); + +/** + * ipa_get_client() - provide client mapping + * @client: client type + * + * Return value: none + */ +enum ipacm_client_enum ipa_get_client(int pipe_idx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_client, pipe_idx); + + return ret; +} +EXPORT_SYMBOL(ipa_get_client); + +/** + * ipa_get_client_uplink() - provide client mapping + * @client: client type + * + * Return value: none + */ +bool ipa_get_client_uplink(int pipe_idx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_client_uplink, pipe_idx); + + return ret; +} +EXPORT_SYMBOL(ipa_get_client_uplink); + +/** + * ipa_dma_init() -Initialize IPADMA. + * + * This function initialize all IPADMA internal data and connect in dma: + * MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS + * MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_SYNC_CONS + * + * Return codes: 0: success + * -EFAULT: IPADMA is already initialized + * -ENOMEM: allocating memory error + * -EPERM: pipe connection failed + */ +int ipa_dma_init(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_dma_init); + + return ret; +} +EXPORT_SYMBOL(ipa_dma_init); + +/** + * ipa_dma_enable() -Vote for IPA clocks. + * + *Return codes: 0: success + * -EINVAL: IPADMA is not initialized + * -EPERM: Operation not permitted as ipa_dma is already + * enabled + */ +int ipa_dma_enable(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_dma_enable); + + return ret; +} +EXPORT_SYMBOL(ipa_dma_enable); + +/** + * ipa_dma_disable()- Unvote for IPA clocks. + * + * enter to power save mode. 
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *			disabled
+ *		-EFAULT: can not disable ipa_dma as there are pending
+ *			memcopy works
+ */
+int ipa_dma_disable(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_disable);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_disable);
+
+/**
+ * ipa_dma_sync_memcpy() - Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: other
+ */
+int ipa_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_sync_memcpy, dest, src, len);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_sync_memcpy);
+
+/**
+ * ipa_dma_async_memcpy() - Perform asynchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * @user_cb: callback function to notify the client when the copy was done.
+ * @user_param: cookie for user_cb.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: descr fifo is full.
+ */
+int ipa_dma_async_memcpy(u64 dest, u64 src, int len,
+		void (*user_cb)(void *user1), void *user_param)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_async_memcpy, dest, src, len, user_cb,
+			user_param);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_async_memcpy);
+
+/**
+ * ipa_dma_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-EBADF: IPA uC is not loaded
+ */
+int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_uc_memcpy, dest, src, len);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_uc_memcpy);
+
+/**
+ * ipa_dma_destroy() - teardown IPADMA pipes and release IPADMA.
+ *
+ * This is a blocking function; it returns just after destroying IPADMA.
+ */
+void ipa_dma_destroy(void)
+{
+	IPA_API_DISPATCH(ipa_dma_destroy);
+}
+EXPORT_SYMBOL(ipa_dma_destroy);
+
+int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_init_engine, params);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_mhi_init_engine);
+
+/**
+ * ipa_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel start.
+ * This function is called after MHI engine was started.
+ * This function is doing the following: + * - Send command to uC to start corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_connect_mhi_pipe, in, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_connect_mhi_pipe); + +/** + * ipa_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding + * MHI channel + * @in: connect parameters + * @clnt_hdl: [out] client handle for this pipe + * + * This function is called by IPA MHI client driver on MHI channel reset. + * This function is called after MHI channel was started. + * This function is doing the following: + * - Send command to uC to reset corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa_disconnect_mhi_pipe(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disconnect_mhi_pipe, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_disconnect_mhi_pipe); + +bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client) +{ + bool ret; + + IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_stop_gsi_channel, client); + + return ret; +} + +int ipa_uc_mhi_reset_channel(int channelHandle) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_reset_channel, channelHandle); + + return ret; +} + +bool ipa_mhi_sps_channel_empty(enum ipa_client_type client) +{ + bool ret; + + IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_sps_channel_empty, client); + + return ret; +} + +int ipa_qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_qmi_enable_force_clear_datapath_send, req); + + return ret; +} + +int ipa_qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_qmi_disable_force_clear_datapath_send, req); + + return ret; +} + +int ipa_generate_tag_process(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_generate_tag_process); + + return ret; +} + +int ipa_disable_sps_pipe(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disable_sps_pipe, client); + + return ret; +} + +int ipa_mhi_reset_channel_internal(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mhi_reset_channel_internal, client); + + return ret; +} + +int ipa_mhi_start_channel_internal(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mhi_start_channel_internal, client); + + return ret; +} + +void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb) +{ + IPA_API_DISPATCH(ipa_get_holb, ep_idx, holb); +} + +void ipa_set_tag_process_before_gating(bool val) +{ + IPA_API_DISPATCH(ipa_set_tag_process_before_gating, val); +} + +int ipa_mhi_query_ch_info(enum ipa_client_type client, + struct gsi_chan_info *ch_info) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mhi_query_ch_info, client, ch_info); + + return ret; +} + +int ipa_uc_mhi_suspend_channel(int channelHandle) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_suspend_channel, channelHandle); + + return ret; +} + +int ipa_uc_mhi_stop_event_update_channel(int channelHandle) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_stop_event_update_channel, + channelHandle); + + return ret; +} + +bool ipa_has_open_aggr_frame(enum ipa_client_type client) +{ + bool ret; + + IPA_API_DISPATCH_RETURN_BOOL(ipa_has_open_aggr_frame, client); + 
+ return ret; +} + +int ipa_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mhi_resume_channels_internal, client, + LPTransitionRejected, brstmode_enabled, ch_scratch, + index); + + return ret; +} + +int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_send_dl_ul_sync_info, + cmd); + + return ret; +} + +int ipa_mhi_destroy_channel(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_mhi_destroy_channel, client); + + return ret; +} + +int ipa_uc_mhi_init(void (*ready_cb)(void), + void (*wakeup_request_cb)(void)) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_init, ready_cb, wakeup_request_cb); + + return ret; +} + +void ipa_uc_mhi_cleanup(void) +{ + IPA_API_DISPATCH(ipa_uc_mhi_cleanup); +} + +int ipa_uc_mhi_print_stats(char *dbg_buff, int size) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_mhi_print_stats, dbg_buff, size); + + return ret; +} + +/** + * ipa_uc_state_check() - Check the status of the uC interface + * + * Return value: 0 if the uC is loaded, interface is initialized + * and there was no recent failure in one of the commands. + * A negative value is returned otherwise. + */ +int ipa_uc_state_check(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_uc_state_check); + + return ret; +} + +int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_write_qmap_id, param_in); + + return ret; +} +EXPORT_SYMBOL(ipa_write_qmap_id); + +/** + * ipa_add_interrupt_handler() - Adds handler to an interrupt type + * @interrupt: Interrupt type + * @handler: The handler to be added + * @deferred_flag: whether the handler processing should be deferred in + * a workqueue + * @private_data: the client's private data + * + * Adds handler to an interrupt type and enable the specific bit + * in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled + */ +int ipa_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_interrupt_handler, interrupt, handler, + deferred_flag, private_data); + + return ret; +} +EXPORT_SYMBOL(ipa_add_interrupt_handler); + +/** + * ipa_remove_interrupt_handler() - Removes handler to an interrupt type + * @interrupt: Interrupt type + * + * Removes the handler and disable the specific bit in IRQ_EN register + */ +int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_remove_interrupt_handler, interrupt); + + return ret; +} +EXPORT_SYMBOL(ipa_remove_interrupt_handler); + +/** + * ipa_restore_suspend_handler() - restores the original suspend IRQ handler + * as it was registered in the IPA init sequence. 
+ * Return codes: + * 0: success + * -EPERM: failed to remove current handler or failed to add original handler + */ +int ipa_restore_suspend_handler(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_restore_suspend_handler); + + return ret; +} +EXPORT_SYMBOL(ipa_restore_suspend_handler); + +/** + * ipa_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM + * + * Function is rate limited to avoid flooding kernel log buffer + */ +void ipa_bam_reg_dump(void) +{ + IPA_API_DISPATCH(ipa_bam_reg_dump); +} +EXPORT_SYMBOL(ipa_bam_reg_dump); + +/** + * ipa_get_ep_mapping() - provide endpoint mapping + * @client: client type + * + * Return value: endpoint mapping + */ +int ipa_get_ep_mapping(enum ipa_client_type client) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_ep_mapping, client); + + return ret; +} +EXPORT_SYMBOL(ipa_get_ep_mapping); + +/** + * ipa_is_ready() - check if IPA module was initialized + * successfully + * + * Return value: true for yes; false for no + */ +bool ipa_is_ready(void) +{ + if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_is_ready) + return false; + return ipa_api_ctrl->ipa_is_ready(); +} +EXPORT_SYMBOL(ipa_is_ready); + +/** + * ipa_proxy_clk_vote() - called to add IPA clock proxy vote + * + * Return value: none + */ +void ipa_proxy_clk_vote(void) +{ + IPA_API_DISPATCH(ipa_proxy_clk_vote); +} +EXPORT_SYMBOL(ipa_proxy_clk_vote); + +/** + * ipa_proxy_clk_unvote() - called to remove IPA clock proxy vote + * + * Return value: none + */ +void ipa_proxy_clk_unvote(void) +{ + IPA_API_DISPATCH(ipa_proxy_clk_unvote); +} +EXPORT_SYMBOL(ipa_proxy_clk_unvote); + +/** + * ipa_get_hw_type() - Return IPA HW version + * + * Return value: enum ipa_hw_type + */ +enum ipa_hw_type ipa_get_hw_type(void) +{ + return ipa_api_hw_type; +} +EXPORT_SYMBOL(ipa_get_hw_type); + +/** + * ipa_is_client_handle_valid() - check if IPA client handle is valid handle + * + * Return value: true for yes; false for no + */ +bool ipa_is_client_handle_valid(u32 clnt_hdl) +{ + if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_is_client_handle_valid) + return false; + return ipa_api_ctrl->ipa_is_client_handle_valid(clnt_hdl); +} +EXPORT_SYMBOL(ipa_is_client_handle_valid); + +/** + * ipa_get_client_mapping() - provide client mapping + * @pipe_idx: IPA end-point number + * + * Return value: client mapping + */ +enum ipa_client_type ipa_get_client_mapping(int pipe_idx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_client_mapping, pipe_idx); + + return ret; +} +EXPORT_SYMBOL(ipa_get_client_mapping); + +/** + * ipa_get_rm_resource_from_ep() - get the IPA_RM resource which is related to + * the supplied pipe index. + * + * @pipe_idx: + * + * Return value: IPA_RM resource related to the pipe, -1 if a resource was not + * found. 
+ */ +enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_rm_resource_from_ep, pipe_idx); + + return ret; +} +EXPORT_SYMBOL(ipa_get_rm_resource_from_ep); + +/** + * ipa_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt + * + * Return value: true if modem configures embedded pipe flt, false otherwise + */ +bool ipa_get_modem_cfg_emb_pipe_flt(void) +{ + if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_get_modem_cfg_emb_pipe_flt) + return false; + return ipa_api_ctrl->ipa_get_modem_cfg_emb_pipe_flt(); +} +EXPORT_SYMBOL(ipa_get_modem_cfg_emb_pipe_flt); + +/** + * ipa_get_transport_type()- Return ipa_ctx->transport_prototype + * + * Return value: enum ipa_transport_type + */ +enum ipa_transport_type ipa_get_transport_type(void) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_get_transport_type); + + return ret; +} +EXPORT_SYMBOL(ipa_get_transport_type); + +/** + * ipa_get_smmu_domain()- Return the smmu domain + * + * Return value: pointer to iommu domain if smmu_cb valid, NULL otherwise + */ +struct iommu_domain *ipa_get_smmu_domain(void) +{ + struct iommu_domain *ret; + + IPA_API_DISPATCH_RETURN_PTR(ipa_get_smmu_domain); + + return ret; +} +EXPORT_SYMBOL(ipa_get_smmu_domain); + +/** + * ipa_disable_apps_wan_cons_deaggr()- set + * ipa_ctx->ipa_client_apps_wan_cons_agg_gro + * + * Return value: 0 or negative in case of failure + */ +int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disable_apps_wan_cons_deaggr, agg_size, + agg_count); + + return ret; +} +EXPORT_SYMBOL(ipa_disable_apps_wan_cons_deaggr); + +/** + * ipa_get_dma_dev()- Returns ipa_ctx dma dev pointer + * + * Return value: pointer to ipa_ctx dma dev pointer + */ +struct device *ipa_get_dma_dev(void) +{ + struct device *ret; + + IPA_API_DISPATCH_RETURN_PTR(ipa_get_dma_dev); + + return ret; +} +EXPORT_SYMBOL(ipa_get_dma_dev); + +/** + * ipa_release_wdi_mapping() - release iommu mapping + * + * + * @num_buffers: number of buffers to be released + * + * @info: pointer to wdi buffers info array + * + * Return codes: 0 : success + * negative : error + */ +int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_release_wdi_mapping, num_buffers, info); + + return ret; +} +EXPORT_SYMBOL(ipa_release_wdi_mapping); + +/** + * ipa_create_wdi_mapping() - Perform iommu mapping + * + * + * @num_buffers: number of buffers to be mapped + * + * @info: pointer to wdi buffers info array + * + * Return codes: 0 : success + * negative : error + */ +int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_create_wdi_mapping, num_buffers, info); + + return ret; +} +EXPORT_SYMBOL(ipa_create_wdi_mapping); + +/** + * ipa_get_gsi_ep_info() - provide gsi ep information + * @client: IPA client type + * + * Return value: pointer to ipa_gsi_ep_info + */ +const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(enum ipa_client_type client) +{ + if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_get_gsi_ep_info) + return NULL; + return ipa_api_ctrl->ipa_get_gsi_ep_info(client); +} +EXPORT_SYMBOL(ipa_get_gsi_ep_info); + +/** + * ipa_stop_gsi_channel()- Stops a GSI channel in IPA + * + * Return value: 0 on success, negative otherwise + */ +int ipa_stop_gsi_channel(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_stop_gsi_channel, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_stop_gsi_channel); + 
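
ipa_stop_gsi_channel() above and ipa_start_gsi_channel() just below take the same opaque client handle and return 0 on success, so a client that must quiesce its channel around an endpoint change can pair them directly. A minimal sketch, not part of the patch, assuming the prototypes come from <linux/ipa.h> and with the actual reconfiguration left as a hypothetical elided step:

#include <linux/ipa.h>

/*
 * Illustrative sketch only, not part of this patch: stop a GSI channel,
 * perform whatever endpoint reconfiguration the caller needs (elided),
 * then restart the channel with the same handle.
 */
static int example_gsi_channel_restart(u32 clnt_hdl)
{
	int ret;

	ret = ipa_stop_gsi_channel(clnt_hdl);
	if (ret)
		return ret;

	/* endpoint reconfiguration would go here */

	return ipa_start_gsi_channel(clnt_hdl);
}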
+/** + * ipa_start_gsi_channel()- Startsa GSI channel in IPA + * + * Return value: 0 on success, negative otherwise + */ +int ipa_start_gsi_channel(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_start_gsi_channel, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_start_gsi_channel); + +/** + * ipa_get_version_string() - Get string representation of IPA version + * @ver: IPA version + * + * Return: Constant string representation + */ +const char *ipa_get_version_string(enum ipa_hw_type ver) +{ + const char *str; + + switch (ver) { + case IPA_HW_v1_0: + str = "1.0"; + break; + case IPA_HW_v1_1: + str = "1.1"; + break; + case IPA_HW_v2_0: + str = "2.0"; + break; + case IPA_HW_v2_1: + str = "2.1"; + break; + case IPA_HW_v2_5: + str = "2.5/2.6"; + break; + case IPA_HW_v2_6L: + str = "2.6L"; + break; + case IPA_HW_v3_0: + str = "3.0"; + break; + case IPA_HW_v3_1: + str = "3.1"; + break; + case IPA_HW_v3_5: + str = "3.5"; + break; + case IPA_HW_v3_5_1: + str = "3.5.1"; + break; + case IPA_HW_v4_0: + str = "4.0"; + break; + default: + str = "Invalid version"; + break; + } + + return str; +} +EXPORT_SYMBOL(ipa_get_version_string); + +static const struct of_device_id ipa_plat_drv_match[] = { + { .compatible = "qcom,ipa", }, + { .compatible = "qcom,ipa-smmu-ap-cb", }, + { .compatible = "qcom,ipa-smmu-wlan-cb", }, + { .compatible = "qcom,ipa-smmu-uc-cb", }, + { .compatible = "qcom,smp2pgpio-map-ipa-1-in", }, + { .compatible = "qcom,smp2pgpio-map-ipa-1-out", }, + {} +}; + +static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p) +{ + int result; + + /* + * IPA probe function can be called for multiple times as the same probe + * function handles multiple compatibilities + */ + pr_debug("ipa: IPA driver probing started for %s\n", + pdev_p->dev.of_node->name); + + if (!ipa_api_ctrl) { + ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL); + if (!ipa_api_ctrl) + return -ENOMEM; + + /* Get IPA HW Version */ + result = of_property_read_u32(pdev_p->dev.of_node, + "qcom,ipa-hw-ver", &ipa_api_hw_type); + if ((result) || (ipa_api_hw_type == 0)) { + pr_err("ipa: get resource failed for ipa-hw-ver!\n"); + kfree(ipa_api_ctrl); + ipa_api_ctrl = 0; + return -ENODEV; + } + pr_debug("ipa: ipa_api_hw_type = %d", ipa_api_hw_type); + } + + /* call probe based on IPA HW version */ + switch (ipa_api_hw_type) { + case IPA_HW_v2_0: + case IPA_HW_v2_1: + case IPA_HW_v2_5: + case IPA_HW_v2_6L: + result = ipa_plat_drv_probe(pdev_p, ipa_api_ctrl, + ipa_plat_drv_match); + break; + case IPA_HW_v3_0: + case IPA_HW_v3_1: + case IPA_HW_v3_5: + case IPA_HW_v3_5_1: + case IPA_HW_v4_0: + result = ipa3_plat_drv_probe(pdev_p, ipa_api_ctrl, + ipa_plat_drv_match); + break; + default: + pr_err("ipa: unsupported version %d\n", ipa_api_hw_type); + return -EPERM; + } + + if (result && result != -EPROBE_DEFER) + pr_err("ipa: ipa_plat_drv_probe failed\n"); + + return result; +} + +static int ipa_ap_suspend(struct device *dev) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_ap_suspend, dev); + + return ret; +} + +static int ipa_ap_resume(struct device *dev) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_ap_resume, dev); + + return ret; +} + +int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data), + void *user_data) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_register_ipa_ready_cb, + ipa_ready_cb, user_data); + + return ret; +} +EXPORT_SYMBOL(ipa_register_ipa_ready_cb); + +/** + * ipa_inc_client_enable_clks() - Increase active clients counter, and + * enable ipa clocks if necessary + * + * Please do not 
use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA_ACTIVE_CLIENTS_INC_XXX();
+ *
+ * Return codes:
+ * None
+ */
+void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+ IPA_API_DISPATCH(ipa_inc_client_enable_clks, id);
+}
+EXPORT_SYMBOL(ipa_inc_client_enable_clks);
+
+/**
+ * ipa_dec_client_disable_clks() - Decrease active clients counter, and
+ * disable ipa clocks if necessary
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA_ACTIVE_CLIENTS_DEC_XXX();
+ *
+ * Return codes:
+ * None
+ */
+void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
+{
+ IPA_API_DISPATCH(ipa_dec_client_disable_clks, id);
+}
+EXPORT_SYMBOL(ipa_dec_client_disable_clks);
+
+/**
+ * ipa_inc_client_enable_clks_no_block() - Only increment the number of active
+ * clients if no asynchronous actions should be done. Asynchronous actions are
+ * locking a mutex and waking up IPA HW.
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ *
+ * Return codes: 0 for success
+ * -EPERM if an asynchronous action should have been done
+ */
+int ipa_inc_client_enable_clks_no_block(
+ struct ipa_active_client_logging_info *id)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_inc_client_enable_clks_no_block, id);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_inc_client_enable_clks_no_block);
+
+/**
+ * ipa_suspend_resource_no_block() - suspend client endpoints related to the
+ * IPA_RM resource and decrement active clients counter. This function is
+ * guaranteed to avoid sleeping.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_suspend_resource_no_block, resource);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_resource_no_block);
+
+/**
+ * ipa_resume_resource() - resume client endpoints related to the IPA_RM
+ * resource.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_resume_resource(enum ipa_rm_resource_name resource)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_resume_resource, resource);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_resume_resource);
+
+/**
+ * ipa_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * resource and decrement active clients counter, which may result in clock
+ * gating of IPA clocks.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_suspend_resource_sync, resource);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_resource_sync);
+
+/**
+ * ipa_set_required_perf_profile() - set IPA to the specified performance
+ * profile based on the bandwidth, unless minimum voltage required is
+ * higher. In this case the floor_voltage specified will be used.
+ * @floor_voltage: minimum voltage to operate
+ * @bandwidth_mbps: needed bandwidth from IPA
+ *
+ * Return codes: 0 on success, negative on failure.
+ */ +int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_set_required_perf_profile, floor_voltage, + bandwidth_mbps); + + return ret; +} +EXPORT_SYMBOL(ipa_set_required_perf_profile); + +/** + * ipa_get_ipc_logbuf() - return a pointer to IPA driver IPC log + */ +void *ipa_get_ipc_logbuf(void) +{ + void *ret; + + IPA_API_DISPATCH_RETURN_PTR(ipa_get_ipc_logbuf); + + return ret; +} +EXPORT_SYMBOL(ipa_get_ipc_logbuf); + +/** + * ipa_get_ipc_logbuf_low() - return a pointer to IPA driver IPC low prio log + */ +void *ipa_get_ipc_logbuf_low(void) +{ + void *ret; + + IPA_API_DISPATCH_RETURN_PTR(ipa_get_ipc_logbuf_low); + + return ret; +} +EXPORT_SYMBOL(ipa_get_ipc_logbuf_low); + +/** + * ipa_assert() - general function for assertion + */ +void ipa_assert(void) +{ + pr_err("IPA: unrecoverable error has occurred, asserting\n"); + BUG(); +} + +/** + * ipa_rx_poll() - Poll the rx packets from IPA HW in the + * softirq context + * + * @budget: number of packets to be polled in single iteration + * + * Return codes: >= 0 : Actual number of packets polled + * + */ +int ipa_rx_poll(u32 clnt_hdl, int budget) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_rx_poll, clnt_hdl, budget); + + return ret; +} +EXPORT_SYMBOL(ipa_rx_poll); + +/** + * ipa_recycle_wan_skb() - Recycle the Wan skb + * + * @skb: skb that needs to recycle + * + */ +void ipa_recycle_wan_skb(struct sk_buff *skb) +{ + IPA_API_DISPATCH(ipa_recycle_wan_skb, skb); +} +EXPORT_SYMBOL(ipa_recycle_wan_skb); + +/** + * ipa_setup_uc_ntn_pipes() - setup uc offload pipes + */ +int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_setup_uc_ntn_pipes, inp, + notify, priv, hdr_len, outp); + + return ret; +} + +/** + * ipa_tear_down_uc_offload_pipes() - tear down uc offload pipes + */ +int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, + int ipa_ep_idx_dl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_tear_down_uc_offload_pipes, ipa_ep_idx_ul, + ipa_ep_idx_dl); + + return ret; +} + +/** + * ipa_get_pdev() - return a pointer to IPA dev struct + * + * Return value: a pointer to IPA dev struct + * + */ +struct device *ipa_get_pdev(void) +{ + struct device *ret; + + IPA_API_DISPATCH_RETURN_PTR(ipa_get_pdev); + + return ret; +} +EXPORT_SYMBOL(ipa_get_pdev); + +int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data), + void *user_data) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_ntn_uc_reg_rdyCB, + ipauc_ready_cb, user_data); + + return ret; +} +EXPORT_SYMBOL(ipa_ntn_uc_reg_rdyCB); + +void ipa_ntn_uc_dereg_rdyCB(void) +{ + IPA_API_DISPATCH(ipa_ntn_uc_dereg_rdyCB); +} +EXPORT_SYMBOL(ipa_ntn_uc_dereg_rdyCB); + + +static const struct dev_pm_ops ipa_pm_ops = { + .suspend_noirq = ipa_ap_suspend, + .resume_noirq = ipa_ap_resume, +}; + +static struct platform_driver ipa_plat_drv = { + .probe = ipa_generic_plat_drv_probe, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .pm = &ipa_pm_ops, + .of_match_table = ipa_plat_drv_match, + }, +}; + +static int __init ipa_module_init(void) +{ + pr_debug("IPA module init\n"); + + /* Register as a platform device driver */ + return platform_driver_register(&ipa_plat_drv); +} +subsys_initcall(ipa_module_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA HW device driver"); diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h new file mode 
100644 index 000000000000..139d65f4f3d3 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_api.h @@ -0,0 +1,411 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include "ipa_common_i.h" + +#ifndef _IPA_API_H_ +#define _IPA_API_H_ + +struct ipa_api_controller { + int (*ipa_reset_endpoint)(u32 clnt_hdl); + + int (*ipa_clear_endpoint_delay)(u32 clnt_hdl); + + int (*ipa_disable_endpoint)(u32 clnt_hdl); + + int (*ipa_cfg_ep)(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg); + + int (*ipa_cfg_ep_nat)(u32 clnt_hdl, + const struct ipa_ep_cfg_nat *ipa_ep_cfg); + + int (*ipa_cfg_ep_conn_track)(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ipa_ep_cfg); + + int (*ipa_cfg_ep_hdr)(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr *ipa_ep_cfg); + + int (*ipa_cfg_ep_hdr_ext)(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg); + + int (*ipa_cfg_ep_mode)(u32 clnt_hdl, + const struct ipa_ep_cfg_mode *ipa_ep_cfg); + + int (*ipa_cfg_ep_aggr)(u32 clnt_hdl, + const struct ipa_ep_cfg_aggr *ipa_ep_cfg); + + int (*ipa_cfg_ep_deaggr)(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ipa_ep_cfg); + + int (*ipa_cfg_ep_route)(u32 clnt_hdl, + const struct ipa_ep_cfg_route *ipa_ep_cfg); + + int (*ipa_cfg_ep_holb)(u32 clnt_hdl, + const struct ipa_ep_cfg_holb *ipa_ep_cfg); + + int (*ipa_cfg_ep_cfg)(u32 clnt_hdl, + const struct ipa_ep_cfg_cfg *ipa_ep_cfg); + + int (*ipa_cfg_ep_metadata_mask)(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg); + + int (*ipa_cfg_ep_holb_by_client)(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ipa_ep_cfg); + + int (*ipa_cfg_ep_ctrl)(u32 clnt_hdl, + const struct ipa_ep_cfg_ctrl *ep_ctrl); + + int (*ipa_add_hdr)(struct ipa_ioc_add_hdr *hdrs); + + int (*ipa_del_hdr)(struct ipa_ioc_del_hdr *hdls); + + int (*ipa_commit_hdr)(void); + + int (*ipa_reset_hdr)(void); + + int (*ipa_get_hdr)(struct ipa_ioc_get_hdr *lookup); + + int (*ipa_put_hdr)(u32 hdr_hdl); + + int (*ipa_copy_hdr)(struct ipa_ioc_copy_hdr *copy); + + int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs); + + int (*ipa_del_hdr_proc_ctx)(struct ipa_ioc_del_hdr_proc_ctx *hdls); + + int (*ipa_add_rt_rule)(struct ipa_ioc_add_rt_rule *rules); + + int (*ipa_del_rt_rule)(struct ipa_ioc_del_rt_rule *hdls); + + int (*ipa_commit_rt)(enum ipa_ip_type ip); + + int (*ipa_reset_rt)(enum ipa_ip_type ip); + + int (*ipa_get_rt_tbl)(struct ipa_ioc_get_rt_tbl *lookup); + + int (*ipa_put_rt_tbl)(u32 rt_tbl_hdl); + + int (*ipa_query_rt_index)(struct ipa_ioc_get_rt_tbl_indx *in); + + int (*ipa_mdfy_rt_rule)(struct ipa_ioc_mdfy_rt_rule *rules); + + int (*ipa_add_flt_rule)(struct ipa_ioc_add_flt_rule *rules); + + int (*ipa_del_flt_rule)(struct ipa_ioc_del_flt_rule *hdls); + + int (*ipa_mdfy_flt_rule)(struct ipa_ioc_mdfy_flt_rule *rules); + + int (*ipa_commit_flt)(enum ipa_ip_type ip); + + int (*ipa_reset_flt)(enum ipa_ip_type ip); + + int (*allocate_nat_device)(struct ipa_ioc_nat_alloc_mem *mem); + + int (*ipa_nat_init_cmd)(struct ipa_ioc_v4_nat_init *init); + + int (*ipa_nat_dma_cmd)(struct 
ipa_ioc_nat_dma_cmd *dma); + + int (*ipa_nat_del_cmd)(struct ipa_ioc_v4_nat_del *del); + + int (*ipa_send_msg)(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback); + + int (*ipa_register_pull_msg)(struct ipa_msg_meta *meta, + ipa_msg_pull_fn callback); + + int (*ipa_deregister_pull_msg)(struct ipa_msg_meta *meta); + + int (*ipa_register_intf)(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx); + + int (*ipa_register_intf_ext)(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext); + + int (*ipa_deregister_intf)(const char *name); + + int (*ipa_set_aggr_mode)(enum ipa_aggr_mode mode); + + int (*ipa_set_qcncm_ndp_sig)(char sig[3]); + + int (*ipa_set_single_ndp_per_mbim)(bool enable); + + int (*ipa_tx_dp)(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata); + + int (*ipa_tx_dp_mul)(enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc); + + void (*ipa_free_skb)(struct ipa_rx_data *); + + int (*ipa_setup_sys_pipe)(struct ipa_sys_connect_params *sys_in, + u32 *clnt_hdl); + + int (*ipa_teardown_sys_pipe)(u32 clnt_hdl); + + int (*ipa_sys_setup)(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_bam_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status); + + int (*ipa_sys_teardown)(u32 clnt_hdl); + + int (*ipa_sys_update_gsi_hdls)(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl); + + int (*ipa_connect_wdi_pipe)(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out); + + int (*ipa_disconnect_wdi_pipe)(u32 clnt_hdl); + + int (*ipa_enable_wdi_pipe)(u32 clnt_hdl); + + int (*ipa_disable_wdi_pipe)(u32 clnt_hdl); + + int (*ipa_resume_wdi_pipe)(u32 clnt_hdl); + + int (*ipa_suspend_wdi_pipe)(u32 clnt_hdl); + + int (*ipa_get_wdi_stats)(struct IpaHwStatsWDIInfoData_t *stats); + + u16 (*ipa_get_smem_restr_bytes)(void); + + int (*ipa_broadcast_wdi_quota_reach_ind)(uint32_t fid, + uint64_t num_bytes); + + int (*ipa_uc_wdi_get_dbpa)(struct ipa_wdi_db_params *out); + + int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param); + + int (*ipa_uc_dereg_rdyCB)(void); + + int (*teth_bridge_init)(struct teth_bridge_init_params *params); + + int (*teth_bridge_disconnect)(enum ipa_client_type client); + + int (*teth_bridge_connect)( + struct teth_bridge_connect_params *connect_params); + + void (*ipa_set_client)( + int index, enum ipacm_client_enum client, bool uplink); + + enum ipacm_client_enum (*ipa_get_client)(int pipe_idx); + + bool (*ipa_get_client_uplink)(int pipe_idx); + + int (*ipa_dma_init)(void); + + int (*ipa_dma_enable)(void); + + int (*ipa_dma_disable)(void); + + int (*ipa_dma_sync_memcpy)(u64 dest, u64 src, int len); + + int (*ipa_dma_async_memcpy)(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param); + + int (*ipa_dma_uc_memcpy)(phys_addr_t dest, phys_addr_t src, int len); + + void (*ipa_dma_destroy)(void); + + bool (*ipa_has_open_aggr_frame)(enum ipa_client_type client); + + int (*ipa_generate_tag_process)(void); + + int (*ipa_disable_sps_pipe)(enum ipa_client_type client); + + void (*ipa_set_tag_process_before_gating)(bool val); + + int (*ipa_mhi_init_engine)(struct ipa_mhi_init_engine *params); + + int (*ipa_connect_mhi_pipe)(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl); + + int (*ipa_disconnect_mhi_pipe)(u32 clnt_hdl); + + bool (*ipa_mhi_stop_gsi_channel)(enum ipa_client_type client); + + int (*ipa_qmi_disable_force_clear)(u32 request_id); + + int 
(*ipa_qmi_enable_force_clear_datapath_send)( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req); + + int (*ipa_qmi_disable_force_clear_datapath_send)( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req); + + bool (*ipa_mhi_sps_channel_empty)(enum ipa_client_type client); + + int (*ipa_mhi_reset_channel_internal)(enum ipa_client_type client); + + int (*ipa_mhi_start_channel_internal)(enum ipa_client_type client); + + void (*ipa_get_holb)(int ep_idx, struct ipa_ep_cfg_holb *holb); + + int (*ipa_mhi_query_ch_info)(enum ipa_client_type client, + struct gsi_chan_info *ch_info); + + int (*ipa_mhi_resume_channels_internal)( + enum ipa_client_type client, + bool LPTransitionRejected, + bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, + u8 index); + + int (*ipa_mhi_destroy_channel)(enum ipa_client_type client); + + int (*ipa_uc_mhi_send_dl_ul_sync_info) + (union IpaHwMhiDlUlSyncCmdData_t *cmd); + + int (*ipa_uc_mhi_init) + (void (*ready_cb)(void), void (*wakeup_request_cb)(void)); + + void (*ipa_uc_mhi_cleanup)(void); + + int (*ipa_uc_mhi_print_stats)(char *dbg_buff, int size); + + int (*ipa_uc_mhi_reset_channel)(int channelHandle); + + int (*ipa_uc_mhi_suspend_channel)(int channelHandle); + + int (*ipa_uc_mhi_stop_event_update_channel)(int channelHandle); + + int (*ipa_uc_state_check)(void); + + int (*ipa_write_qmap_id)(struct ipa_ioc_write_qmapid *param_in); + + int (*ipa_add_interrupt_handler)(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data); + + int (*ipa_remove_interrupt_handler)(enum ipa_irq_type interrupt); + + int (*ipa_restore_suspend_handler)(void); + + void (*ipa_bam_reg_dump)(void); + + int (*ipa_get_ep_mapping)(enum ipa_client_type client); + + bool (*ipa_is_ready)(void); + + void (*ipa_proxy_clk_vote)(void); + + void (*ipa_proxy_clk_unvote)(void); + + bool (*ipa_is_client_handle_valid)(u32 clnt_hdl); + + enum ipa_client_type (*ipa_get_client_mapping)(int pipe_idx); + + enum ipa_rm_resource_name (*ipa_get_rm_resource_from_ep)(int pipe_idx); + + bool (*ipa_get_modem_cfg_emb_pipe_flt)(void); + + enum ipa_transport_type (*ipa_get_transport_type)(void); + + int (*ipa_ap_suspend)(struct device *dev); + + int (*ipa_ap_resume)(struct device *dev); + + int (*ipa_stop_gsi_channel)(u32 clnt_hdl); + + int (*ipa_start_gsi_channel)(u32 clnt_hdl); + + struct iommu_domain *(*ipa_get_smmu_domain)(void); + + int (*ipa_disable_apps_wan_cons_deaggr)(uint32_t agg_size, + uint32_t agg_count); + + struct device *(*ipa_get_dma_dev)(void); + + int (*ipa_release_wdi_mapping)(u32 num_buffers, + struct ipa_wdi_buffer_info *info); + + int (*ipa_create_wdi_mapping)(u32 num_buffers, + struct ipa_wdi_buffer_info *info); + + const struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info) + (enum ipa_client_type client); + + int (*ipa_register_ipa_ready_cb)(void (*ipa_ready_cb)(void *user_data), + void *user_data); + + void (*ipa_inc_client_enable_clks)( + struct ipa_active_client_logging_info *id); + + void (*ipa_dec_client_disable_clks)( + struct ipa_active_client_logging_info *id); + + int (*ipa_inc_client_enable_clks_no_block)( + struct ipa_active_client_logging_info *id); + + int (*ipa_suspend_resource_no_block)( + enum ipa_rm_resource_name resource); + + int (*ipa_resume_resource)(enum ipa_rm_resource_name name); + + int (*ipa_suspend_resource_sync)(enum ipa_rm_resource_name resource); + + int (*ipa_set_required_perf_profile)( + enum ipa_voltage_level floor_voltage, u32 bandwidth_mbps); + + void *(*ipa_get_ipc_logbuf)(void); + + 
void *(*ipa_get_ipc_logbuf_low)(void); + + int (*ipa_rx_poll)(u32 clnt_hdl, int budget); + + void (*ipa_recycle_wan_skb)(struct sk_buff *skb); + + int (*ipa_setup_uc_ntn_pipes)(struct ipa_ntn_conn_in_params *in, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *); + + int (*ipa_tear_down_uc_offload_pipes)(int ipa_ep_idx_ul, + int ipa_ep_idx_dl); + + struct device *(*ipa_get_pdev)(void); + + int (*ipa_ntn_uc_reg_rdyCB)(void (*ipauc_ready_cb)(void *user_data), + void *user_data); + + void (*ipa_ntn_uc_dereg_rdyCB)(void); +}; + +#ifdef CONFIG_IPA +int ipa_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match); +#else +static inline int ipa_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + return -ENODEV; +} +#endif /* (CONFIG_IPA) */ + +#ifdef CONFIG_IPA3 +int ipa3_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match); +#else +static inline int ipa3_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + return -ENODEV; +} +#endif /* (CONFIG_IPA3) */ + +#endif /* _IPA_API_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile new file mode 100644 index 000000000000..61625f562178 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o +obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o +obj-$(CONFIG_ECM_IPA) += ecm_ipa.o +obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c new file mode 100644 index 000000000000..92843fa44f53 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c @@ -0,0 +1,1465 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "ecm_ipa" +#define ECM_IPA_IPV4_HDR_NAME "ecm_eth_ipv4" +#define ECM_IPA_IPV6_HDR_NAME "ecm_eth_ipv6" +#define INACTIVITY_MSEC_DELAY 100 +#define DEFAULT_OUTSTANDING_HIGH 64 +#define DEFAULT_OUTSTANDING_LOW 32 +#define DEBUGFS_TEMP_BUF_SIZE 4 +#define TX_TIMEOUT (5 * HZ) + +#define ECM_IPA_DEBUG(fmt, args...) \ + pr_debug("ctx:%s: "\ + fmt, current->comm, ## args) + +#define ECM_IPA_INFO(fmt, args...) \ + pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\ + fmt, __func__, __LINE__, current->comm, ## args) + +#define ECM_IPA_ERROR(fmt, args...) 
\ + pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\ + fmt, __func__, __LINE__, current->comm, ## args) + +#define NULL_CHECK(ptr) \ + do { \ + if (!(ptr)) { \ + ECM_IPA_ERROR("null pointer #ptr\n"); \ + ret = -EINVAL; \ + } \ + } \ + while (0) + +#define ECM_IPA_LOG_ENTRY() ECM_IPA_DEBUG("begin\n") +#define ECM_IPA_LOG_EXIT() ECM_IPA_DEBUG("end\n") + +/** + * enum ecm_ipa_state - specify the current driver internal state + * which is guarded by a state machine. + * + * The driver internal state changes due to its external API usage. + * The driver saves its internal state to guard from caller illegal + * call sequence. + * states: + * UNLOADED is the first state which is the default one and is also the state + * after the driver gets unloaded(cleanup). + * INITIALIZED is the driver state once it finished registering + * the network device and all internal data struct were initialized + * CONNECTED is the driver state once the USB pipes were connected to IPA + * UP is the driver state after the interface mode was set to UP but the + * pipes are not connected yet - this state is meta-stable state. + * CONNECTED_AND_UP is the driver state when the pipe were connected and + * the interface got UP request from the network stack. this is the driver + * idle operation state which allows it to transmit/receive data. + * INVALID is a state which is not allowed. + */ +enum ecm_ipa_state { + ECM_IPA_UNLOADED = 0, + ECM_IPA_INITIALIZED, + ECM_IPA_CONNECTED, + ECM_IPA_UP, + ECM_IPA_CONNECTED_AND_UP, + ECM_IPA_INVALID, +}; + +/** + * enum ecm_ipa_operation - enumerations used to descibe the API operation + * + * Those enums are used as input for the driver state machine. + */ +enum ecm_ipa_operation { + ECM_IPA_INITIALIZE, + ECM_IPA_CONNECT, + ECM_IPA_OPEN, + ECM_IPA_STOP, + ECM_IPA_DISCONNECT, + ECM_IPA_CLEANUP, +}; + +#define ECM_IPA_STATE_DEBUG(ecm_ipa_ctx) \ + ECM_IPA_DEBUG("Driver state - %s\n",\ + ecm_ipa_state_string((ecm_ipa_ctx)->state)) + +/** + * struct ecm_ipa_dev - main driver context parameters + * @net: network interface struct implemented by this driver + * @directory: debugfs directory for various debuging switches + * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table + * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table + * @usb_to_ipa_hdl: save handle for IPA pipe operations + * @ipa_to_usb_hdl: save handle for IPA pipe operations + * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed + * @outstanding_high: number of outstanding packets allowed + * @outstanding_low: number of outstanding packets which shall cause + * to netdev queue start (after stopped due to outstanding_high reached) + * @state: current state of ecm_ipa driver + * @device_ready_notify: callback supplied by USB core driver + * This callback shall be called by the Netdev once the Netdev internal + * state is changed to RNDIS_IPA_CONNECTED_AND_UP + * @ipa_to_usb_client: consumer client + * @usb_to_ipa_client: producer client + * @ipa_rm_resource_name_prod: IPA resource manager producer resource + * @ipa_rm_resource_name_cons: IPA resource manager consumer resource + */ +struct ecm_ipa_dev { + struct net_device *net; + struct dentry *directory; + u32 eth_ipv4_hdr_hdl; + u32 eth_ipv6_hdr_hdl; + u32 usb_to_ipa_hdl; + u32 ipa_to_usb_hdl; + atomic_t outstanding_pkts; + u8 outstanding_high; + u8 outstanding_low; + enum ecm_ipa_state state; + void (*device_ready_notify)(void); + enum ipa_client_type ipa_to_usb_client; + enum ipa_client_type usb_to_ipa_client; + enum 
ipa_rm_resource_name ipa_rm_resource_name_prod; + enum ipa_rm_resource_name ipa_rm_resource_name_cons; +}; + +static int ecm_ipa_open(struct net_device *net); +static void ecm_ipa_packet_receive_notify + (void *priv, enum ipa_dp_evt_type evt, unsigned long data); +static void ecm_ipa_tx_complete_notify + (void *priv, enum ipa_dp_evt_type evt, unsigned long data); +static void ecm_ipa_tx_timeout(struct net_device *net); +static int ecm_ipa_stop(struct net_device *net); +static void ecm_ipa_enable_data_path(struct ecm_ipa_dev *ecm_ipa_ctx); +static int ecm_ipa_rules_cfg + (struct ecm_ipa_dev *ecm_ipa_ctx, const void *dst_mac, + const void *src_mac); +static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx); +static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx); +static void ecm_ipa_deregister_properties(void); +static void ecm_ipa_rm_notify + (void *user_data, enum ipa_rm_event event, unsigned long data); +static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net); +static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx); +static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx); +static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx); +static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx); +static netdev_tx_t ecm_ipa_start_xmit + (struct sk_buff *skb, struct net_device *net); +static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file); +static ssize_t ecm_ipa_debugfs_atomic_read + (struct file *file, char __user *ubuf, size_t count, loff_t *ppos); +static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx); +static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx); +static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl); +static int ecm_ipa_set_device_ethernet_addr + (u8 *dev_ethaddr, u8 device_ethaddr[]); +static enum ecm_ipa_state ecm_ipa_next_state + (enum ecm_ipa_state current_state, enum ecm_ipa_operation operation); +static const char *ecm_ipa_state_string(enum ecm_ipa_state state); +static int ecm_ipa_init_module(void); +static void ecm_ipa_cleanup_module(void); + +static const struct net_device_ops ecm_ipa_netdev_ops = { + .ndo_open = ecm_ipa_open, + .ndo_stop = ecm_ipa_stop, + .ndo_start_xmit = ecm_ipa_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_tx_timeout = ecm_ipa_tx_timeout, + .ndo_get_stats = ecm_ipa_get_stats, +}; + +const struct file_operations ecm_ipa_debugfs_atomic_ops = { + .open = ecm_ipa_debugfs_atomic_open, + .read = ecm_ipa_debugfs_atomic_read, +}; + +static void ecm_ipa_msg_free_cb(void *buff, u32 len, u32 type) +{ + kfree(buff); +} + +/** + * ecm_ipa_init() - create network device and initializes internal + * data structures + * @params: in/out parameters required for ecm_ipa initialization + * + * Shall be called prior to pipe connection. + * The out parameters (the callbacks) shall be supplied to ipa_connect. 
+ * Detailed description: + * - allocate the network device + * - set default values for driver internals + * - create debugfs folder and files + * - create IPA resource manager client + * - add header insertion rules for IPA driver (based on host/device + * Ethernet addresses given in input params) + * - register tx/rx properties to IPA driver (will be later used + * by IPA configuration manager to configure reset of the IPA rules) + * - set the carrier state to "off" (until ecm_ipa_connect is called) + * - register the network device + * - set the out parameters + * + * Returns negative errno, or zero on success + */ +int ecm_ipa_init(struct ecm_ipa_params *params) +{ + int result = 0; + struct net_device *net; + struct ecm_ipa_dev *ecm_ipa_ctx; + int ret; + + ECM_IPA_LOG_ENTRY(); + + ECM_IPA_DEBUG("%s initializing\n", DRIVER_NAME); + ret = 0; + NULL_CHECK(params); + if (ret) + return ret; + + ECM_IPA_DEBUG + ("host_ethaddr=%pM, device_ethaddr=%pM\n", + params->host_ethaddr, + params->device_ethaddr); + + net = alloc_etherdev(sizeof(struct ecm_ipa_dev)); + if (!net) { + result = -ENOMEM; + ECM_IPA_ERROR("fail to allocate etherdev\n"); + goto fail_alloc_etherdev; + } + ECM_IPA_DEBUG("network device was successfully allocated\n"); + + ecm_ipa_ctx = netdev_priv(net); + if (!ecm_ipa_ctx) { + ECM_IPA_ERROR("fail to extract netdev priv\n"); + result = -ENOMEM; + goto fail_netdev_priv; + } + memset(ecm_ipa_ctx, 0, sizeof(*ecm_ipa_ctx)); + ECM_IPA_DEBUG("ecm_ipa_ctx (private) = %pK\n", ecm_ipa_ctx); + + ecm_ipa_ctx->net = net; + ecm_ipa_ctx->outstanding_high = DEFAULT_OUTSTANDING_HIGH; + ecm_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW; + atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0); + snprintf(net->name, sizeof(net->name), "%s%%d", "ecm"); + net->netdev_ops = &ecm_ipa_netdev_ops; + net->watchdog_timeo = TX_TIMEOUT; + ECM_IPA_DEBUG("internal data structures were initialized\n"); + + if (!params->device_ready_notify) + ECM_IPA_DEBUG("device_ready_notify() was not supplied"); + ecm_ipa_ctx->device_ready_notify = params->device_ready_notify; + + ecm_ipa_debugfs_init(ecm_ipa_ctx); + + result = ecm_ipa_set_device_ethernet_addr + (net->dev_addr, params->device_ethaddr); + if (result) { + ECM_IPA_ERROR("set device MAC failed\n"); + goto fail_set_device_ethernet; + } + ECM_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr); + + result = ecm_ipa_rules_cfg + (ecm_ipa_ctx, params->host_ethaddr, params->device_ethaddr); + if (result) { + ECM_IPA_ERROR("fail on ipa rules set\n"); + goto fail_rules_cfg; + } + ECM_IPA_DEBUG("Ethernet header insertion set\n"); + + netif_carrier_off(net); + ECM_IPA_DEBUG("netif_carrier_off() was called\n"); + + netif_stop_queue(ecm_ipa_ctx->net); + ECM_IPA_DEBUG("netif_stop_queue() was called"); + + result = register_netdev(net); + if (result) { + ECM_IPA_ERROR("register_netdev failed: %d\n", result); + goto fail_register_netdev; + } + ECM_IPA_DEBUG("register_netdev succeeded\n"); + + params->ecm_ipa_rx_dp_notify = ecm_ipa_packet_receive_notify; + params->ecm_ipa_tx_dp_notify = ecm_ipa_tx_complete_notify; + params->private = (void *)ecm_ipa_ctx; + params->skip_ep_cfg = false; + ecm_ipa_ctx->state = ECM_IPA_INITIALIZED; + ECM_IPA_STATE_DEBUG(ecm_ipa_ctx); + + ECM_IPA_INFO("ECM_IPA was initialized successfully\n"); + + ECM_IPA_LOG_EXIT(); + + return 0; + +fail_register_netdev: + ecm_ipa_rules_destroy(ecm_ipa_ctx); +fail_set_device_ethernet: +fail_rules_cfg: + ecm_ipa_debugfs_destroy(ecm_ipa_ctx); +fail_netdev_priv: + free_netdev(net); +fail_alloc_etherdev: + 
return result; +} +EXPORT_SYMBOL(ecm_ipa_init); + +/** + * ecm_ipa_connect() - notify ecm_ipa for IPA<->USB pipes connection + * @usb_to_ipa_hdl: handle of IPA driver client for USB->IPA + * @ipa_to_usb_hdl: handle of IPA driver client for IPA->USB + * @priv: same value that was set by ecm_ipa_init(), this + * parameter holds the network device pointer. + * + * Once USB driver finishes the pipe connection between IPA core + * and USB core this method shall be called in order to + * allow ecm_ipa complete the data path configurations. + * Caller should make sure that it is calling this function + * from a context that allows it to handle device_ready_notify(). + * Detailed description: + * - configure the IPA end-points register + * - notify the Linux kernel for "carrier_on" + * After this function is done the driver state changes to "Connected". + * This API is expected to be called after ecm_ipa_init() or + * after a call to ecm_ipa_disconnect. + */ +int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, void *priv) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = priv; + int next_state; + struct ipa_ecm_msg *ecm_msg; + struct ipa_msg_meta msg_meta; + int retval; + int ret; + + ECM_IPA_LOG_ENTRY(); + ret = 0; + NULL_CHECK(priv); + if (ret) + return ret; + ECM_IPA_DEBUG("usb_to_ipa_hdl = %d, ipa_to_usb_hdl = %d, priv=0x%pK\n", + usb_to_ipa_hdl, ipa_to_usb_hdl, priv); + + next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_CONNECT); + if (next_state == ECM_IPA_INVALID) { + ECM_IPA_ERROR("can't call connect before calling initialize\n"); + return -EPERM; + } + ecm_ipa_ctx->state = next_state; + ECM_IPA_STATE_DEBUG(ecm_ipa_ctx); + + if (!ipa_is_client_handle_valid(usb_to_ipa_hdl)) { + ECM_IPA_ERROR + ("usb_to_ipa_hdl(%d) is not a valid ipa handle\n", + usb_to_ipa_hdl); + return -EINVAL; + } + if (!ipa_is_client_handle_valid(ipa_to_usb_hdl)) { + ECM_IPA_ERROR + ("ipa_to_usb_hdl(%d) is not a valid ipa handle\n", + ipa_to_usb_hdl); + return -EINVAL; + } + + ecm_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl; + ecm_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl; + + ecm_ipa_ctx->ipa_to_usb_client = ipa_get_client_mapping(ipa_to_usb_hdl); + if (ecm_ipa_ctx->ipa_to_usb_client < 0) { + ECM_IPA_ERROR( + "Error getting IPA->USB client from handle %d\n", + ecm_ipa_ctx->ipa_to_usb_client); + return -EINVAL; + } + ECM_IPA_DEBUG("ipa_to_usb_client = %d\n", + ecm_ipa_ctx->ipa_to_usb_client); + + ecm_ipa_ctx->usb_to_ipa_client = ipa_get_client_mapping(usb_to_ipa_hdl); + if (ecm_ipa_ctx->usb_to_ipa_client < 0) { + ECM_IPA_ERROR( + "Error getting USB->IPA client from handle %d\n", + ecm_ipa_ctx->usb_to_ipa_client); + return -EINVAL; + } + ECM_IPA_DEBUG("usb_to_ipa_client = %d\n", + ecm_ipa_ctx->usb_to_ipa_client); + + ecm_ipa_ctx->ipa_rm_resource_name_cons = + ipa_get_rm_resource_from_ep(ipa_to_usb_hdl); + if (ecm_ipa_ctx->ipa_rm_resource_name_cons < 0) { + ECM_IPA_ERROR("Error getting CONS RM resource from handle %d\n", + ecm_ipa_ctx->ipa_rm_resource_name_cons); + return -EINVAL; + } + ECM_IPA_DEBUG("ipa_rm_resource_name_cons = %d\n", + ecm_ipa_ctx->ipa_rm_resource_name_cons); + + ecm_ipa_ctx->ipa_rm_resource_name_prod = + ipa_get_rm_resource_from_ep(usb_to_ipa_hdl); + if (ecm_ipa_ctx->ipa_rm_resource_name_prod < 0) { + ECM_IPA_ERROR("Error getting PROD RM resource from handle %d\n", + ecm_ipa_ctx->ipa_rm_resource_name_prod); + return -EINVAL; + } + ECM_IPA_DEBUG("ipa_rm_resource_name_prod = %d\n", + ecm_ipa_ctx->ipa_rm_resource_name_prod); + + retval = ecm_ipa_create_rm_resource(ecm_ipa_ctx); + if (retval) { + 
ECM_IPA_ERROR("fail on RM create\n"); + goto fail_create_rm; + } + ECM_IPA_DEBUG("RM resource was created\n"); + + retval = ecm_ipa_register_properties(ecm_ipa_ctx); + if (retval) { + ECM_IPA_ERROR("fail on properties set\n"); + goto fail_create_rm; + } + ECM_IPA_DEBUG("ecm_ipa 2 Tx and 2 Rx properties were registered\n"); + + retval = ecm_ipa_ep_registers_cfg(usb_to_ipa_hdl, ipa_to_usb_hdl); + if (retval) { + ECM_IPA_ERROR("fail on ep cfg\n"); + goto fail; + } + ECM_IPA_DEBUG("end-point configured\n"); + + netif_carrier_on(ecm_ipa_ctx->net); + + ecm_msg = kzalloc(sizeof(*ecm_msg), GFP_KERNEL); + if (!ecm_msg) { + retval = -ENOMEM; + goto fail; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = ECM_CONNECT; + msg_meta.msg_len = sizeof(struct ipa_ecm_msg); + strlcpy(ecm_msg->name, ecm_ipa_ctx->net->name, + IPA_RESOURCE_NAME_MAX); + ecm_msg->ifindex = ecm_ipa_ctx->net->ifindex; + + retval = ipa_send_msg(&msg_meta, ecm_msg, ecm_ipa_msg_free_cb); + if (retval) { + ECM_IPA_ERROR("fail to send ECM_CONNECT message\n"); + kfree(ecm_msg); + goto fail; + } + + if (!netif_carrier_ok(ecm_ipa_ctx->net)) { + ECM_IPA_ERROR("netif_carrier_ok error\n"); + retval = -EBUSY; + goto fail; + } + ECM_IPA_DEBUG("carrier_on notified\n"); + + if (ecm_ipa_ctx->state == ECM_IPA_CONNECTED_AND_UP) + ecm_ipa_enable_data_path(ecm_ipa_ctx); + else + ECM_IPA_DEBUG("data path was not enabled yet\n"); + + ECM_IPA_INFO("ECM_IPA was connected successfully\n"); + + ECM_IPA_LOG_EXIT(); + + return 0; + +fail: + ecm_ipa_deregister_properties(); +fail_create_rm: + ecm_ipa_destroy_rm_resource(ecm_ipa_ctx); + return retval; +} +EXPORT_SYMBOL(ecm_ipa_connect); + +/** + * ecm_ipa_open() - notify Linux network stack to start sending packets + * @net: the network interface supplied by the network stack + * + * Linux uses this API to notify the driver that the network interface + * transitions to the up state. + * The driver will instruct the Linux network stack to start + * delivering data packets. + */ +static int ecm_ipa_open(struct net_device *net) +{ + struct ecm_ipa_dev *ecm_ipa_ctx; + int next_state; + + ECM_IPA_LOG_ENTRY(); + + ecm_ipa_ctx = netdev_priv(net); + + next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_OPEN); + if (next_state == ECM_IPA_INVALID) { + ECM_IPA_ERROR("can't bring driver up before initialize\n"); + return -EPERM; + } + ecm_ipa_ctx->state = next_state; + ECM_IPA_STATE_DEBUG(ecm_ipa_ctx); + + if (ecm_ipa_ctx->state == ECM_IPA_CONNECTED_AND_UP) + ecm_ipa_enable_data_path(ecm_ipa_ctx); + else + ECM_IPA_DEBUG("data path was not enabled yet\n"); + + ECM_IPA_LOG_EXIT(); + + return 0; +} + +/** + * ecm_ipa_start_xmit() - send data from APPs to USB core via IPA core + * @skb: packet received from Linux network stack + * @net: the network device being used to send this packet + * + * Several conditions needed in order to send the packet to IPA: + * - Transmit queue for the network driver is currently + * in "send" state + * - The driver internal state is in "UP" state. + * - Filter Tx switch is turned off + * - The IPA resource manager state for the driver producer client + * is "Granted" which implies that all the resources in the dependency + * graph are valid for data flow. + * - outstanding high boundary did not reach. + * + * In case all of the above conditions are met, the network driver will + * send the packet by using the IPA API for Tx. 
+ * In case the outstanding packet high boundary is reached, the driver will + * stop the send queue until enough packet were proceeded by the IPA core. + */ +static netdev_tx_t ecm_ipa_start_xmit + (struct sk_buff *skb, struct net_device *net) +{ + int ret; + netdev_tx_t status = NETDEV_TX_BUSY; + struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net); + + netif_trans_update(net); + + ECM_IPA_DEBUG + ("Tx, len=%d, skb->protocol=%d, outstanding=%d\n", + skb->len, skb->protocol, + atomic_read(&ecm_ipa_ctx->outstanding_pkts)); + + if (unlikely(netif_queue_stopped(net))) { + ECM_IPA_ERROR("interface queue is stopped\n"); + goto out; + } + + if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) { + ECM_IPA_ERROR("Missing pipe connected and/or iface up\n"); + return NETDEV_TX_BUSY; + } + + ret = resource_request(ecm_ipa_ctx); + if (ret) { + ECM_IPA_DEBUG("Waiting to resource\n"); + netif_stop_queue(net); + goto resource_busy; + } + + if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) >= + ecm_ipa_ctx->outstanding_high) { + ECM_IPA_DEBUG + ("outstanding high (%d)- stopping\n", + ecm_ipa_ctx->outstanding_high); + netif_stop_queue(net); + status = NETDEV_TX_BUSY; + goto out; + } + + ret = ipa_tx_dp(ecm_ipa_ctx->ipa_to_usb_client, skb, NULL); + if (ret) { + ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret); + goto fail_tx_packet; + } + + atomic_inc(&ecm_ipa_ctx->outstanding_pkts); + + status = NETDEV_TX_OK; + goto out; + +fail_tx_packet: +out: + resource_release(ecm_ipa_ctx); +resource_busy: + return status; +} + +/** + * ecm_ipa_packet_receive_notify() - Rx notify + * + * @priv: ecm driver context + * @evt: event type + * @data: data provided with event + * + * IPA will pass a packet to the Linux network stack with skb->data pointing + * to Ethernet packet frame. + */ +static void ecm_ipa_packet_receive_notify + (void *priv, enum ipa_dp_evt_type evt, unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + struct ecm_ipa_dev *ecm_ipa_ctx = priv; + int result; + unsigned int packet_len; + + if (!skb) { + ECM_IPA_ERROR("Bad SKB received from IPA driver\n"); + return; + } + + packet_len = skb->len; + ECM_IPA_DEBUG("packet RX, len=%d\n", skb->len); + + if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) { + ECM_IPA_DEBUG("Missing pipe connected and/or iface up\n"); + return; + } + + if (evt != IPA_RECEIVE) { + ECM_IPA_ERROR("A none IPA_RECEIVE event in ecm_ipa_receive\n"); + return; + } + + skb->dev = ecm_ipa_ctx->net; + skb->protocol = eth_type_trans(skb, ecm_ipa_ctx->net); + + result = netif_rx(skb); + if (result) + ECM_IPA_ERROR("fail on netif_rx\n"); + ecm_ipa_ctx->net->stats.rx_packets++; + ecm_ipa_ctx->net->stats.rx_bytes += packet_len; +} + +/** ecm_ipa_stop() - called when network device transitions to the down + * state. + * @net: the network device being stopped. + * + * This API is used by Linux network stack to notify the network driver that + * its state was changed to "down" + * The driver will stop the "send" queue and change its internal + * state to "Connected". 
+ */
+static int ecm_ipa_stop(struct net_device *net)
+{
+ struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net);
+ int next_state;
+
+ ECM_IPA_LOG_ENTRY();
+
+ next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_STOP);
+ if (next_state == ECM_IPA_INVALID) {
+ ECM_IPA_ERROR("can't do network interface down without up\n");
+ return -EPERM;
+ }
+ ecm_ipa_ctx->state = next_state;
+ ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+ netif_stop_queue(net);
+ ECM_IPA_DEBUG("network device stopped\n");
+
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+
+/** ecm_ipa_disconnect() - called when the USB cable is unplugged.
+ * @priv: same value that was set by ecm_ipa_init(), this
+ * parameter holds the network device pointer.
+ *
+ * Once the USB cable is unplugged the USB driver will notify the network
+ * interface driver.
+ * The internal driver state will be returned to its initialized state, the
+ * Linux network stack will be informed of carrier off and the send queue
+ * will be stopped.
+ */
+int ecm_ipa_disconnect(void *priv)
+{
+ struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+ int next_state;
+ struct ipa_ecm_msg *ecm_msg;
+ struct ipa_msg_meta msg_meta;
+ int retval;
+ int outstanding_dropped_pkts;
+ int ret;
+
+ ECM_IPA_LOG_ENTRY();
+ ret = 0;
+ NULL_CHECK(ecm_ipa_ctx);
+ if (ret)
+ return ret;
+ ECM_IPA_DEBUG("priv=0x%pK\n", priv);
+
+ next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_DISCONNECT);
+ if (next_state == ECM_IPA_INVALID) {
+ ECM_IPA_ERROR("can't disconnect before connect\n");
+ return -EPERM;
+ }
+ ecm_ipa_ctx->state = next_state;
+ ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+ netif_carrier_off(ecm_ipa_ctx->net);
+ ECM_IPA_DEBUG("carrier_off notification was sent\n");
+
+ ecm_msg = kzalloc(sizeof(*ecm_msg), GFP_KERNEL);
+ if (!ecm_msg)
+ return -ENOMEM;
+
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = ECM_DISCONNECT;
+ msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+ strlcpy(ecm_msg->name, ecm_ipa_ctx->net->name,
+ IPA_RESOURCE_NAME_MAX);
+ ecm_msg->ifindex = ecm_ipa_ctx->net->ifindex;
+
+ retval = ipa_send_msg(&msg_meta, ecm_msg, ecm_ipa_msg_free_cb);
+ if (retval) {
+ ECM_IPA_ERROR("fail to send ECM_DISCONNECT message\n");
+ kfree(ecm_msg);
+ return -EPERM;
+ }
+
+ netif_stop_queue(ecm_ipa_ctx->net);
+ ECM_IPA_DEBUG("queue stopped\n");
+
+ ecm_ipa_destroy_rm_resource(ecm_ipa_ctx);
+
+ outstanding_dropped_pkts =
+ atomic_read(&ecm_ipa_ctx->outstanding_pkts);
+ ecm_ipa_ctx->net->stats.tx_errors += outstanding_dropped_pkts;
+ atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0);
+
+ ECM_IPA_INFO("ECM_IPA was disconnected successfully\n");
+
+ ECM_IPA_LOG_EXIT();
+
+ return 0;
+}
+EXPORT_SYMBOL(ecm_ipa_disconnect);
+
+/**
+ * ecm_ipa_cleanup() - unregister the network interface driver and free
+ * internal data structs.
+ * @priv: same value that was set by ecm_ipa_init(), this
+ * parameter holds the network device pointer.
+ *
+ * This function shall be called once the network interface is not
+ * needed anymore, e.g. when the USB composition does not support ECM.
+ * This function shall be called after the pipes were disconnected.
+ * Detailed description:
+ * - delete the driver dependency defined for IPA resource manager and
+ * destroy the producer resource.
+ * - remove the debugfs entries + * - deregister the network interface from Linux network stack + * - free all internal data structs + */ +void ecm_ipa_cleanup(void *priv) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = priv; + int next_state; + + ECM_IPA_LOG_ENTRY(); + + ECM_IPA_DEBUG("priv=0x%pK\n", priv); + + if (!ecm_ipa_ctx) { + ECM_IPA_ERROR("ecm_ipa_ctx NULL pointer\n"); + return; + } + + next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_CLEANUP); + if (next_state == ECM_IPA_INVALID) { + ECM_IPA_ERROR("can't clean driver without cable disconnect\n"); + return; + } + ecm_ipa_ctx->state = next_state; + ECM_IPA_STATE_DEBUG(ecm_ipa_ctx); + + ecm_ipa_rules_destroy(ecm_ipa_ctx); + ecm_ipa_debugfs_destroy(ecm_ipa_ctx); + + unregister_netdev(ecm_ipa_ctx->net); + free_netdev(ecm_ipa_ctx->net); + + ECM_IPA_INFO("ECM_IPA was destroyed successfully\n"); + + ECM_IPA_LOG_EXIT(); +} +EXPORT_SYMBOL(ecm_ipa_cleanup); + +static void ecm_ipa_enable_data_path(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + if (ecm_ipa_ctx->device_ready_notify) { + ecm_ipa_ctx->device_ready_notify(); + ECM_IPA_DEBUG("USB device_ready_notify() was called\n"); + } else { + ECM_IPA_DEBUG("device_ready_notify() not supplied\n"); + } + + netif_start_queue(ecm_ipa_ctx->net); + ECM_IPA_DEBUG("queue started\n"); +} + +/** + * ecm_ipa_rules_cfg() - set header insertion and register Tx/Rx properties + * Headers will be committed to HW + * @ecm_ipa_ctx: main driver context parameters + * @dst_mac: destination MAC address + * @src_mac: source MAC address + * + * Returns negative errno, or zero on success + */ +static int ecm_ipa_rules_cfg + (struct ecm_ipa_dev *ecm_ipa_ctx, + const void *dst_mac, const void *src_mac) +{ + struct ipa_ioc_add_hdr *hdrs; + struct ipa_hdr_add *ipv4_hdr; + struct ipa_hdr_add *ipv6_hdr; + struct ethhdr *eth_ipv4; + struct ethhdr *eth_ipv6; + int result = 0; + + ECM_IPA_LOG_ENTRY(); + hdrs = kzalloc + (sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr), + GFP_KERNEL); + if (!hdrs) { + result = -ENOMEM; + goto out; + } + ipv4_hdr = &hdrs->hdr[0]; + eth_ipv4 = (struct ethhdr *)ipv4_hdr->hdr; + ipv6_hdr = &hdrs->hdr[1]; + eth_ipv6 = (struct ethhdr *)ipv6_hdr->hdr; + strlcpy(ipv4_hdr->name, ECM_IPA_IPV4_HDR_NAME, IPA_RESOURCE_NAME_MAX); + memcpy(eth_ipv4->h_dest, dst_mac, ETH_ALEN); + memcpy(eth_ipv4->h_source, src_mac, ETH_ALEN); + eth_ipv4->h_proto = htons(ETH_P_IP); + ipv4_hdr->hdr_len = ETH_HLEN; + ipv4_hdr->is_partial = 0; + ipv4_hdr->is_eth2_ofst_valid = true; + ipv4_hdr->eth2_ofst = 0; + ipv4_hdr->type = IPA_HDR_L2_ETHERNET_II; + strlcpy(ipv6_hdr->name, ECM_IPA_IPV6_HDR_NAME, IPA_RESOURCE_NAME_MAX); + memcpy(eth_ipv6->h_dest, dst_mac, ETH_ALEN); + memcpy(eth_ipv6->h_source, src_mac, ETH_ALEN); + eth_ipv6->h_proto = htons(ETH_P_IPV6); + ipv6_hdr->hdr_len = ETH_HLEN; + ipv6_hdr->is_partial = 0; + ipv6_hdr->is_eth2_ofst_valid = true; + ipv6_hdr->eth2_ofst = 0; + ipv6_hdr->type = IPA_HDR_L2_ETHERNET_II; + hdrs->commit = 1; + hdrs->num_hdrs = 2; + result = ipa_add_hdr(hdrs); + if (result) { + ECM_IPA_ERROR("Fail on Header-Insertion(%d)\n", result); + goto out_free_mem; + } + if (ipv4_hdr->status) { + ECM_IPA_ERROR + ("Fail on Header-Insertion ipv4(%d)\n", + ipv4_hdr->status); + result = ipv4_hdr->status; + goto out_free_mem; + } + if (ipv6_hdr->status) { + ECM_IPA_ERROR + ("Fail on Header-Insertion ipv6(%d)\n", + ipv6_hdr->status); + result = ipv6_hdr->status; + goto out_free_mem; + } + ecm_ipa_ctx->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl; + ecm_ipa_ctx->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl; + ECM_IPA_LOG_EXIT(); 
+out_free_mem: + kfree(hdrs); +out: + return result; +} + +/** + * ecm_ipa_rules_destroy() - remove the IPA core configuration done for + * the driver data path. + * @ecm_ipa_ctx: the driver context + * + * Revert the work done on ecm_ipa_rules_cfg. + */ +static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *ipv4; + struct ipa_hdr_del *ipv6; + int result; + + del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) + + sizeof(*ipv6), GFP_KERNEL); + if (!del_hdr) + return; + del_hdr->commit = 1; + del_hdr->num_hdls = 2; + ipv4 = &del_hdr->hdl[0]; + ipv4->hdl = ecm_ipa_ctx->eth_ipv4_hdr_hdl; + ipv6 = &del_hdr->hdl[1]; + ipv6->hdl = ecm_ipa_ctx->eth_ipv6_hdr_hdl; + result = ipa_del_hdr(del_hdr); + if (result || ipv4->status || ipv6->status) + ECM_IPA_ERROR("ipa_del_hdr failed\n"); + kfree(del_hdr); +} + +/* ecm_ipa_register_properties() - set Tx/Rx properties for ipacm + * + * Register ecm0 interface with 2 Tx properties and 2 Rx properties: + * The 2 Tx properties are for data flowing from IPA to USB, they + * have Header-Insertion properties both for Ipv4 and Ipv6 Ethernet framing. + * The 2 Rx properties are for data flowing from USB to IPA, they have + * simple rule which always "hit". + * + */ +static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + struct ipa_tx_intf tx_properties = {0}; + struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} }; + struct ipa_ioc_tx_intf_prop *ipv4_property; + struct ipa_ioc_tx_intf_prop *ipv6_property; + struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} }; + struct ipa_rx_intf rx_properties = {0}; + struct ipa_ioc_rx_intf_prop *rx_ipv4_property; + struct ipa_ioc_rx_intf_prop *rx_ipv6_property; + int result = 0; + + ECM_IPA_LOG_ENTRY(); + + tx_properties.prop = properties; + ipv4_property = &tx_properties.prop[0]; + ipv4_property->ip = IPA_IP_v4; + ipv4_property->dst_pipe = ecm_ipa_ctx->ipa_to_usb_client; + strlcpy + (ipv4_property->hdr_name, ECM_IPA_IPV4_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + ipv6_property = &tx_properties.prop[1]; + ipv6_property->ip = IPA_IP_v6; + ipv6_property->dst_pipe = ecm_ipa_ctx->ipa_to_usb_client; + ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + strlcpy + (ipv6_property->hdr_name, ECM_IPA_IPV6_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + tx_properties.num_props = 2; + + rx_properties.prop = rx_ioc_properties; + rx_ipv4_property = &rx_properties.prop[0]; + rx_ipv4_property->ip = IPA_IP_v4; + rx_ipv4_property->attrib.attrib_mask = 0; + rx_ipv4_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client; + rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_ipv6_property = &rx_properties.prop[1]; + rx_ipv6_property->ip = IPA_IP_v6; + rx_ipv6_property->attrib.attrib_mask = 0; + rx_ipv6_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client; + rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_properties.num_props = 2; + + result = ipa_register_intf("ecm0", &tx_properties, &rx_properties); + if (result) + ECM_IPA_ERROR("fail on Tx/Rx properties registration\n"); + + ECM_IPA_LOG_EXIT(); + + return result; +} + +static void ecm_ipa_deregister_properties(void) +{ + int result; + + ECM_IPA_LOG_ENTRY(); + result = ipa_deregister_intf("ecm0"); + if (result) + ECM_IPA_DEBUG("Fail on Tx prop deregister\n"); + ECM_IPA_LOG_EXIT(); +} + +/** + * ecm_ipa_configure() - make IPA core end-point specific configuration + * @usb_to_ipa_hdl: handle of usb_to_ipa end-point for IPA driver + * 
@ipa_to_usb_hdl: handle of ipa_to_usb end-point for IPA driver + * @host_ethaddr: host Ethernet address in network order + * @device_ethaddr: device Ethernet address in network order + * + * Configure the usb_to_ipa and ipa_to_usb end-point registers + * - USB->IPA end-point: disable de-aggregation, enable link layer + * header removal (Ethernet removal), source NATing and default routing. + * - IPA->USB end-point: disable aggregation, add link layer header (Ethernet) + * - allocate Ethernet device + * - register to Linux network stack + * + * Returns negative errno, or zero on success + */ + +static void ecm_ipa_rm_notify + (void *user_data, enum ipa_rm_event event, unsigned long data) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = user_data; + + ECM_IPA_LOG_ENTRY(); + if + (event == IPA_RM_RESOURCE_GRANTED && + netif_queue_stopped(ecm_ipa_ctx->net)) { + ECM_IPA_DEBUG("Resource Granted - starting queue\n"); + netif_start_queue(ecm_ipa_ctx->net); + } else { + ECM_IPA_DEBUG("Resource released\n"); + } + ECM_IPA_LOG_EXIT(); +} + +static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net) +{ + return &net->stats; +} + +static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + struct ipa_rm_create_params create_params = {0}; + struct ipa_rm_perf_profile profile; + int result; + + ECM_IPA_LOG_ENTRY(); + create_params.name = IPA_RM_RESOURCE_STD_ECM_PROD; + create_params.reg_params.user_data = ecm_ipa_ctx; + create_params.reg_params.notify_cb = ecm_ipa_rm_notify; + result = ipa_rm_create_resource(&create_params); + if (result) { + ECM_IPA_ERROR("Fail on ipa_rm_create_resource\n"); + goto fail_rm_create; + } + ECM_IPA_DEBUG("rm client was created"); + + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ipa_rm_set_perf_profile(IPA_RM_RESOURCE_STD_ECM_PROD, &profile); + + result = ipa_rm_inactivity_timer_init + (IPA_RM_RESOURCE_STD_ECM_PROD, + INACTIVITY_MSEC_DELAY); + if (result) { + ECM_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n"); + goto fail_it; + } + ECM_IPA_DEBUG("rm_it client was created"); + + result = ipa_rm_add_dependency_sync + (IPA_RM_RESOURCE_STD_ECM_PROD, + ecm_ipa_ctx->ipa_rm_resource_name_cons); + if (result && result != -EINPROGRESS) + ECM_IPA_ERROR + ("unable to add ECM/USB dependency (%d)\n", result); + + result = ipa_rm_add_dependency_sync + (ecm_ipa_ctx->ipa_rm_resource_name_prod, + IPA_RM_RESOURCE_APPS_CONS); + if (result && result != -EINPROGRESS) + ECM_IPA_ERROR + ("unable to add USB/APPS dependency (%d)\n", result); + + ECM_IPA_DEBUG("rm dependency was set\n"); + + ECM_IPA_LOG_EXIT(); + return 0; + +fail_it: +fail_rm_create: + return result; +} + +static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + int result; + + ECM_IPA_LOG_ENTRY(); + + ipa_rm_delete_dependency(IPA_RM_RESOURCE_STD_ECM_PROD, + ecm_ipa_ctx->ipa_rm_resource_name_cons); + ipa_rm_delete_dependency(ecm_ipa_ctx->ipa_rm_resource_name_prod, + IPA_RM_RESOURCE_APPS_CONS); + ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_STD_ECM_PROD); + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_STD_ECM_PROD); + if (result) + ECM_IPA_ERROR("resource deletion failed\n"); + + ECM_IPA_LOG_EXIT(); +} + +static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + return ipa_rm_inactivity_timer_request_resource( + IPA_RM_RESOURCE_STD_ECM_PROD); +} + +static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_STD_ECM_PROD); +} + +/** + * ecm_ipa_tx_complete_notify() - Rx notify + * + * @priv: 
ecm driver context + * @evt: event type + * @data: data provided with event + * + * Check that the packet is the one we sent and release it + * This function will be called in defered context in IPA wq. + */ +static void ecm_ipa_tx_complete_notify + (void *priv, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + struct ecm_ipa_dev *ecm_ipa_ctx = priv; + + if (!skb) { + ECM_IPA_ERROR("Bad SKB received from IPA driver\n"); + return; + } + + if (!ecm_ipa_ctx) { + ECM_IPA_ERROR("ecm_ipa_ctx is NULL pointer\n"); + return; + } + + ECM_IPA_DEBUG + ("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n", + skb->len, skb->protocol, + atomic_read(&ecm_ipa_ctx->outstanding_pkts)); + + if (evt != IPA_WRITE_DONE) { + ECM_IPA_ERROR("unsupported event on Tx callback\n"); + return; + } + + if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) { + ECM_IPA_DEBUG + ("dropping Tx-complete pkt, state=%s", + ecm_ipa_state_string(ecm_ipa_ctx->state)); + goto out; + } + + ecm_ipa_ctx->net->stats.tx_packets++; + ecm_ipa_ctx->net->stats.tx_bytes += skb->len; + + atomic_dec(&ecm_ipa_ctx->outstanding_pkts); + if + (netif_queue_stopped(ecm_ipa_ctx->net) && + netif_carrier_ok(ecm_ipa_ctx->net) && + atomic_read(&ecm_ipa_ctx->outstanding_pkts) + < (ecm_ipa_ctx->outstanding_low)) { + ECM_IPA_DEBUG + ("outstanding low (%d) - waking up queue\n", + ecm_ipa_ctx->outstanding_low); + netif_wake_queue(ecm_ipa_ctx->net); + } + +out: + dev_kfree_skb_any(skb); +} + +static void ecm_ipa_tx_timeout(struct net_device *net) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net); + + ECM_IPA_ERROR + ("possible IPA stall was detected, %d outstanding", + atomic_read(&ecm_ipa_ctx->outstanding_pkts)); + + net->stats.tx_errors++; +} + +static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file) +{ + struct ecm_ipa_dev *ecm_ipa_ctx = inode->i_private; + + ECM_IPA_LOG_ENTRY(); + file->private_data = &ecm_ipa_ctx->outstanding_pkts; + ECM_IPA_LOG_EXIT(); + return 0; +} + +static ssize_t ecm_ipa_debugfs_atomic_read + (struct file *file, char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes; + u8 atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0}; + atomic_t *atomic_var = file->private_data; + + nbytes = scnprintf + (atomic_str, sizeof(atomic_str), "%d\n", + atomic_read(atomic_var)); + return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes); +} + +#ifdef CONFIG_DEBUG_FS + +static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + const mode_t flags_read_write = 0666; + const mode_t flags_read_only = 0444; + struct dentry *file; + + ECM_IPA_LOG_ENTRY(); + + if (!ecm_ipa_ctx) + return; + + ecm_ipa_ctx->directory = debugfs_create_dir("ecm_ipa", NULL); + if (!ecm_ipa_ctx->directory) { + ECM_IPA_ERROR("could not create debugfs directory entry\n"); + goto fail_directory; + } + file = debugfs_create_u8 + ("outstanding_high", flags_read_write, + ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_high); + if (!file) { + ECM_IPA_ERROR("could not create outstanding_high file\n"); + goto fail_file; + } + file = debugfs_create_u8 + ("outstanding_low", flags_read_write, + ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_low); + if (!file) { + ECM_IPA_ERROR("could not create outstanding_low file\n"); + goto fail_file; + } + file = debugfs_create_file + ("outstanding", flags_read_only, + ecm_ipa_ctx->directory, + ecm_ipa_ctx, &ecm_ipa_debugfs_atomic_ops); + if (!file) { + ECM_IPA_ERROR("could not create outstanding file\n"); + goto fail_file; + } + + 
ECM_IPA_DEBUG("debugfs entries were created\n"); + ECM_IPA_LOG_EXIT(); + + return; +fail_file: + debugfs_remove_recursive(ecm_ipa_ctx->directory); +fail_directory: + return; +} + +static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx) +{ + debugfs_remove_recursive(ecm_ipa_ctx->directory); +} + +#else /* !CONFIG_DEBUG_FS*/ + +static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx) {} + +static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx) {} + +#endif /* CONFIG_DEBUG_FS */ + +/** + * ecm_ipa_ep_cfg() - configure the USB endpoints for ECM + * + *usb_to_ipa_hdl: handle received from ipa_connect + *ipa_to_usb_hdl: handle received from ipa_connect + * + * USB to IPA pipe: + * - No de-aggregation + * - Remove Ethernet header + * - SRC NAT + * - Default routing(0) + * IPA to USB Pipe: + * - No aggregation + * - Add Ethernet header + */ +static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl) +{ + int result = 0; + struct ipa_ep_cfg usb_to_ipa_ep_cfg; + struct ipa_ep_cfg ipa_to_usb_ep_cfg; + + ECM_IPA_LOG_ENTRY(); + memset(&usb_to_ipa_ep_cfg, 0, sizeof(struct ipa_ep_cfg)); + usb_to_ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR; + usb_to_ipa_ep_cfg.hdr.hdr_len = ETH_HLEN; + usb_to_ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT; + usb_to_ipa_ep_cfg.route.rt_tbl_hdl = 0; + usb_to_ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS; + usb_to_ipa_ep_cfg.mode.mode = IPA_BASIC; + result = ipa_cfg_ep(usb_to_ipa_hdl, &usb_to_ipa_ep_cfg); + if (result) { + ECM_IPA_ERROR("failed to configure USB to IPA point\n"); + goto out; + } + memset(&ipa_to_usb_ep_cfg, 0, sizeof(struct ipa_ep_cfg)); + ipa_to_usb_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR; + ipa_to_usb_ep_cfg.hdr.hdr_len = ETH_HLEN; + ipa_to_usb_ep_cfg.nat.nat_en = IPA_BYPASS_NAT; + result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg); + if (result) { + ECM_IPA_ERROR("failed to configure IPA to USB end-point\n"); + goto out; + } + ECM_IPA_DEBUG("end-point registers successfully configured\n"); +out: + ECM_IPA_LOG_EXIT(); + return result; +} + +/** + * ecm_ipa_set_device_ethernet_addr() - set device etherenet address + * @dev_ethaddr: device etherenet address + * + * Returns 0 for success, negative otherwise + */ +static int ecm_ipa_set_device_ethernet_addr + (u8 *dev_ethaddr, u8 device_ethaddr[]) +{ + if (!is_valid_ether_addr(device_ethaddr)) + return -EINVAL; + memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN); + ECM_IPA_DEBUG("device ethernet address: %pM\n", dev_ethaddr); + return 0; +} + +/** ecm_ipa_next_state - return the next state of the driver + * @current_state: the current state of the driver + * @operation: an enum which represent the operation being made on the driver + * by its API. + * + * This function implements the driver internal state machine. + * Its decisions are based on the driver current state and the operation + * being made. + * In case the operation is invalid this state machine will return + * the value ECM_IPA_INVALID to inform the caller for a forbidden sequence. 
+ */ +static enum ecm_ipa_state ecm_ipa_next_state + (enum ecm_ipa_state current_state, enum ecm_ipa_operation operation) +{ + int next_state = ECM_IPA_INVALID; + + switch (current_state) { + case ECM_IPA_UNLOADED: + if (operation == ECM_IPA_INITIALIZE) + next_state = ECM_IPA_INITIALIZED; + break; + case ECM_IPA_INITIALIZED: + if (operation == ECM_IPA_CONNECT) + next_state = ECM_IPA_CONNECTED; + else if (operation == ECM_IPA_OPEN) + next_state = ECM_IPA_UP; + else if (operation == ECM_IPA_CLEANUP) + next_state = ECM_IPA_UNLOADED; + break; + case ECM_IPA_CONNECTED: + if (operation == ECM_IPA_DISCONNECT) + next_state = ECM_IPA_INITIALIZED; + else if (operation == ECM_IPA_OPEN) + next_state = ECM_IPA_CONNECTED_AND_UP; + break; + case ECM_IPA_UP: + if (operation == ECM_IPA_STOP) + next_state = ECM_IPA_INITIALIZED; + else if (operation == ECM_IPA_CONNECT) + next_state = ECM_IPA_CONNECTED_AND_UP; + else if (operation == ECM_IPA_CLEANUP) + next_state = ECM_IPA_UNLOADED; + break; + case ECM_IPA_CONNECTED_AND_UP: + if (operation == ECM_IPA_STOP) + next_state = ECM_IPA_CONNECTED; + else if (operation == ECM_IPA_DISCONNECT) + next_state = ECM_IPA_UP; + break; + default: + ECM_IPA_ERROR("State is not supported\n"); + break; + } + + ECM_IPA_DEBUG + ("state transition ( %s -> %s )- %s\n", + ecm_ipa_state_string(current_state), + ecm_ipa_state_string(next_state), + next_state == ECM_IPA_INVALID ? "Forbidden" : "Allowed"); + + return next_state; +} + +/** + * ecm_ipa_state_string - return the state string representation + * @state: enum which describe the state + */ +static const char *ecm_ipa_state_string(enum ecm_ipa_state state) +{ + switch (state) { + case ECM_IPA_UNLOADED: + return "ECM_IPA_UNLOADED"; + case ECM_IPA_INITIALIZED: + return "ECM_IPA_INITIALIZED"; + case ECM_IPA_CONNECTED: + return "ECM_IPA_CONNECTED"; + case ECM_IPA_UP: + return "ECM_IPA_UP"; + case ECM_IPA_CONNECTED_AND_UP: + return "ECM_IPA_CONNECTED_AND_UP"; + default: + return "Not supported"; + } +} + +/** + * ecm_ipa_init_module() - module initialization + * + */ +static int ecm_ipa_init_module(void) +{ + ECM_IPA_LOG_ENTRY(); + ECM_IPA_LOG_EXIT(); + return 0; +} + +/** + * ecm_ipa_cleanup_module() - module cleanup + * + */ +static void ecm_ipa_cleanup_module(void) +{ + ECM_IPA_LOG_ENTRY(); + ECM_IPA_LOG_EXIT(); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ECM IPA network interface"); + +late_initcall(ecm_ipa_init_module); +module_exit(ecm_ipa_cleanup_module); diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c new file mode 100644 index 000000000000..8e0a8049fae4 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c @@ -0,0 +1,2631 @@ +/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_common_i.h" + +#define IPA_MHI_DRV_NAME "ipa_mhi_client" +#define IPA_MHI_DBG(fmt, args...) 
\ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args) +#define IPA_MHI_ERR(fmt, args...) \ + pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) +#define IPA_MHI_FUNC_ENTRY() \ + IPA_MHI_DBG("ENTRY\n") +#define IPA_MHI_FUNC_EXIT() \ + IPA_MHI_DBG("EXIT\n") + +#define IPA_MHI_RM_TIMEOUT_MSEC 10000 +#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10 + +#define IPA_MHI_SUSPEND_SLEEP_MIN 900 +#define IPA_MHI_SUSPEND_SLEEP_MAX 1100 + +#define IPA_MHI_MAX_UL_CHANNELS 1 +#define IPA_MHI_MAX_DL_CHANNELS 1 + +#if (IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) > \ + (IPA_MHI_GSI_ER_END - IPA_MHI_GSI_ER_START) +#error not enought event rings for MHI +#endif + +/* bit #40 in address should be asserted for MHI transfers over pcie */ +#define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \ + ((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr)) + +enum ipa_mhi_rm_state { + IPA_MHI_RM_STATE_RELEASED, + IPA_MHI_RM_STATE_REQUESTED, + IPA_MHI_RM_STATE_GRANTED, + IPA_MHI_RM_STATE_MAX +}; + +enum ipa_mhi_state { + IPA_MHI_STATE_INITIALIZED, + IPA_MHI_STATE_READY, + IPA_MHI_STATE_STARTED, + IPA_MHI_STATE_SUSPEND_IN_PROGRESS, + IPA_MHI_STATE_SUSPENDED, + IPA_MHI_STATE_RESUME_IN_PROGRESS, + IPA_MHI_STATE_MAX +}; + +static char *ipa_mhi_state_str[] = { + __stringify(IPA_MHI_STATE_INITIALIZED), + __stringify(IPA_MHI_STATE_READY), + __stringify(IPA_MHI_STATE_STARTED), + __stringify(IPA_MHI_STATE_SUSPEND_IN_PROGRESS), + __stringify(IPA_MHI_STATE_SUSPENDED), + __stringify(IPA_MHI_STATE_RESUME_IN_PROGRESS), +}; + +#define MHI_STATE_STR(state) \ + (((state) >= 0 && (state) < IPA_MHI_STATE_MAX) ? \ + ipa_mhi_state_str[(state)] : \ + "INVALID") + +enum ipa_mhi_dma_dir { + IPA_MHI_DMA_TO_HOST, + IPA_MHI_DMA_FROM_HOST, +}; + +/** + * struct ipa_mhi_channel_ctx - MHI Channel context + * @valid: entry is valid + * @id: MHI channel ID + * @hdl: channel handle for uC + * @client: IPA Client + * @state: Channel state + */ +struct ipa_mhi_channel_ctx { + bool valid; + u8 id; + u8 index; + enum ipa_client_type client; + enum ipa_hw_mhi_channel_states state; + bool stop_in_proc; + struct gsi_chan_info ch_info; + u64 channel_context_addr; + struct ipa_mhi_ch_ctx ch_ctx_host; + u64 event_context_addr; + struct ipa_mhi_ev_ctx ev_ctx_host; + bool brstmode_enabled; + union __packed gsi_channel_scratch ch_scratch; + unsigned long cached_gsi_evt_ring_hdl; +}; + +struct ipa_mhi_client_ctx { + enum ipa_mhi_state state; + spinlock_t state_lock; + mhi_client_cb cb_notify; + void *cb_priv; + struct completion rm_prod_granted_comp; + enum ipa_mhi_rm_state rm_cons_state; + struct completion rm_cons_comp; + bool trigger_wakeup; + bool wakeup_notified; + struct workqueue_struct *wq; + struct ipa_mhi_channel_ctx ul_channels[IPA_MHI_MAX_UL_CHANNELS]; + struct ipa_mhi_channel_ctx dl_channels[IPA_MHI_MAX_DL_CHANNELS]; + u32 total_channels; + struct ipa_mhi_msi_info msi; + u32 mmio_addr; + u32 first_ch_idx; + u32 first_er_idx; + u32 host_ctrl_addr; + u32 host_data_addr; + u64 channel_context_array_addr; + u64 event_context_array_addr; + u32 qmi_req_id; + u32 use_ipadma; + bool assert_bit40; + bool test_mode; +}; + +static struct ipa_mhi_client_ctx *ipa_mhi_client_ctx; + +#ifdef CONFIG_DEBUG_FS +#define IPA_MHI_MAX_MSG_LEN 512 +static char dbg_buff[IPA_MHI_MAX_MSG_LEN]; +static struct dentry *dent; + +static char *ipa_mhi_channel_state_str[] = { + __stringify(IPA_HW_MHI_CHANNEL_STATE_DISABLE), + __stringify(IPA_HW_MHI_CHANNEL_STATE_ENABLE), + __stringify(IPA_HW_MHI_CHANNEL_STATE_RUN), + 
__stringify(IPA_HW_MHI_CHANNEL_STATE_SUSPEND), + __stringify(IPA_HW_MHI_CHANNEL_STATE_STOP), + __stringify(IPA_HW_MHI_CHANNEL_STATE_ERROR), +}; + +#define MHI_CH_STATE_STR(state) \ + (((state) >= 0 && (state) <= IPA_HW_MHI_CHANNEL_STATE_ERROR) ? \ + ipa_mhi_channel_state_str[(state)] : \ + "INVALID") + +static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr, + u64 host_addr, int size) +{ + struct ipa_mem_buffer mem; + int res; + struct device *pdev; + + IPA_MHI_FUNC_ENTRY(); + + if (ipa_mhi_client_ctx->use_ipadma) { + pdev = ipa_get_dma_dev(); + host_addr = IPA_MHI_CLIENT_HOST_ADDR_COND(host_addr); + + mem.size = size; + mem.base = dma_alloc_coherent(pdev, mem.size, + &mem.phys_base, GFP_KERNEL); + if (!mem.base) { + IPA_MHI_ERR( + "dma_alloc_coherent failed, DMA buff size %d\n" + , mem.size); + return -ENOMEM; + } + + if (dir == IPA_MHI_DMA_FROM_HOST) { + res = ipa_dma_sync_memcpy(mem.phys_base, host_addr, + size); + if (res) { + IPA_MHI_ERR( + "ipa_dma_sync_memcpy from host fail%d\n" + , res); + goto fail_memcopy; + } + memcpy(dev_addr, mem.base, size); + } else { + memcpy(mem.base, dev_addr, size); + res = ipa_dma_sync_memcpy(host_addr, mem.phys_base, + size); + if (res) { + IPA_MHI_ERR( + "ipa_dma_sync_memcpy to host fail %d\n" + , res); + goto fail_memcopy; + } + } + dma_free_coherent(pdev, mem.size, mem.base, + mem.phys_base); + } else { + void *host_ptr; + + if (!ipa_mhi_client_ctx->test_mode) + host_ptr = ioremap(host_addr, size); + else + host_ptr = phys_to_virt(host_addr); + if (!host_ptr) { + IPA_MHI_ERR("ioremap failed for 0x%llx\n", host_addr); + return -EFAULT; + } + if (dir == IPA_MHI_DMA_FROM_HOST) + memcpy(dev_addr, host_ptr, size); + else + memcpy(host_ptr, dev_addr, size); + if (!ipa_mhi_client_ctx->test_mode) + iounmap(host_ptr); + } + + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_memcopy: + dma_free_coherent(ipa_get_dma_dev(), mem.size, mem.base, + mem.phys_base); + return res; +} + +static int ipa_mhi_print_channel_info(struct ipa_mhi_channel_ctx *channel, + char *buff, int len) +{ + int nbytes = 0; + + if (channel->valid) { + nbytes += scnprintf(&buff[nbytes], + len - nbytes, + "channel idx=%d ch_id=%d client=%d state=%s\n", + channel->index, channel->id, channel->client, + MHI_CH_STATE_STR(channel->state)); + + nbytes += scnprintf(&buff[nbytes], + len - nbytes, + " ch_ctx=%llx\n", + channel->channel_context_addr); + + nbytes += scnprintf(&buff[nbytes], + len - nbytes, + " gsi_evt_ring_hdl=%ld ev_ctx=%llx\n", + channel->cached_gsi_evt_ring_hdl, + channel->event_context_addr); + } + return nbytes; +} + +static int ipa_mhi_print_host_channel_ctx_info( + struct ipa_mhi_channel_ctx *channel, char *buff, int len) +{ + int res, nbytes = 0; + struct ipa_mhi_ch_ctx ch_ctx_host; + + memset(&ch_ctx_host, 0, sizeof(ch_ctx_host)); + + /* reading ch context from host */ + res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST, + &ch_ctx_host, channel->channel_context_addr, + sizeof(ch_ctx_host)); + if (res) { + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "Failed to read from host %d\n", res); + return nbytes; + } + + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "ch_id: %d\n", channel->id); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "chstate: 0x%x\n", ch_ctx_host.chstate); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "brstmode: 0x%x\n", ch_ctx_host.brstmode); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "chtype: 0x%x\n", ch_ctx_host.chtype); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "erindex: 0x%x\n", 
ch_ctx_host.erindex); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "rbase: 0x%llx\n", ch_ctx_host.rbase); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "rlen: 0x%llx\n", ch_ctx_host.rlen); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "rp: 0x%llx\n", ch_ctx_host.rp); + nbytes += scnprintf(&buff[nbytes], len - nbytes, + "wp: 0x%llx\n", ch_ctx_host.wp); + + return nbytes; +} + +static ssize_t ipa_mhi_debugfs_stats(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int nbytes = 0; + int i; + struct ipa_mhi_channel_ctx *channel; + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "IPA MHI state: %s\n", + MHI_STATE_STR(ipa_mhi_client_ctx->state)); + + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->ul_channels[i]; + nbytes += ipa_mhi_print_channel_info(channel, + &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes); + } + + for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->dl_channels[i]; + nbytes += ipa_mhi_print_channel_info(channel, + &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes); + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_mhi_debugfs_uc_stats(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int nbytes = 0; + + nbytes += ipa_uc_mhi_print_stats(dbg_buff, IPA_MHI_MAX_MSG_LEN); + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_mhi_debugfs_dump_host_ch_ctx_arr(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int i, nbytes = 0; + struct ipa_mhi_channel_ctx *channel; + + if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_INITIALIZED || + ipa_mhi_client_ctx->state == IPA_MHI_STATE_READY) { + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "Cannot dump host channel context "); + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "before IPA MHI was STARTED\n"); + return simple_read_from_buffer(ubuf, count, ppos, + dbg_buff, nbytes); + } + if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) { + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "IPA MHI is suspended, cannot dump channel ctx array"); + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + " from host -PCIe can be in D3 state\n"); + return simple_read_from_buffer(ubuf, count, ppos, + dbg_buff, nbytes); + } + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "channel contex array - dump from host\n"); + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "***** UL channels *******\n"); + + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->ul_channels[i]; + if (!channel->valid) + continue; + nbytes += ipa_mhi_print_host_channel_ctx_info(channel, + &dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes); + } + + nbytes += scnprintf(&dbg_buff[nbytes], + IPA_MHI_MAX_MSG_LEN - nbytes, + "\n***** DL channels *******\n"); + + for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->dl_channels[i]; + if (!channel->valid) + continue; + nbytes += ipa_mhi_print_host_channel_ctx_info(channel, + &dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes); + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +const struct file_operations ipa_mhi_stats_ops = { + .read = ipa_mhi_debugfs_stats, +}; + +const struct file_operations ipa_mhi_uc_stats_ops = { + .read 
= ipa_mhi_debugfs_uc_stats, +}; + +const struct file_operations ipa_mhi_dump_host_ch_ctx_ops = { + .read = ipa_mhi_debugfs_dump_host_ch_ctx_arr, +}; + + +static void ipa_mhi_debugfs_init(void) +{ + const mode_t read_only_mode = 0444; + const mode_t read_write_mode = 0664; + struct dentry *file; + + IPA_MHI_FUNC_ENTRY(); + + dent = debugfs_create_dir("ipa_mhi", 0); + if (IS_ERR(dent)) { + IPA_MHI_ERR("fail to create folder ipa_mhi\n"); + return; + } + + file = debugfs_create_file("stats", read_only_mode, dent, + 0, &ipa_mhi_stats_ops); + if (!file || IS_ERR(file)) { + IPA_MHI_ERR("fail to create file stats\n"); + goto fail; + } + + file = debugfs_create_file("uc_stats", read_only_mode, dent, + 0, &ipa_mhi_uc_stats_ops); + if (!file || IS_ERR(file)) { + IPA_MHI_ERR("fail to create file uc_stats\n"); + goto fail; + } + + file = debugfs_create_u32("use_ipadma", read_write_mode, dent, + &ipa_mhi_client_ctx->use_ipadma); + if (!file || IS_ERR(file)) { + IPA_MHI_ERR("fail to create file use_ipadma\n"); + goto fail; + } + + file = debugfs_create_file("dump_host_channel_ctx_array", + read_only_mode, dent, 0, &ipa_mhi_dump_host_ch_ctx_ops); + if (!file || IS_ERR(file)) { + IPA_MHI_ERR("fail to create file dump_host_channel_ctx_arr\n"); + goto fail; + } + + IPA_MHI_FUNC_EXIT(); + return; +fail: + debugfs_remove_recursive(dent); +} + +#else +static void ipa_mhi_debugfs_init(void) {} +static void ipa_mhi_debugfs_destroy(void) {} +#endif /* CONFIG_DEBUG_FS */ + +static union IpaHwMhiDlUlSyncCmdData_t ipa_cached_dl_ul_sync_info; + +static void ipa_mhi_wq_notify_wakeup(struct work_struct *work); +static DECLARE_WORK(ipa_mhi_notify_wakeup_work, ipa_mhi_wq_notify_wakeup); + +static void ipa_mhi_wq_notify_ready(struct work_struct *work); +static DECLARE_WORK(ipa_mhi_notify_ready_work, ipa_mhi_wq_notify_ready); + +/** + * ipa_mhi_notify_wakeup() - Schedule work to notify data available + * + * This function will schedule a work to notify data available event. + * In case this function is called more than once, only one notification will + * be sent to MHI client driver. No further notifications will be sent until + * IPA MHI state will become STARTED. + */ +static void ipa_mhi_notify_wakeup(void) +{ + IPA_MHI_FUNC_ENTRY(); + if (ipa_mhi_client_ctx->wakeup_notified) { + IPA_MHI_DBG("wakeup already called\n"); + return; + } + queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_wakeup_work); + ipa_mhi_client_ctx->wakeup_notified = true; + IPA_MHI_FUNC_EXIT(); +} + +/** + * ipa_mhi_rm_cons_request() - callback function for IPA RM request resource + * + * In case IPA MHI is not suspended, MHI CONS will be granted immediately. + * In case IPA MHI is suspended, MHI CONS will be granted after resume. 
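+ * In any other state -EINPROGRESS is returned and the grant is signalled
+ * later through ipa_rm_notify_completion() once IPA MHI returns to the
+ * STARTED state.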
+ */ +static int ipa_mhi_rm_cons_request(void) +{ + unsigned long flags; + int res; + + IPA_MHI_FUNC_ENTRY(); + + IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state)); + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_REQUESTED; + if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_STARTED) { + ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED; + res = 0; + } else if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) { + ipa_mhi_notify_wakeup(); + res = -EINPROGRESS; + } else if (ipa_mhi_client_ctx->state == + IPA_MHI_STATE_SUSPEND_IN_PROGRESS) { + /* wakeup event will be trigger after suspend finishes */ + ipa_mhi_client_ctx->trigger_wakeup = true; + res = -EINPROGRESS; + } else { + res = -EINPROGRESS; + } + + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + IPA_MHI_DBG("EXIT with %d\n", res); + return res; +} + +static int ipa_mhi_rm_cons_release(void) +{ + unsigned long flags; + + IPA_MHI_FUNC_ENTRY(); + + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED; + complete_all(&ipa_mhi_client_ctx->rm_cons_comp); + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static void ipa_mhi_rm_prod_notify(void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + IPA_MHI_FUNC_ENTRY(); + + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + IPA_MHI_DBG("IPA_RM_RESOURCE_GRANTED\n"); + complete_all(&ipa_mhi_client_ctx->rm_prod_granted_comp); + break; + + case IPA_RM_RESOURCE_RELEASED: + IPA_MHI_DBG("IPA_RM_RESOURCE_RELEASED\n"); + break; + + default: + IPA_MHI_ERR("unexpected event %d\n", event); + WARN_ON(1); + break; + } + + IPA_MHI_FUNC_EXIT(); +} + +/** + * ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available + * + * This function is called from IPA MHI workqueue to notify + * MHI client driver on data available event. + */ +static void ipa_mhi_wq_notify_wakeup(struct work_struct *work) +{ + IPA_MHI_FUNC_ENTRY(); + ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv, + IPA_MHI_EVENT_DATA_AVAILABLE, 0); + IPA_MHI_FUNC_EXIT(); +} + +/** + * ipa_mhi_wq_notify_ready() - Notify MHI client on ready + * + * This function is called from IPA MHI workqueue to notify + * MHI client driver on ready event when IPA uC is loaded + */ +static void ipa_mhi_wq_notify_ready(struct work_struct *work) +{ + IPA_MHI_FUNC_ENTRY(); + ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv, + IPA_MHI_EVENT_READY, 0); + IPA_MHI_FUNC_EXIT(); +} + +/** + * ipa_mhi_notify_ready() - Schedule work to notify ready + * + * This function will schedule a work to notify ready event. + */ +static void ipa_mhi_notify_ready(void) +{ + IPA_MHI_FUNC_ENTRY(); + queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_ready_work); + IPA_MHI_FUNC_EXIT(); +} + +/** + * ipa_mhi_set_state() - Set new state to IPA MHI + * @state: new state + * + * Sets a new state to IPA MHI if possible according to IPA MHI state machine. + * In some state transitions a wakeup request will be triggered. 
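+ * A wakeup request is triggered on the SUSPEND_IN_PROGRESS->SUSPENDED and
+ * RESUME_IN_PROGRESS->SUSPENDED transitions when a wakeup arrived while the
+ * transition was still in progress.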
+ * + * Returns: 0 on success, -1 otherwise + */ +static int ipa_mhi_set_state(enum ipa_mhi_state new_state) +{ + unsigned long flags; + int res = -EPERM; + + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + IPA_MHI_DBG("Current state: %s\n", + MHI_STATE_STR(ipa_mhi_client_ctx->state)); + + switch (ipa_mhi_client_ctx->state) { + case IPA_MHI_STATE_INITIALIZED: + if (new_state == IPA_MHI_STATE_READY) { + ipa_mhi_notify_ready(); + res = 0; + } + break; + + case IPA_MHI_STATE_READY: + if (new_state == IPA_MHI_STATE_READY) + res = 0; + if (new_state == IPA_MHI_STATE_STARTED) + res = 0; + break; + + case IPA_MHI_STATE_STARTED: + if (new_state == IPA_MHI_STATE_INITIALIZED) + res = 0; + else if (new_state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS) + res = 0; + break; + + case IPA_MHI_STATE_SUSPEND_IN_PROGRESS: + if (new_state == IPA_MHI_STATE_SUSPENDED) { + if (ipa_mhi_client_ctx->trigger_wakeup) { + ipa_mhi_client_ctx->trigger_wakeup = false; + ipa_mhi_notify_wakeup(); + } + res = 0; + } else if (new_state == IPA_MHI_STATE_STARTED) { + ipa_mhi_client_ctx->wakeup_notified = false; + ipa_mhi_client_ctx->trigger_wakeup = false; + if (ipa_mhi_client_ctx->rm_cons_state == + IPA_MHI_RM_STATE_REQUESTED) { + ipa_rm_notify_completion( + IPA_RM_RESOURCE_GRANTED, + IPA_RM_RESOURCE_MHI_CONS); + ipa_mhi_client_ctx->rm_cons_state = + IPA_MHI_RM_STATE_GRANTED; + } + res = 0; + } + break; + + case IPA_MHI_STATE_SUSPENDED: + if (new_state == IPA_MHI_STATE_RESUME_IN_PROGRESS) + res = 0; + break; + + case IPA_MHI_STATE_RESUME_IN_PROGRESS: + if (new_state == IPA_MHI_STATE_SUSPENDED) { + if (ipa_mhi_client_ctx->trigger_wakeup) { + ipa_mhi_client_ctx->trigger_wakeup = false; + ipa_mhi_notify_wakeup(); + } + res = 0; + } else if (new_state == IPA_MHI_STATE_STARTED) { + ipa_mhi_client_ctx->trigger_wakeup = false; + ipa_mhi_client_ctx->wakeup_notified = false; + if (ipa_mhi_client_ctx->rm_cons_state == + IPA_MHI_RM_STATE_REQUESTED) { + ipa_rm_notify_completion( + IPA_RM_RESOURCE_GRANTED, + IPA_RM_RESOURCE_MHI_CONS); + ipa_mhi_client_ctx->rm_cons_state = + IPA_MHI_RM_STATE_GRANTED; + } + res = 0; + } + break; + + default: + IPA_MHI_ERR("Invalid state %d\n", ipa_mhi_client_ctx->state); + WARN_ON(1); + } + + if (res) + IPA_MHI_ERR("Invalid state change to %s\n", + MHI_STATE_STR(new_state)); + else { + IPA_MHI_DBG("New state change to %s\n", + MHI_STATE_STR(new_state)); + ipa_mhi_client_ctx->state = new_state; + } + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + return res; +} + +static void ipa_mhi_uc_ready_cb(void) +{ + IPA_MHI_FUNC_ENTRY(); + ipa_mhi_set_state(IPA_MHI_STATE_READY); + IPA_MHI_FUNC_EXIT(); +} + +static void ipa_mhi_uc_wakeup_request_cb(void) +{ + unsigned long flags; + + IPA_MHI_FUNC_ENTRY(); + IPA_MHI_DBG("MHI state: %s\n", + MHI_STATE_STR(ipa_mhi_client_ctx->state)); + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) + ipa_mhi_notify_wakeup(); + else if (ipa_mhi_client_ctx->state == + IPA_MHI_STATE_SUSPEND_IN_PROGRESS) + /* wakeup event will be triggered after suspend finishes */ + ipa_mhi_client_ctx->trigger_wakeup = true; + + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + IPA_MHI_FUNC_EXIT(); +} + +static int ipa_mhi_request_prod(void) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + reinit_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp); + IPA_MHI_DBG("requesting mhi prod\n"); + res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD); + if (res) { + if (res != -EINPROGRESS) { + 
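+			/*
+			 * Hard failure: anything other than -EINPROGRESS means
+			 * the PROD request was rejected. -EINPROGRESS is handled
+			 * below by waiting on rm_prod_granted_comp, which
+			 * ipa_mhi_rm_prod_notify() completes on
+			 * IPA_RM_RESOURCE_GRANTED.
+			 */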
IPA_MHI_ERR("failed to request mhi prod %d\n", res); + return res; + } + res = wait_for_completion_timeout( + &ipa_mhi_client_ctx->rm_prod_granted_comp, + msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC)); + if (res == 0) { + IPA_MHI_ERR("timeout request mhi prod\n"); + return -ETIME; + } + } + + IPA_MHI_DBG("mhi prod granted\n"); + IPA_MHI_FUNC_EXIT(); + return 0; + +} + +static int ipa_mhi_release_prod(void) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD); + + IPA_MHI_FUNC_EXIT(); + return res; + +} + +/** + * ipa_mhi_start() - Start IPA MHI engine + * @params: pcie addresses for MHI + * + * This function is called by MHI client driver on MHI engine start for + * handling MHI accelerated channels. This function is called after + * ipa_mhi_init() was called and can be called after MHI reset to restart MHI + * engine. When this function returns device can move to M0 state. + * + * Return codes: 0 : success + * negative : error + */ +int ipa_mhi_start(struct ipa_mhi_start_params *params) +{ + int res; + struct ipa_mhi_init_engine init_params; + + IPA_MHI_FUNC_ENTRY(); + + if (!params) { + IPA_MHI_ERR("null args\n"); + return -EINVAL; + } + + if (!ipa_mhi_client_ctx) { + IPA_MHI_ERR("not initialized\n"); + return -EPERM; + } + + res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED); + if (res) { + IPA_MHI_ERR("ipa_mhi_set_state %d\n", res); + return res; + } + + ipa_mhi_client_ctx->host_ctrl_addr = params->host_ctrl_addr; + ipa_mhi_client_ctx->host_data_addr = params->host_data_addr; + ipa_mhi_client_ctx->channel_context_array_addr = + params->channel_context_array_addr; + ipa_mhi_client_ctx->event_context_array_addr = + params->event_context_array_addr; + IPA_MHI_DBG("host_ctrl_addr 0x%x\n", + ipa_mhi_client_ctx->host_ctrl_addr); + IPA_MHI_DBG("host_data_addr 0x%x\n", + ipa_mhi_client_ctx->host_data_addr); + IPA_MHI_DBG("channel_context_array_addr 0x%llx\n", + ipa_mhi_client_ctx->channel_context_array_addr); + IPA_MHI_DBG("event_context_array_addr 0x%llx\n", + ipa_mhi_client_ctx->event_context_array_addr); + + /* Add MHI <-> Q6 dependencies to IPA RM */ + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res && res != -EINPROGRESS) { + IPA_MHI_ERR("failed to add dependency %d\n", res); + goto fail_add_mhi_q6_dep; + } + + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_MHI_CONS); + if (res && res != -EINPROGRESS) { + IPA_MHI_ERR("failed to add dependency %d\n", res); + goto fail_add_q6_mhi_dep; + } + + res = ipa_mhi_request_prod(); + if (res) { + IPA_MHI_ERR("failed request prod %d\n", res); + goto fail_request_prod; + } + + /* gsi params */ + init_params.gsi.first_ch_idx = + ipa_mhi_client_ctx->first_ch_idx; + /* uC params */ + init_params.uC.first_ch_idx = + ipa_mhi_client_ctx->first_ch_idx; + init_params.uC.first_er_idx = + ipa_mhi_client_ctx->first_er_idx; + init_params.uC.host_ctrl_addr = params->host_ctrl_addr; + init_params.uC.host_data_addr = params->host_data_addr; + init_params.uC.mmio_addr = ipa_mhi_client_ctx->mmio_addr; + init_params.uC.msi = &ipa_mhi_client_ctx->msi; + init_params.uC.ipa_cached_dl_ul_sync_info = + &ipa_cached_dl_ul_sync_info; + + res = ipa_mhi_init_engine(&init_params); + if (res) { + IPA_MHI_ERR("IPA core failed to start MHI %d\n", res); + goto fail_init_engine; + } + + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_init_engine: + ipa_mhi_release_prod(); +fail_request_prod: + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_MHI_CONS); 
+fail_add_q6_mhi_dep: + ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD, + IPA_RM_RESOURCE_Q6_CONS); +fail_add_mhi_q6_dep: + ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED); + return res; +} + +/** + * ipa_mhi_get_channel_context() - Get corresponding channel context + * @ep: IPA ep + * @channel_id: Channel ID + * + * This function will return the corresponding channel context or allocate new + * one in case channel context for channel does not exist. + */ +static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context( + enum ipa_client_type client, u8 channel_id) +{ + int ch_idx; + struct ipa_mhi_channel_ctx *channels; + int max_channels; + + if (IPA_CLIENT_IS_PROD(client)) { + channels = ipa_mhi_client_ctx->ul_channels; + max_channels = IPA_MHI_MAX_UL_CHANNELS; + } else { + channels = ipa_mhi_client_ctx->dl_channels; + max_channels = IPA_MHI_MAX_DL_CHANNELS; + } + + /* find the channel context according to channel id */ + for (ch_idx = 0; ch_idx < max_channels; ch_idx++) { + if (channels[ch_idx].valid && + channels[ch_idx].id == channel_id) + return &channels[ch_idx]; + } + + /* channel context does not exists, allocate a new one */ + for (ch_idx = 0; ch_idx < max_channels; ch_idx++) { + if (!channels[ch_idx].valid) + break; + } + + if (ch_idx == max_channels) { + IPA_MHI_ERR("no more channels available\n"); + return NULL; + } + + channels[ch_idx].valid = true; + channels[ch_idx].id = channel_id; + channels[ch_idx].index = ipa_mhi_client_ctx->total_channels++; + channels[ch_idx].client = client; + channels[ch_idx].state = IPA_HW_MHI_CHANNEL_STATE_INVALID; + + return &channels[ch_idx]; +} + +/** + * ipa_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel + * context + * @clnt_hdl: client handle as provided in ipa_mhi_connect_pipe() + * + * This function will return the corresponding channel context or NULL in case + * that channel does not exist. 
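+ * The lookup compares ipa_get_ep_mapping() of every valid UL and DL channel
+ * against the given client handle.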
+ */ +static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context_by_clnt_hdl( + u32 clnt_hdl) +{ + int ch_idx; + + for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) { + if (ipa_mhi_client_ctx->ul_channels[ch_idx].valid && + ipa_get_ep_mapping( + ipa_mhi_client_ctx->ul_channels[ch_idx].client) + == clnt_hdl) + return &ipa_mhi_client_ctx->ul_channels[ch_idx]; + } + + for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) { + if (ipa_mhi_client_ctx->dl_channels[ch_idx].valid && + ipa_get_ep_mapping( + ipa_mhi_client_ctx->dl_channels[ch_idx].client) + == clnt_hdl) + return &ipa_mhi_client_ctx->dl_channels[ch_idx]; + } + + return NULL; +} + +static void ipa_mhi_dump_ch_ctx(struct ipa_mhi_channel_ctx *channel) +{ + IPA_MHI_DBG("ch_id %d\n", channel->id); + IPA_MHI_DBG("chstate 0x%x\n", channel->ch_ctx_host.chstate); + IPA_MHI_DBG("brstmode 0x%x\n", channel->ch_ctx_host.brstmode); + IPA_MHI_DBG("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg); + IPA_MHI_DBG("chtype 0x%x\n", channel->ch_ctx_host.chtype); + IPA_MHI_DBG("erindex 0x%x\n", channel->ch_ctx_host.erindex); + IPA_MHI_DBG("rbase 0x%llx\n", channel->ch_ctx_host.rbase); + IPA_MHI_DBG("rlen 0x%llx\n", channel->ch_ctx_host.rlen); + IPA_MHI_DBG("rp 0x%llx\n", channel->ch_ctx_host.rp); + IPA_MHI_DBG("wp 0x%llx\n", channel->ch_ctx_host.wp); +} + +static void ipa_mhi_dump_ev_ctx(struct ipa_mhi_channel_ctx *channel) +{ + IPA_MHI_DBG("ch_id %d event id %d\n", channel->id, + channel->ch_ctx_host.erindex); + + IPA_MHI_DBG("intmodc 0x%x\n", channel->ev_ctx_host.intmodc); + IPA_MHI_DBG("intmodt 0x%x\n", channel->ev_ctx_host.intmodt); + IPA_MHI_DBG("ertype 0x%x\n", channel->ev_ctx_host.ertype); + IPA_MHI_DBG("msivec 0x%x\n", channel->ev_ctx_host.msivec); + IPA_MHI_DBG("rbase 0x%llx\n", channel->ev_ctx_host.rbase); + IPA_MHI_DBG("rlen 0x%llx\n", channel->ev_ctx_host.rlen); + IPA_MHI_DBG("rp 0x%llx\n", channel->ev_ctx_host.rp); + IPA_MHI_DBG("wp 0x%llx\n", channel->ev_ctx_host.wp); +} + +static int ipa_mhi_read_ch_ctx(struct ipa_mhi_channel_ctx *channel) +{ + int res; + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST, + &channel->ch_ctx_host, channel->channel_context_addr, + sizeof(channel->ch_ctx_host)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res); + return res; + + } + ipa_mhi_dump_ch_ctx(channel); + + channel->event_context_addr = + ipa_mhi_client_ctx->event_context_array_addr + + channel->ch_ctx_host.erindex * sizeof(struct ipa_mhi_ev_ctx); + IPA_MHI_DBG("ch %d event_context_addr 0x%llx\n", channel->id, + channel->event_context_addr); + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST, + &channel->ev_ctx_host, channel->event_context_addr, + sizeof(channel->ev_ctx_host)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res); + return res; + + } + ipa_mhi_dump_ev_ctx(channel); + + return 0; +} + +static void ipa_mhi_gsi_ev_err_cb(struct gsi_evt_err_notify *notify) +{ + struct ipa_mhi_channel_ctx *channel = notify->user_data; + + IPA_MHI_ERR("channel id=%d client=%d state=%d\n", + channel->id, channel->client, channel->state); + switch (notify->evt_id) { + case GSI_EVT_OUT_OF_BUFFERS_ERR: + IPA_MHI_ERR("Received GSI_EVT_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_EVT_OUT_OF_RESOURCES_ERR: + IPA_MHI_ERR("Received GSI_EVT_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR: + IPA_MHI_ERR("Received GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n"); + break; + case GSI_EVT_EVT_RING_EMPTY_ERR: + IPA_MHI_ERR("Received GSI_EVT_EVT_RING_EMPTY_ERR\n"); + break; + default: + 
IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id); + } + IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc); + ipa_assert(); +} + +static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify) +{ + struct ipa_mhi_channel_ctx *channel = notify->chan_user_data; + + IPA_MHI_ERR("channel id=%d client=%d state=%d\n", + channel->id, channel->client, channel->state); + switch (notify->evt_id) { + case GSI_CHAN_INVALID_TRE_ERR: + IPA_MHI_ERR("Received GSI_CHAN_INVALID_TRE_ERR\n"); + break; + case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR: + IPA_MHI_ERR("Received GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_BUFFERS_ERR: + IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_RESOURCES_ERR: + IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR: + IPA_MHI_ERR("Received GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n"); + break; + case GSI_CHAN_HWO_1_ERR: + IPA_MHI_ERR("Received GSI_CHAN_HWO_1_ERR\n"); + break; + default: + IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id); + } + IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc); + ipa_assert(); +} + + +static bool ipa_mhi_gsi_channel_empty(struct ipa_mhi_channel_ctx *channel) +{ + IPA_MHI_FUNC_ENTRY(); + + if (!channel->stop_in_proc) { + IPA_MHI_DBG("Channel is not in STOP_IN_PROC\n"); + return true; + } + + if (ipa_mhi_stop_gsi_channel(channel->client) == true) { + channel->stop_in_proc = false; + return true; + } + + return false; +} + +/** + * ipa_mhi_wait_for_ul_empty_timeout() - wait for pending packets in uplink + * @msecs: timeout to wait + * + * This function will poll until there are no packets pending in uplink channels + * or timeout occurred. + * + * Return code: true - no pending packets in uplink channels + * false - timeout occurred + */ +static bool ipa_mhi_wait_for_ul_empty_timeout(unsigned int msecs) +{ + unsigned long jiffies_timeout = msecs_to_jiffies(msecs); + unsigned long jiffies_start = jiffies; + bool empty = false; + int i; + + IPA_MHI_FUNC_ENTRY(); + while (!empty) { + empty = true; + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + if (!ipa_mhi_client_ctx->ul_channels[i].valid) + continue; + if (ipa_get_transport_type() == + IPA_TRANSPORT_TYPE_GSI) + empty &= ipa_mhi_gsi_channel_empty( + &ipa_mhi_client_ctx->ul_channels[i]); + else + empty &= ipa_mhi_sps_channel_empty( + ipa_mhi_client_ctx->ul_channels[i].client); + } + + if (time_after(jiffies, jiffies_start + jiffies_timeout)) { + IPA_MHI_DBG("finished waiting for UL empty\n"); + break; + } + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI && + IPA_MHI_MAX_UL_CHANNELS == 1) + usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC, + IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC); + } + + IPA_MHI_DBG("IPA UL is %s\n", (empty) ? 
"empty" : "not empty"); + + IPA_MHI_FUNC_EXIT(); + return empty; +} + +static int ipa_mhi_enable_force_clear(u32 request_id, bool throttle_source) +{ + struct ipa_enable_force_clear_datapath_req_msg_v01 req; + int i; + int res; + + IPA_MHI_FUNC_ENTRY(); + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + req.source_pipe_bitmask = 0; + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + if (!ipa_mhi_client_ctx->ul_channels[i].valid) + continue; + req.source_pipe_bitmask |= 1 << ipa_get_ep_mapping( + ipa_mhi_client_ctx->ul_channels[i].client); + } + if (throttle_source) { + req.throttle_source_valid = 1; + req.throttle_source = 1; + } + IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n", + req.request_id, req.source_pipe_bitmask, + req.throttle_source); + res = ipa_qmi_enable_force_clear_datapath_send(&req); + if (res) { + IPA_MHI_ERR( + "ipa_qmi_enable_force_clear_datapath_send failed %d\n" + , res); + return res; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static int ipa_mhi_disable_force_clear(u32 request_id) +{ + struct ipa_disable_force_clear_datapath_req_msg_v01 req; + int res; + + IPA_MHI_FUNC_ENTRY(); + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + IPA_MHI_DBG("req_id=0x%x\n", req.request_id); + res = ipa_qmi_disable_force_clear_datapath_send(&req); + if (res) { + IPA_MHI_ERR( + "ipa_qmi_disable_force_clear_datapath_send failed %d\n" + , res); + return res; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static void ipa_mhi_set_holb_on_dl_channels(bool enable, + struct ipa_ep_cfg_holb old_holb[]) +{ + int i; + struct ipa_ep_cfg_holb ep_holb; + int ep_idx; + int res; + + for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { + if (!ipa_mhi_client_ctx->dl_channels[i].valid) + continue; + if (ipa_mhi_client_ctx->dl_channels[i].state == + IPA_HW_MHI_CHANNEL_STATE_INVALID) + continue; + ep_idx = ipa_get_ep_mapping( + ipa_mhi_client_ctx->dl_channels[i].client); + if (-1 == ep_idx) { + IPA_MHI_ERR("Client %u is not mapped\n", + ipa_mhi_client_ctx->dl_channels[i].client); + ipa_assert(); + return; + } + memset(&ep_holb, 0, sizeof(ep_holb)); + if (enable) { + ipa_get_holb(ep_idx, &old_holb[i]); + ep_holb.en = 1; + ep_holb.tmr_val = 0; + } else { + ep_holb = old_holb[i]; + } + res = ipa_cfg_ep_holb(ep_idx, &ep_holb); + if (res) { + IPA_MHI_ERR("ipa_cfg_ep_holb failed %d\n", res); + ipa_assert(); + return; + } + } +} + +static int ipa_mhi_suspend_gsi_channel(struct ipa_mhi_channel_ctx *channel) +{ + int clnt_hdl; + int res; + + IPA_MHI_FUNC_ENTRY(); + clnt_hdl = ipa_get_ep_mapping(channel->client); + if (clnt_hdl < 0) + return -EFAULT; + + res = ipa_stop_gsi_channel(clnt_hdl); + if (res != 0 && res != -GSI_STATUS_AGAIN && + res != -GSI_STATUS_TIMED_OUT) { + IPA_MHI_ERR("GSI stop channel failed %d\n", res); + return -EFAULT; + } + + /* check if channel was stopped completely */ + if (res) + channel->stop_in_proc = true; + + IPA_MHI_DBG("GSI channel is %s\n", (channel->stop_in_proc) ? 
+ "STOP_IN_PROC" : "STOP"); + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel) +{ + int res; + bool empty; + struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS]; + + IPA_MHI_FUNC_ENTRY(); + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + res = ipa_mhi_suspend_gsi_channel(channel); + if (res) { + IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n", + res); + return res; + } + } else { + res = ipa_uc_mhi_reset_channel(channel->index); + if (res) { + IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n", + res); + return res; + } + } + + empty = ipa_mhi_wait_for_ul_empty_timeout( + IPA_MHI_CH_EMPTY_TIMEOUT_MSEC); + if (!empty) { + IPA_MHI_DBG("%s not empty\n", + (ipa_get_transport_type() == + IPA_TRANSPORT_TYPE_GSI) ? "GSI" : "BAM"); + res = ipa_mhi_enable_force_clear( + ipa_mhi_client_ctx->qmi_req_id, false); + if (res) { + IPA_MHI_ERR("ipa_mhi_enable_force_clear failed %d\n", + res); + ipa_assert(); + return res; + } + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + empty = ipa_mhi_wait_for_ul_empty_timeout( + IPA_MHI_CH_EMPTY_TIMEOUT_MSEC); + + IPA_MHI_DBG("empty=%d\n", empty); + } else { + /* enable packet drop on all DL channels */ + ipa_mhi_set_holb_on_dl_channels(true, old_ep_holb); + ipa_generate_tag_process(); + /* disable packet drop on all DL channels */ + ipa_mhi_set_holb_on_dl_channels(false, old_ep_holb); + + res = ipa_disable_sps_pipe(channel->client); + if (res) { + IPA_MHI_ERR("sps_pipe_disable fail %d\n", res); + ipa_assert(); + return res; + } + } + + res = + ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id); + if (res) { + IPA_MHI_ERR("ipa_mhi_disable_force_clear failed %d\n", + res); + ipa_assert(); + return res; + } + ipa_mhi_client_ctx->qmi_req_id++; + } + + res = ipa_mhi_reset_channel_internal(channel->client); + if (res) { + IPA_MHI_ERR("ipa_mhi_reset_ul_channel_internal failed %d\n" + , res); + return res; + } + + IPA_MHI_FUNC_EXIT(); + + return 0; +} + +static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + res = ipa_mhi_suspend_gsi_channel(channel); + if (res) { + IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n" + , res); + return res; + } + + res = ipa_mhi_reset_channel_internal(channel->client); + if (res) { + IPA_MHI_ERR( + "ipa_mhi_reset_ul_channel_internal failed %d\n" + , res); + return res; + } + } else { + res = ipa_mhi_reset_channel_internal(channel->client); + if (res) { + IPA_MHI_ERR( + "ipa_mhi_reset_ul_channel_internal failed %d\n" + , res); + return res; + } + + res = ipa_uc_mhi_reset_channel(channel->index); + if (res) { + IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n", + res); + ipa_mhi_start_channel_internal(channel->client); + return res; + } + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + if (IPA_CLIENT_IS_PROD(channel->client)) + res = ipa_mhi_reset_ul_channel(channel); + else + res = ipa_mhi_reset_dl_channel(channel); + if (res) { + IPA_MHI_ERR("failed to reset channel error %d\n", res); + return res; + } + + channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE; + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->state, channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, chstate), + sizeof(((struct ipa_mhi_ch_ctx 
*)0)->chstate)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res); + return res; + } + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +/** + * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding + * MHI channel + * @in: connect parameters + * @clnt_hdl: [out] client handle for this pipe + * + * This function is called by MHI client driver on MHI channel start. + * This function is called after MHI engine was started. + * + * Return codes: 0 : success + * negative : error + */ +int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl) +{ + int res; + unsigned long flags; + struct ipa_mhi_channel_ctx *channel = NULL; + + IPA_MHI_FUNC_ENTRY(); + + if (!in || !clnt_hdl) { + IPA_MHI_ERR("NULL args\n"); + return -EINVAL; + } + + if (in->sys.client >= IPA_CLIENT_MAX) { + IPA_MHI_ERR("bad param client:%d\n", in->sys.client); + return -EINVAL; + } + + if (!IPA_CLIENT_IS_MHI(in->sys.client)) { + IPA_MHI_ERR( + "Invalid MHI client, client: %d\n", in->sys.client); + return -EINVAL; + } + + IPA_MHI_DBG("channel=%d\n", in->channel_id); + + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + if (!ipa_mhi_client_ctx || + ipa_mhi_client_ctx->state != IPA_MHI_STATE_STARTED) { + IPA_MHI_ERR("IPA MHI was not started\n"); + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + + channel = ipa_mhi_get_channel_context(in->sys.client, in->channel_id); + if (!channel) { + IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n"); + return -EINVAL; + } + + if (channel->state != IPA_HW_MHI_CHANNEL_STATE_INVALID && + channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) { + IPA_MHI_ERR("Invalid channel state %d\n", channel->state); + return -EFAULT; + } + + channel->channel_context_addr = + ipa_mhi_client_ctx->channel_context_array_addr + + channel->id * sizeof(struct ipa_mhi_ch_ctx); + + /* for event context address index needs to read from host */ + + IPA_MHI_DBG("client %d channelIndex %d channelID %d, state %d\n", + channel->client, channel->index, channel->id, channel->state); + IPA_MHI_DBG("channel_context_addr 0x%llx cached_gsi_evt_ring_hdl %lu\n", + channel->channel_context_addr, + channel->cached_gsi_evt_ring_hdl); + + IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client); + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + struct ipa_mhi_connect_params_internal internal; + + IPA_MHI_DBG("reading ch/ev context from host\n"); + res = ipa_mhi_read_ch_ctx(channel); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res); + goto fail_start_channel; + } + + internal.channel_id = in->channel_id; + internal.sys = &in->sys; + internal.start.gsi.state = channel->state; + internal.start.gsi.msi = &ipa_mhi_client_ctx->msi; + internal.start.gsi.ev_ctx_host = &channel->ev_ctx_host; + internal.start.gsi.event_context_addr = + channel->event_context_addr; + internal.start.gsi.ch_ctx_host = &channel->ch_ctx_host; + internal.start.gsi.channel_context_addr = + channel->channel_context_addr; + internal.start.gsi.ch_err_cb = ipa_mhi_gsi_ch_err_cb; + internal.start.gsi.channel = (void *)channel; + internal.start.gsi.ev_err_cb = ipa_mhi_gsi_ev_err_cb; + internal.start.gsi.assert_bit40 = + ipa_mhi_client_ctx->assert_bit40; + internal.start.gsi.mhi = &channel->ch_scratch.mhi; + internal.start.gsi.cached_gsi_evt_ring_hdl = + &channel->cached_gsi_evt_ring_hdl; + internal.start.gsi.evchid = + channel->index + IPA_MHI_GSI_ER_START; + + res = ipa_connect_mhi_pipe(&internal, 
clnt_hdl); + if (res) { + IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res); + goto fail_connect_pipe; + } + channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN; + channel->brstmode_enabled = + channel->ch_scratch.mhi.burst_mode_enabled; + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->state, channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, chstate), + sizeof(channel->state)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed\n"); + return res; + + } + } else { + struct ipa_mhi_connect_params_internal internal; + + internal.channel_id = in->channel_id; + internal.sys = &in->sys; + internal.start.uC.index = channel->index; + internal.start.uC.id = channel->id; + internal.start.uC.state = channel->state; + res = ipa_connect_mhi_pipe(&internal, clnt_hdl); + if (res) { + IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res); + goto fail_connect_pipe; + } + channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN; + } + + if (!in->sys.keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + + IPA_MHI_FUNC_EXIT(); + + return 0; +fail_connect_pipe: + ipa_mhi_reset_channel(channel); +fail_start_channel: + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + return -EPERM; +} + +/** + * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding + * MHI channel + * @clnt_hdl: client handle for this pipe + * + * This function is called by MHI client driver on MHI channel reset. + * This function is called after MHI channel was started. + * This function is doing the following: + * - Send command to uC/GSI to reset corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa_mhi_disconnect_pipe(u32 clnt_hdl) +{ + int res; + enum ipa_client_type client; + static struct ipa_mhi_channel_ctx *channel; + + IPA_MHI_FUNC_ENTRY(); + + if (!ipa_mhi_client_ctx) { + IPA_MHI_ERR("IPA MHI was not initialized\n"); + return -EINVAL; + } + + client = ipa_get_client_mapping(clnt_hdl); + + if (!IPA_CLIENT_IS_MHI(client)) { + IPA_MHI_ERR("invalid IPA MHI client, client: %d\n", client); + return -EINVAL; + } + + channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl); + if (!channel) { + IPA_MHI_ERR("invalid clnt index\n"); + return -EINVAL; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa_get_client_mapping(clnt_hdl)); + + res = ipa_mhi_reset_channel(channel); + if (res) { + IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res); + goto fail_reset_channel; + } + + res = ipa_disconnect_mhi_pipe(clnt_hdl); + if (res) { + IPA_MHI_ERR( + "IPA core driver failed to disconnect the pipe hdl %d, res %d" + , clnt_hdl, res); + return res; + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl)); + + IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl); + IPA_MHI_FUNC_EXIT(); + return 0; +fail_reset_channel: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl)); + return res; +} + +static int ipa_mhi_wait_for_cons_release(void) +{ + unsigned long flags; + int res; + + IPA_MHI_FUNC_ENTRY(); + reinit_completion(&ipa_mhi_client_ctx->rm_cons_comp); + spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags); + if (ipa_mhi_client_ctx->rm_cons_state != IPA_MHI_RM_STATE_GRANTED) { + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags); + + res = wait_for_completion_timeout( + &ipa_mhi_client_ctx->rm_cons_comp, + msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC)); + if (res == 0) { + IPA_MHI_ERR("timeout release mhi cons\n"); + return 
-ETIME; + } + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels) +{ + int i; + int res; + + IPA_MHI_FUNC_ENTRY(); + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + if (!channels[i].valid) + continue; + if (channels[i].state != + IPA_HW_MHI_CHANNEL_STATE_RUN) + continue; + IPA_MHI_DBG("suspending channel %d\n", + channels[i].id); + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) + res = ipa_mhi_suspend_gsi_channel( + &channels[i]); + else + res = ipa_uc_mhi_suspend_channel( + channels[i].index); + + if (res) { + IPA_MHI_ERR("failed to suspend channel %d error %d\n", + i, res); + return res; + } + channels[i].state = + IPA_HW_MHI_CHANNEL_STATE_SUSPEND; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static int ipa_mhi_stop_event_update_channels( + struct ipa_mhi_channel_ctx *channels) +{ + int i; + int res; + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) + return 0; + + IPA_MHI_FUNC_ENTRY(); + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + if (!channels[i].valid) + continue; + if (channels[i].state != + IPA_HW_MHI_CHANNEL_STATE_SUSPEND) + continue; + IPA_MHI_DBG("stop update event channel %d\n", + channels[i].id); + res = ipa_uc_mhi_stop_event_update_channel( + channels[i].index); + if (res) { + IPA_MHI_ERR("failed stop event channel %d error %d\n", + i, res); + return res; + } + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +static bool ipa_mhi_check_pending_packets_from_host(void) +{ + int i; + int res; + struct ipa_mhi_channel_ctx *channel; + + IPA_MHI_FUNC_ENTRY(); + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->ul_channels[i]; + if (!channel->valid) + continue; + + res = ipa_mhi_query_ch_info(channel->client, + &channel->ch_info); + if (res) { + IPA_MHI_ERR("gsi_query_channel_info failed\n"); + return true; + } + res = ipa_mhi_read_ch_ctx(channel); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res); + return true; + } + + if (channel->ch_info.rp != channel->ch_ctx_host.wp) { + IPA_MHI_DBG("There are pending packets from host\n"); + IPA_MHI_DBG("device rp 0x%llx host 0x%llx\n", + channel->ch_info.rp, channel->ch_ctx_host.wp); + + return true; + } + } + + IPA_MHI_FUNC_EXIT(); + return false; +} + +static int ipa_mhi_resume_channels(bool LPTransitionRejected, + struct ipa_mhi_channel_ctx *channels) +{ + int i; + int res; + struct ipa_mhi_channel_ctx *channel; + + IPA_MHI_FUNC_ENTRY(); + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + if (!channels[i].valid) + continue; + if (channels[i].state != + IPA_HW_MHI_CHANNEL_STATE_SUSPEND) + continue; + channel = &channels[i]; + IPA_MHI_DBG("resuming channel %d\n", channel->id); + + res = ipa_mhi_resume_channels_internal(channel->client, + LPTransitionRejected, channel->brstmode_enabled, + channel->ch_scratch, channel->index); + + if (res) { + IPA_MHI_ERR("failed to resume channel %d error %d\n", + i, res); + return res; + } + + channel->stop_in_proc = false; + channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +/** + * ipa_mhi_suspend_ul() - Suspend MHI accelerated up link channels + * @force: + * false: in case of data pending in IPA, MHI channels will not be + * suspended and function will fail. + * true: in case of data pending in IPA, make sure no further access from + * IPA to PCIe is possible. In this case suspend cannot fail. + * + * + * This function is called by MHI client driver on MHI suspend. + * This function is called after MHI channel was started. 
+ * When this function returns device can move to M1/M2/M3/D3cold state. + * + * Return codes: 0 : success + * negative : error + */ +static int ipa_mhi_suspend_ul(bool force, bool *empty, bool *force_clear) +{ + int res; + + *force_clear = false; + + res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels); + if (res) { + IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res); + goto fail_suspend_ul_channel; + } + + *empty = ipa_mhi_wait_for_ul_empty_timeout( + IPA_MHI_CH_EMPTY_TIMEOUT_MSEC); + + if (!*empty) { + if (force) { + res = ipa_mhi_enable_force_clear( + ipa_mhi_client_ctx->qmi_req_id, false); + if (res) { + IPA_MHI_ERR("failed to enable force clear\n"); + ipa_assert(); + return res; + } + *force_clear = true; + IPA_MHI_DBG("force clear datapath enabled\n"); + + *empty = ipa_mhi_wait_for_ul_empty_timeout( + IPA_MHI_CH_EMPTY_TIMEOUT_MSEC); + IPA_MHI_DBG("empty=%d\n", *empty); + if (!*empty && ipa_get_transport_type() + == IPA_TRANSPORT_TYPE_GSI) { + IPA_MHI_ERR("Failed to suspend UL channels\n"); + if (ipa_mhi_client_ctx->test_mode) { + res = -EAGAIN; + goto fail_suspend_ul_channel; + } + + ipa_assert(); + } + } else { + IPA_MHI_DBG("IPA not empty\n"); + res = -EAGAIN; + goto fail_suspend_ul_channel; + } + } + + if (*force_clear) { + res = + ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id); + if (res) { + IPA_MHI_ERR("failed to disable force clear\n"); + ipa_assert(); + return res; + } + IPA_MHI_DBG("force clear datapath disabled\n"); + ipa_mhi_client_ctx->qmi_req_id++; + } + + if (!force && ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + if (ipa_mhi_check_pending_packets_from_host()) { + res = -EAGAIN; + goto fail_suspend_ul_channel; + } + } + + res = ipa_mhi_stop_event_update_channels( + ipa_mhi_client_ctx->ul_channels); + if (res) { + IPA_MHI_ERR( + "ipa_mhi_stop_event_update_ul_channels failed %d\n", + res); + goto fail_suspend_ul_channel; + } + + return 0; + +fail_suspend_ul_channel: + return res; +} + +static bool ipa_mhi_has_open_aggr_frame(void) +{ + struct ipa_mhi_channel_ctx *channel; + int i; + + for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->dl_channels[i]; + + if (!channel->valid) + continue; + + if (ipa_has_open_aggr_frame(channel->client)) + return true; + } + + return false; +} + +static void ipa_mhi_update_host_ch_state(bool update_rp) +{ + int i; + int res; + struct ipa_mhi_channel_ctx *channel; + + for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->ul_channels[i]; + if (!channel->valid) + continue; + + if (update_rp) { + res = ipa_mhi_query_ch_info(channel->client, + &channel->ch_info); + if (res) { + IPA_MHI_ERR("gsi_query_channel_info failed\n"); + ipa_assert(); + return; + } + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->ch_info.rp, + channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, rp), + sizeof(channel->ch_info.rp)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed\n"); + ipa_assert(); + return; + } + } + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->state, channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, chstate), + sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed\n"); + ipa_assert(); + return; + } + } + + for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) { + channel = &ipa_mhi_client_ctx->dl_channels[i]; + if (!channel->valid) + continue; + + if (update_rp) { + res = ipa_mhi_query_ch_info(channel->client, + &channel->ch_info); 
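+			/*
+			 * As with the UL channels above, the device read
+			 * pointer and channel state are written back to the
+			 * host channel context array below.
+			 */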
+ if (res) { + IPA_MHI_ERR("gsi_query_channel_info failed\n"); + ipa_assert(); + return; + } + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->ch_info.rp, + channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, rp), + sizeof(channel->ch_info.rp)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed\n"); + ipa_assert(); + return; + } + } + + res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST, + &channel->state, channel->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, chstate), + sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate)); + if (res) { + IPA_MHI_ERR("ipa_mhi_read_write_host failed\n"); + ipa_assert(); + } + } +} + +static int ipa_mhi_suspend_dl(bool force) +{ + int res; + + res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels); + if (res) { + IPA_MHI_ERR( + "ipa_mhi_suspend_channels for dl failed %d\n", res); + goto fail_suspend_dl_channel; + } + + res = ipa_mhi_stop_event_update_channels + (ipa_mhi_client_ctx->dl_channels); + if (res) { + IPA_MHI_ERR("failed to stop event update on DL %d\n", res); + goto fail_stop_event_update_dl_channel; + } + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) { + if (ipa_mhi_has_open_aggr_frame()) { + IPA_MHI_DBG("There is an open aggr frame\n"); + if (force) { + ipa_mhi_client_ctx->trigger_wakeup = true; + } else { + res = -EAGAIN; + goto fail_stop_event_update_dl_channel; + } + } + } + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) + ipa_mhi_update_host_ch_state(true); + + return 0; + +fail_stop_event_update_dl_channel: + ipa_mhi_resume_channels(true, + ipa_mhi_client_ctx->dl_channels); +fail_suspend_dl_channel: + return res; +} + +/** + * ipa_mhi_suspend() - Suspend MHI accelerated channels + * @force: + * false: in case of data pending in IPA, MHI channels will not be + * suspended and function will fail. + * true: in case of data pending in IPA, make sure no further access from + * IPA to PCIe is possible. In this case suspend cannot fail. + * + * This function is called by MHI client driver on MHI suspend. + * This function is called after MHI channel was started. + * When this function returns device can move to M1/M2/M3/D3cold state. 
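+ *
+ * Illustrative call sequence from the MHI client side (editorial sketch
+ * only, not part of the original snapshot; error handling abbreviated).
+ * A non-forced attempt that fails with -EAGAIN (data still pending in
+ * IPA) may be retried with force set:
+ *
+ *	res = ipa_mhi_suspend(false);
+ *	if (res == -EAGAIN)
+ *		res = ipa_mhi_suspend(true);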
+ * + * Return codes: 0 : success + * negative : error + */ +int ipa_mhi_suspend(bool force) +{ + int res; + bool empty; + bool force_clear; + + IPA_MHI_FUNC_ENTRY(); + + res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS); + if (res) { + IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res); + return res; + } + res = ipa_mhi_suspend_ul(force, &empty, &force_clear); + if (res) { + IPA_MHI_ERR("ipa_mhi_suspend_ul failed %d\n", res); + goto fail_suspend_ul_channel; + } + + /* + * hold IPA clocks and release them after all + * IPA RM resource are released to make sure tag process will not start + */ + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + IPA_MHI_DBG("release prod\n"); + res = ipa_mhi_release_prod(); + if (res) { + IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res); + goto fail_release_prod; + } + + IPA_MHI_DBG("wait for cons release\n"); + res = ipa_mhi_wait_for_cons_release(); + if (res) { + IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n", res); + goto fail_release_cons; + } + + usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX); + + res = ipa_mhi_suspend_dl(force); + if (res) { + IPA_MHI_ERR("ipa_mhi_suspend_dl failed %d\n", res); + goto fail_suspend_dl_channel; + } + + if (!empty) + ipa_set_tag_process_before_gating(false); + + res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED); + if (res) { + IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res); + goto fail_release_cons; + } + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_suspend_dl_channel: +fail_release_cons: + ipa_mhi_request_prod(); +fail_release_prod: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +fail_suspend_ul_channel: + ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels); + ipa_mhi_set_state(IPA_MHI_STATE_STARTED); + if (force_clear) { + if ( + ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id)) { + IPA_MHI_ERR("failed to disable force clear\n"); + ipa_assert(); + } + IPA_MHI_DBG("force clear datapath disabled\n"); + ipa_mhi_client_ctx->qmi_req_id++; + } + return res; +} + +/** + * ipa_mhi_resume() - Resume MHI accelerated channels + * + * This function is called by MHI client driver on MHI resume. + * This function is called after MHI channel was suspended. + * When this function returns device can move to M0 state. 
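+ *
+ * Illustrative pairing with ipa_mhi_suspend(), where resume is requested
+ * again on the next host or device activity (editorial sketch only, not
+ * part of the original snapshot):
+ *
+ *	res = ipa_mhi_suspend(false);
+ *	if (!res)
+ *		res = ipa_mhi_resume();
+ *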
+ * This function is doing the following: + * - Send command to uC/GSI to resume corresponding MHI channel + * - Request MHI_PROD in IPA RM + * - Resume data to IPA + * + * Return codes: 0 : success + * negative : error + */ +int ipa_mhi_resume(void) +{ + int res; + bool dl_channel_resumed = false; + + IPA_MHI_FUNC_ENTRY(); + + res = ipa_mhi_set_state(IPA_MHI_STATE_RESUME_IN_PROGRESS); + if (res) { + IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res); + return res; + } + + if (ipa_mhi_client_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) { + /* resume all DL channels */ + res = ipa_mhi_resume_channels(false, + ipa_mhi_client_ctx->dl_channels); + if (res) { + IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n", + res); + goto fail_resume_dl_channels; + } + dl_channel_resumed = true; + + ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED, + IPA_RM_RESOURCE_MHI_CONS); + ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED; + } + + res = ipa_mhi_request_prod(); + if (res) { + IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res); + goto fail_request_prod; + } + + /* resume all UL channels */ + res = ipa_mhi_resume_channels(false, + ipa_mhi_client_ctx->ul_channels); + if (res) { + IPA_MHI_ERR("ipa_mhi_resume_ul_channels failed %d\n", res); + goto fail_resume_ul_channels; + } + + if (!dl_channel_resumed) { + res = ipa_mhi_resume_channels(false, + ipa_mhi_client_ctx->dl_channels); + if (res) { + IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n", + res); + goto fail_resume_dl_channels2; + } + } + + if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) + ipa_mhi_update_host_ch_state(false); + + res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED); + if (res) { + IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res); + goto fail_set_state; + } + + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_set_state: + ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels); +fail_resume_dl_channels2: + ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels); +fail_resume_ul_channels: + ipa_mhi_release_prod(); +fail_request_prod: + ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels); +fail_resume_dl_channels: + ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED); + return res; +} + + +static int ipa_mhi_destroy_channels(struct ipa_mhi_channel_ctx *channels, + int num_of_channels) +{ + struct ipa_mhi_channel_ctx *channel; + int i, res; + u32 clnt_hdl; + + for (i = 0; i < num_of_channels; i++) { + channel = &channels[i]; + if (!channel->valid) + continue; + if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) + continue; + if (channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) { + clnt_hdl = ipa_get_ep_mapping(channel->client); + IPA_MHI_DBG("disconnect pipe (ep: %d)\n", clnt_hdl); + res = ipa_mhi_disconnect_pipe(clnt_hdl); + if (res) { + IPA_MHI_ERR( + "failed to disconnect pipe %d, err %d\n" + , clnt_hdl, res); + goto fail; + } + } + res = ipa_mhi_destroy_channel(channel->client); + if (res) { + IPA_MHI_ERR( + "ipa_mhi_destroy_channel failed %d" + , res); + goto fail; + } + } + return 0; +fail: + return res; +} + +/** + * ipa_mhi_destroy_all_channels() - Destroy MHI IPA channels + * + * This function is called by IPA MHI client driver on MHI reset to destroy all + * IPA MHI channels. 
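+ *
+ * Illustrative usage, mirroring the GSI path in ipa_mhi_destroy() below
+ * (editorial sketch only, not part of the original snapshot):
+ *
+ *	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ *		res = ipa_mhi_destroy_all_channels();
+ *		if (res)
+ *			IPA_MHI_ERR("destroy failed %d\n", res);
+ *	}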
+ */
+int ipa_mhi_destroy_all_channels(void)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	/* reset all UL and DL acc channels and their associated event rings */
+	res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->ul_channels,
+		IPA_MHI_MAX_UL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_destroy_channels(ul_channels) failed %d\n",
+			res);
+		return -EPERM;
+	}
+	IPA_MHI_DBG("All UL channels are disconnected\n");
+
+	res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->dl_channels,
+		IPA_MHI_MAX_DL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_destroy_channels(dl_channels) failed %d\n",
+			res);
+		return -EPERM;
+	}
+	IPA_MHI_DBG("All DL channels are disconnected\n");
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static void ipa_mhi_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+
+/**
+ * ipa_mhi_destroy() - Destroy MHI IPA
+ *
+ * This function is called by MHI client driver on MHI reset to destroy all IPA
+ * MHI resources.
+ * When this function returns ipa_mhi can re-initialize.
+ */
+void ipa_mhi_destroy(void)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_DBG("IPA MHI was not initialized, already destroyed\n");
+		return;
+	}
+	/* reset all UL and DL acc channels and their associated event rings */
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		res = ipa_mhi_destroy_all_channels();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_destroy_all_channels failed %d\n",
+				res);
+			goto fail;
+		}
+	}
+	IPA_MHI_DBG("All channels are disconnected\n");
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_SPS) {
+		IPA_MHI_DBG("cleanup uC MHI\n");
+		ipa_uc_mhi_cleanup();
+	}
+
+
+	if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_INITIALIZED &&
+		ipa_mhi_client_ctx->state != IPA_MHI_STATE_READY) {
+		IPA_MHI_DBG("release prod\n");
+		res = ipa_mhi_release_prod();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
+			goto fail;
+		}
+		IPA_MHI_DBG("wait for cons release\n");
+		res = ipa_mhi_wait_for_cons_release();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n",
+				res);
+			goto fail;
+		}
+		usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN,
+			IPA_MHI_SUSPEND_SLEEP_MAX);
+
+		IPA_MHI_DBG("delete dependency Q6_PROD->MHI_CONS\n");
+		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_MHI_CONS);
+		if (res) {
+			IPA_MHI_ERR(
+				"Error deleting dependency %d->%d, res=%d\n"
+				, IPA_RM_RESOURCE_Q6_PROD,
+				IPA_RM_RESOURCE_MHI_CONS,
+				res);
+			goto fail;
+		}
+		IPA_MHI_DBG("delete dependency MHI_PROD->Q6_CONS\n");
+		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+			IPA_RM_RESOURCE_Q6_CONS);
+		if (res) {
+			IPA_MHI_ERR(
+				"Error deleting dependency %d->%d, res=%d\n",
+				IPA_RM_RESOURCE_MHI_PROD,
+				IPA_RM_RESOURCE_Q6_CONS,
+				res);
+			goto fail;
+		}
+	}
+
+	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+	if (res) {
+		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+			IPA_RM_RESOURCE_MHI_PROD, res);
+		goto fail;
+	}
+
+	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+	if (res) {
+		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+			IPA_RM_RESOURCE_MHI_CONS, res);
+		goto fail;
+	}
+
+	ipa_mhi_debugfs_destroy();
+	destroy_workqueue(ipa_mhi_client_ctx->wq);
+	kfree(ipa_mhi_client_ctx);
+	ipa_mhi_client_ctx = NULL;
+	IPA_MHI_DBG("IPA MHI was reset, ready for re-init\n");
+
+	IPA_MHI_FUNC_EXIT();
+	return;
+fail:
+	ipa_assert();
+}
+
+/**
+ * ipa_mhi_init() - Initialize IPA MHI driver
+ * @params: initialization params
+ *
+ * This function is called by MHI client driver on boot to initialize IPA MHI
+ * 
Driver. When this function returns device can move to READY state. + * This function is doing the following: + * - Initialize MHI IPA internal data structures + * - Create IPA RM resources + * - Initialize debugfs + * + * Return codes: 0 : success + * negative : error + */ +int ipa_mhi_init(struct ipa_mhi_init_params *params) +{ + int res; + struct ipa_rm_create_params mhi_prod_params; + struct ipa_rm_create_params mhi_cons_params; + struct ipa_rm_perf_profile profile; + + IPA_MHI_FUNC_ENTRY(); + + if (!params) { + IPA_MHI_ERR("null args\n"); + return -EINVAL; + } + + if (!params->notify) { + IPA_MHI_ERR("null notify function\n"); + return -EINVAL; + } + + if (ipa_mhi_client_ctx) { + IPA_MHI_ERR("already initialized\n"); + return -EPERM; + } + + IPA_MHI_DBG("notify = %pF priv = %pK\n", params->notify, params->priv); + IPA_MHI_DBG("msi: addr_lo = 0x%x addr_hi = 0x%x\n", + params->msi.addr_low, params->msi.addr_hi); + IPA_MHI_DBG("msi: data = 0x%x mask = 0x%x\n", + params->msi.data, params->msi.mask); + IPA_MHI_DBG("mmio_addr = 0x%x\n", params->mmio_addr); + IPA_MHI_DBG("first_ch_idx = 0x%x\n", params->first_ch_idx); + IPA_MHI_DBG("first_er_idx = 0x%x\n", params->first_er_idx); + IPA_MHI_DBG("assert_bit40=%d\n", params->assert_bit40); + IPA_MHI_DBG("test_mode=%d\n", params->test_mode); + + /* Initialize context */ + ipa_mhi_client_ctx = kzalloc(sizeof(*ipa_mhi_client_ctx), GFP_KERNEL); + if (!ipa_mhi_client_ctx) { + res = -EFAULT; + goto fail_alloc_ctx; + } + + ipa_mhi_client_ctx->state = IPA_MHI_STATE_INITIALIZED; + ipa_mhi_client_ctx->cb_notify = params->notify; + ipa_mhi_client_ctx->cb_priv = params->priv; + ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED; + init_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp); + spin_lock_init(&ipa_mhi_client_ctx->state_lock); + init_completion(&ipa_mhi_client_ctx->rm_cons_comp); + ipa_mhi_client_ctx->msi = params->msi; + ipa_mhi_client_ctx->mmio_addr = params->mmio_addr; + ipa_mhi_client_ctx->first_ch_idx = params->first_ch_idx; + ipa_mhi_client_ctx->first_er_idx = params->first_er_idx; + ipa_mhi_client_ctx->qmi_req_id = 0; + ipa_mhi_client_ctx->use_ipadma = true; + ipa_mhi_client_ctx->assert_bit40 = !!params->assert_bit40; + ipa_mhi_client_ctx->test_mode = params->test_mode; + + ipa_mhi_client_ctx->wq = create_singlethread_workqueue("ipa_mhi_wq"); + if (!ipa_mhi_client_ctx->wq) { + IPA_MHI_ERR("failed to create workqueue\n"); + res = -EFAULT; + goto fail_create_wq; + } + + /* Create PROD in IPA RM */ + memset(&mhi_prod_params, 0, sizeof(mhi_prod_params)); + mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD; + mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS; + mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify; + res = ipa_rm_create_resource(&mhi_prod_params); + if (res) { + IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n"); + goto fail_create_rm_prod; + } + + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = 1000; + res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_PROD, &profile); + if (res) { + IPA_MHI_ERR("fail to set profile to MHI_PROD\n"); + goto fail_perf_rm_prod; + } + + /* Create CONS in IPA RM */ + memset(&mhi_cons_params, 0, sizeof(mhi_cons_params)); + mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS; + mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS; + mhi_cons_params.request_resource = ipa_mhi_rm_cons_request; + mhi_cons_params.release_resource = ipa_mhi_rm_cons_release; + res = ipa_rm_create_resource(&mhi_cons_params); + if (res) { + IPA_MHI_ERR("fail to create 
IPA_RM_RESOURCE_MHI_CONS\n");
+		goto fail_create_rm_cons;
+	}
+
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 1000;
+	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_CONS, &profile);
+	if (res) {
+		IPA_MHI_ERR("fail to set profile to MHI_CONS\n");
+		goto fail_perf_rm_cons;
+	}
+
+	/* Initialize uC interface */
+	ipa_uc_mhi_init(ipa_mhi_uc_ready_cb,
+		ipa_mhi_uc_wakeup_request_cb);
+	if (ipa_uc_state_check() == 0)
+		ipa_mhi_set_state(IPA_MHI_STATE_READY);
+
+	/* Initialize debugfs */
+	ipa_mhi_debugfs_init();
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_perf_rm_cons:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+fail_create_rm_cons:
+fail_perf_rm_prod:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+fail_create_rm_prod:
+	destroy_workqueue(ipa_mhi_client_ctx->wq);
+fail_create_wq:
+	kfree(ipa_mhi_client_ctx);
+	ipa_mhi_client_ctx = NULL;
+fail_alloc_ctx:
+	return res;
+}
+
+static void ipa_mhi_cache_dl_ul_sync_info(
+	struct ipa_config_req_msg_v01 *config_req)
+{
+	ipa_cached_dl_ul_sync_info.params.isDlUlSyncEnabled = true;
+	ipa_cached_dl_ul_sync_info.params.UlAccmVal =
+		(config_req->ul_accumulation_time_limit_valid) ?
+		config_req->ul_accumulation_time_limit : 0;
+	ipa_cached_dl_ul_sync_info.params.ulMsiEventThreshold =
+		(config_req->ul_msi_event_threshold_valid) ?
+		config_req->ul_msi_event_threshold : 0;
+	ipa_cached_dl_ul_sync_info.params.dlMsiEventThreshold =
+		(config_req->dl_msi_event_threshold_valid) ?
+		config_req->dl_msi_event_threshold : 0;
+}
+
+/**
+ * ipa_mhi_handle_ipa_config_req() - handle IPA CONFIG QMI message
+ *
+ * This function is called by IPA QMI service to indicate that IPA CONFIG
+ * message was sent from modem. IPA MHI will update this information to IPA uC
+ * or will cache it until IPA MHI is initialized.
+ *
+ * Return codes: 0 : success
+ *		 negative : error
+ */
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	if (ipa_get_transport_type() != IPA_TRANSPORT_TYPE_GSI) {
+		ipa_mhi_cache_dl_ul_sync_info(config_req);
+		if (ipa_mhi_client_ctx &&
+				ipa_mhi_client_ctx->state !=
+				IPA_MHI_STATE_INITIALIZED)
+			ipa_uc_mhi_send_dl_ul_sync_info(
+				&ipa_cached_dl_ul_sync_info);
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+int ipa_mhi_is_using_dma(bool *flag)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("not initialized\n");
+		return -EPERM;
+	}
+
+	*flag = ipa_mhi_client_ctx->use_ipadma ? true : false;
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+EXPORT_SYMBOL(ipa_mhi_is_using_dma);
+
+const char *ipa_mhi_get_state_str(int state)
+{
+	return MHI_STATE_STR(state);
+}
+EXPORT_SYMBOL(ipa_mhi_get_state_str);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI client driver");
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
new file mode 100644
index 000000000000..7fdaceccf658
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -0,0 +1,658 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the + * GNU General Public License for more details. + */ + +#include +#include +#include "../ipa_common_i.h" + +#define IPA_NTN_DMA_POOL_ALIGNMENT 8 +#define OFFLOAD_DRV_NAME "ipa_uc_offload" +#define IPA_UC_OFFLOAD_DBG(fmt, args...) \ + do { \ + pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UC_OFFLOAD_LOW(fmt, args...) \ + do { \ + pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UC_OFFLOAD_ERR(fmt, args...) \ + do { \ + pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UC_OFFLOAD_INFO(fmt, args...) \ + do { \ + pr_info(OFFLOAD_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +enum ipa_uc_offload_state { + IPA_UC_OFFLOAD_STATE_INVALID, + IPA_UC_OFFLOAD_STATE_INITIALIZED, + IPA_UC_OFFLOAD_STATE_UP, +}; + +struct ipa_uc_offload_ctx { + enum ipa_uc_offload_proto proto; + enum ipa_uc_offload_state state; + void *priv; + u8 hdr_len; + u32 partial_hdr_hdl[IPA_IP_MAX]; + char netdev_name[IPA_RESOURCE_NAME_MAX]; + ipa_notify_cb notify; + struct completion ntn_completion; +}; + +static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE]; + +static int ipa_uc_ntn_cons_release(void); +static int ipa_uc_ntn_cons_request(void); +static void ipa_uc_offload_rm_notify(void *, enum ipa_rm_event, unsigned long); + +static int ipa_commit_partial_hdr( + struct ipa_ioc_add_hdr *hdr, + const char *netdev_name, + struct ipa_hdr_info *hdr_info) +{ + int i; + + if (hdr == NULL || hdr_info == NULL) { + IPA_UC_OFFLOAD_ERR("Invalid input\n"); + return -EINVAL; + } + + hdr->commit = 1; + hdr->num_hdrs = 2; + + snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name), + "%s_ipv4", netdev_name); + snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name), + "%s_ipv6", netdev_name); + for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) { + hdr->hdr[i].hdr_len = hdr_info[i].hdr_len; + memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len); + hdr->hdr[i].type = hdr_info[i].hdr_type; + hdr->hdr[i].is_partial = 1; + hdr->hdr[i].is_eth2_ofst_valid = 1; + hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset; + } + + if (ipa_add_hdr(hdr)) { + IPA_UC_OFFLOAD_ERR("fail to add partial headers\n"); + return -EFAULT; + } + + return 0; +} + +static int ipa_uc_offload_ntn_reg_intf( + struct ipa_uc_offload_intf_params *inp, + struct ipa_uc_offload_out_params *outp, + struct ipa_uc_offload_ctx *ntn_ctx) +{ + struct ipa_ioc_add_hdr *hdr = NULL; + struct ipa_tx_intf tx; + struct ipa_rx_intf rx; + struct ipa_ioc_tx_intf_prop tx_prop[2]; + struct ipa_ioc_rx_intf_prop rx_prop[2]; + struct ipa_rm_create_params param; + u32 len; + int ret = 0; + + IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n", + inp->netdev_name); + memset(¶m, 0, sizeof(param)); + param.name = IPA_RM_RESOURCE_ETHERNET_PROD; + 
param.reg_params.user_data = ntn_ctx; + param.reg_params.notify_cb = ipa_uc_offload_rm_notify; + param.floor_voltage = IPA_VOLTAGE_SVS; + ret = ipa_rm_create_resource(¶m); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_PROD resource\n"); + return -EFAULT; + } + + memset(¶m, 0, sizeof(param)); + param.name = IPA_RM_RESOURCE_ETHERNET_CONS; + param.request_resource = ipa_uc_ntn_cons_request; + param.release_resource = ipa_uc_ntn_cons_release; + ret = ipa_rm_create_resource(¶m); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_CONS resource\n"); + goto fail_create_rm_cons; + } + + memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX); + ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len; + ntn_ctx->notify = inp->notify; + ntn_ctx->priv = inp->priv; + + /* add partial header */ + len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add); + hdr = kzalloc(len, GFP_KERNEL); + if (hdr == NULL) { + ret = -ENOMEM; + goto fail_alloc; + } + + if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) { + IPA_UC_OFFLOAD_ERR("fail to commit partial headers\n"); + ret = -EFAULT; + goto fail; + } + + /* populate tx prop */ + tx.num_props = 2; + tx.prop = tx_prop; + + memset(tx_prop, 0, sizeof(tx_prop)); + tx_prop[0].ip = IPA_IP_v4; + tx_prop[0].dst_pipe = IPA_CLIENT_ETHERNET_CONS; + tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type; + memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name, + sizeof(tx_prop[0].hdr_name)); + + tx_prop[1].ip = IPA_IP_v6; + tx_prop[1].dst_pipe = IPA_CLIENT_ETHERNET_CONS; + tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type; + memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name, + sizeof(tx_prop[1].hdr_name)); + + /* populate rx prop */ + rx.num_props = 2; + rx.prop = rx_prop; + + memset(rx_prop, 0, sizeof(rx_prop)); + rx_prop[0].ip = IPA_IP_v4; + rx_prop[0].src_pipe = IPA_CLIENT_ETHERNET_PROD; + rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type; + if (inp->is_meta_data_valid) { + rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_prop[0].attrib.meta_data = inp->meta_data; + rx_prop[0].attrib.meta_data_mask = inp->meta_data_mask; + } + + rx_prop[1].ip = IPA_IP_v6; + rx_prop[1].src_pipe = IPA_CLIENT_ETHERNET_PROD; + rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type; + if (inp->is_meta_data_valid) { + rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_prop[1].attrib.meta_data = inp->meta_data; + rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask; + } + + if (ipa_register_intf(inp->netdev_name, &tx, &rx)) { + IPA_UC_OFFLOAD_ERR("fail to add interface prop\n"); + memset(ntn_ctx, 0, sizeof(*ntn_ctx)); + ret = -EFAULT; + goto fail; + } + + ntn_ctx->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl; + ntn_ctx->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl; + init_completion(&ntn_ctx->ntn_completion); + ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED; + + kfree(hdr); + return ret; + +fail: + kfree(hdr); +fail_alloc: + ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS); +fail_create_rm_cons: + ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD); + return ret; +} + +int ipa_uc_offload_reg_intf( + struct ipa_uc_offload_intf_params *inp, + struct ipa_uc_offload_out_params *outp) +{ + struct ipa_uc_offload_ctx *ctx; + int ret = 0; + + if (inp == NULL || outp == NULL) { + IPA_UC_OFFLOAD_ERR("invalid params in=%pK out=%pK\n", + inp, outp); + return -EINVAL; + } + + if (inp->proto <= IPA_UC_INVALID || + inp->proto >= IPA_UC_MAX_PROT_SIZE) { + IPA_UC_OFFLOAD_ERR("invalid proto %d\n", inp->proto); + 
return -EINVAL; + } + + if (!ipa_uc_offload_ctx[inp->proto]) { + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (ctx == NULL) { + IPA_UC_OFFLOAD_ERR("fail to alloc uc offload ctx\n"); + return -EFAULT; + } + ipa_uc_offload_ctx[inp->proto] = ctx; + ctx->proto = inp->proto; + } else + ctx = ipa_uc_offload_ctx[inp->proto]; + + if (ctx->state != IPA_UC_OFFLOAD_STATE_INVALID) { + IPA_UC_OFFLOAD_ERR("Already Initialized\n"); + return -EINVAL; + } + + if (ctx->proto == IPA_UC_NTN) { + ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx); + if (!ret) + outp->clnt_hndl = IPA_UC_NTN; + } + + return ret; +} +EXPORT_SYMBOL(ipa_uc_offload_reg_intf); + +static int ipa_uc_ntn_cons_release(void) +{ + return 0; +} + +static int ipa_uc_ntn_cons_request(void) +{ + int ret = 0; + struct ipa_uc_offload_ctx *ntn_ctx; + + ntn_ctx = ipa_uc_offload_ctx[IPA_UC_NTN]; + if (!ntn_ctx) { + IPA_UC_OFFLOAD_ERR("NTN is not initialized\n"); + ret = -EFAULT; + } else if (ntn_ctx->state != IPA_UC_OFFLOAD_STATE_UP) { + IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", ntn_ctx->state); + ret = -EFAULT; + } + + return ret; +} + +static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + struct ipa_uc_offload_ctx *offload_ctx; + + offload_ctx = (struct ipa_uc_offload_ctx *)user_data; + if (!(offload_ctx && offload_ctx->proto > IPA_UC_INVALID && + offload_ctx->proto < IPA_UC_MAX_PROT_SIZE)) { + IPA_UC_OFFLOAD_ERR("Invalid user data\n"); + return; + } + + if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) + IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", offload_ctx->state); + + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + complete_all(&offload_ctx->ntn_completion); + break; + + case IPA_RM_RESOURCE_RELEASED: + break; + + default: + IPA_UC_OFFLOAD_ERR("Invalid RM Evt: %d", event); + break; + } +} + +int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp, + struct ipa_ntn_conn_out_params *outp, + struct ipa_uc_offload_ctx *ntn_ctx) +{ + int result = 0; + enum ipa_uc_offload_state prev_state; + + prev_state = ntn_ctx->state; + if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT || + inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) { + IPA_UC_OFFLOAD_ERR("alignment failure on TX\n"); + return -EINVAL; + } + if (inp->ul.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT || + inp->ul.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) { + IPA_UC_OFFLOAD_ERR("alignment failure on RX\n"); + return -EINVAL; + } + + result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result) { + IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n", result); + return result; + } + + result = ipa_rm_request_resource(IPA_RM_RESOURCE_ETHERNET_PROD); + if (result == -EINPROGRESS) { + if (wait_for_completion_timeout(&ntn_ctx->ntn_completion, + 10*HZ) == 0) { + IPA_UC_OFFLOAD_ERR("ETH_PROD resource req time out\n"); + result = -EFAULT; + goto fail; + } + } else if (result != 0) { + IPA_UC_OFFLOAD_ERR("fail to request resource\n"); + result = -EFAULT; + goto fail; + } + + ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP; + result = ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify, + ntn_ctx->priv, ntn_ctx->hdr_len, outp); + if (result) { + IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes: %d\n", + result); + ntn_ctx->state = prev_state; + result = -EFAULT; + goto fail; + } + + return 0; + +fail: + ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD, + IPA_RM_RESOURCE_APPS_CONS); + return result; +} + +int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp, + struct 
ipa_uc_offload_conn_out_params *outp) +{ + int ret = 0; + struct ipa_uc_offload_ctx *offload_ctx; + + if (!(inp && outp)) { + IPA_UC_OFFLOAD_ERR("bad parm. in=%pK out=%pK\n", inp, outp); + return -EINVAL; + } + + if (inp->clnt_hndl <= IPA_UC_INVALID || + inp->clnt_hndl >= IPA_UC_MAX_PROT_SIZE) { + IPA_UC_OFFLOAD_ERR("invalid client handle %d\n", + inp->clnt_hndl); + return -EINVAL; + } + + offload_ctx = ipa_uc_offload_ctx[inp->clnt_hndl]; + if (!offload_ctx) { + IPA_UC_OFFLOAD_ERR("Invalid Handle\n"); + return -EINVAL; + } + + if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) { + IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state); + return -EPERM; + } + + switch (offload_ctx->proto) { + case IPA_UC_NTN: + ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn, + offload_ctx); + break; + + default: + IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", offload_ctx->proto); + ret = -EINVAL; + break; + } + + return ret; +} +EXPORT_SYMBOL(ipa_uc_offload_conn_pipes); + +int ipa_set_perf_profile(struct ipa_perf_profile *profile) +{ + struct ipa_rm_perf_profile rm_profile; + enum ipa_rm_resource_name resource_name; + + if (profile == NULL) { + IPA_UC_OFFLOAD_ERR("Invalid input\n"); + return -EINVAL; + } + + rm_profile.max_supported_bandwidth_mbps = + profile->max_supported_bw_mbps; + + if (profile->client == IPA_CLIENT_ETHERNET_PROD) { + resource_name = IPA_RM_RESOURCE_ETHERNET_PROD; + } else if (profile->client == IPA_CLIENT_ETHERNET_CONS) { + resource_name = IPA_RM_RESOURCE_ETHERNET_CONS; + } else { + IPA_UC_OFFLOAD_ERR("not supported\n"); + return -EINVAL; + } + + if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) { + IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n"); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(ipa_set_perf_profile); + +static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx) +{ + int ipa_ep_idx_ul, ipa_ep_idx_dl; + int ret = 0; + + ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED; + + ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail to release ETHERNET_PROD res: %d\n", + ret); + return -EFAULT; + } + + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail to del dep ETH_PROD->APPS, %d\n", ret); + return -EFAULT; + } + + ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD); + ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS); + ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl); + if (ret) { + IPA_UC_OFFLOAD_ERR("fail to tear down ntn offload pipes, %d\n", + ret); + return -EFAULT; + } + + return ret; +} + +int ipa_uc_offload_disconn_pipes(u32 clnt_hdl) +{ + struct ipa_uc_offload_ctx *offload_ctx; + int ret = 0; + + if (clnt_hdl <= IPA_UC_INVALID || + clnt_hdl >= IPA_UC_MAX_PROT_SIZE) { + IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl); + return -EINVAL; + } + + offload_ctx = ipa_uc_offload_ctx[clnt_hdl]; + if (!offload_ctx) { + IPA_UC_OFFLOAD_ERR("Invalid client Handle\n"); + return -EINVAL; + } + + if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_UP) { + IPA_UC_OFFLOAD_ERR("Invalid state\n"); + return -EINVAL; + } + + switch (offload_ctx->proto) { + case IPA_UC_NTN: + ret = ipa_uc_ntn_disconn_pipes(offload_ctx); + break; + + default: + IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl); + ret = -EINVAL; + break; + } + + return ret; +} +EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes); + +static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx) +{ + int 
len, result = 0; + struct ipa_ioc_del_hdr *hdr; + + if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD)) { + IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_PROD resource\n"); + return -EFAULT; + } + + if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS)) { + IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_CONS resource\n"); + return -EFAULT; + } + + len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del); + hdr = kzalloc(len, GFP_KERNEL); + if (hdr == NULL) + return -ENOMEM; + + hdr->commit = 1; + hdr->num_hdls = 2; + hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0]; + hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1]; + + if (ipa_del_hdr(hdr)) { + IPA_UC_OFFLOAD_ERR("fail to delete partial header\n"); + result = -EFAULT; + goto fail; + } + + if (ipa_deregister_intf(ntn_ctx->netdev_name)) { + IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n"); + result = -EFAULT; + goto fail; + } + +fail: + kfree(hdr); + return result; +} + +int ipa_uc_offload_cleanup(u32 clnt_hdl) +{ + struct ipa_uc_offload_ctx *offload_ctx; + int ret = 0; + + if (clnt_hdl <= IPA_UC_INVALID || + clnt_hdl >= IPA_UC_MAX_PROT_SIZE) { + IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl); + return -EINVAL; + } + + offload_ctx = ipa_uc_offload_ctx[clnt_hdl]; + if (!offload_ctx) { + IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl); + return -EINVAL; + } + + if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) { + IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state); + return -EINVAL; + } + + switch (offload_ctx->proto) { + case IPA_UC_NTN: + ret = ipa_uc_ntn_cleanup(offload_ctx); + break; + + default: + IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl); + ret = -EINVAL; + break; + } + + if (!ret) { + kfree(offload_ctx); + offload_ctx = NULL; + ipa_uc_offload_ctx[clnt_hdl] = NULL; + } + + return ret; +} +EXPORT_SYMBOL(ipa_uc_offload_cleanup); + +/** + * ipa_uc_offload_uc_rdyCB() - To register uC ready CB if uC not + * ready + * @inout: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *inp) +{ + int ret = 0; + + if (!inp) { + IPA_UC_OFFLOAD_ERR("Invalid input\n"); + return -EINVAL; + } + + if (inp->proto == IPA_UC_NTN) + ret = ipa_ntn_uc_reg_rdyCB(inp->notify, inp->priv); + + if (ret == -EEXIST) { + inp->is_uC_ready = true; + ret = 0; + } else + inp->is_uC_ready = false; + + return ret; +} +EXPORT_SYMBOL(ipa_uc_offload_reg_rdyCB); + +void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto) +{ + if (proto == IPA_UC_NTN) + ipa_ntn_uc_dereg_rdyCB(); +} +EXPORT_SYMBOL(ipa_uc_offload_dereg_rdyCB); diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c new file mode 100644 index 000000000000..948157a7a613 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c @@ -0,0 +1,2750 @@ +/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_v3/ipa_i.h" +#include "../ipa_rm_i.h" + +#define IPA_USB_RM_TIMEOUT_MSEC 10000 +#define IPA_USB_DEV_READY_TIMEOUT_MSEC 10000 + +#define IPA_HOLB_TMR_EN 0x1 + +/* GSI channels weights */ +#define IPA_USB_DL_CHAN_LOW_WEIGHT 0x5 +#define IPA_USB_UL_CHAN_LOW_WEIGHT 0x4 + +#define IPA_USB_MAX_MSG_LEN 4096 + +#define IPA_USB_DRV_NAME "ipa_usb" + +#define IPA_USB_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_USB_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_USB_ERR(fmt, args...) \ + do { \ + pr_err(IPA_USB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_USB_INFO(fmt, args...) \ + do { \ + pr_info(IPA_USB_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +struct ipa_usb_xdci_connect_params_internal { + enum ipa_usb_max_usb_packet_size max_pkt_size; + u32 ipa_to_usb_clnt_hdl; + u8 ipa_to_usb_xferrscidx; + bool ipa_to_usb_xferrscidx_valid; + u32 usb_to_ipa_clnt_hdl; + u8 usb_to_ipa_xferrscidx; + bool usb_to_ipa_xferrscidx_valid; + enum ipa_usb_teth_prot teth_prot; + struct ipa_usb_teth_prot_params teth_prot_params; + u32 max_supported_bandwidth_mbps; +}; + +enum ipa3_usb_teth_prot_state { + IPA_USB_TETH_PROT_INITIALIZED, + IPA_USB_TETH_PROT_CONNECTED, + IPA_USB_TETH_PROT_INVALID +}; + +struct ipa3_usb_teth_prot_context { + union { + struct ipa_usb_init_params rndis; + struct ecm_ipa_params ecm; + struct teth_bridge_init_params teth_bridge; + } teth_prot_params; + enum ipa3_usb_teth_prot_state state; + void *user_data; +}; + +enum ipa3_usb_cons_state { + IPA_USB_CONS_GRANTED, + IPA_USB_CONS_RELEASED +}; + +struct ipa3_usb_rm_context { + struct ipa_rm_create_params prod_params; + struct ipa_rm_create_params cons_params; + bool prod_valid; + bool cons_valid; + struct completion prod_comp; + enum ipa3_usb_cons_state cons_state; + /* consumer was requested*/ + bool cons_requested; + /* consumer was requested and released before it was granted*/ + bool cons_requested_released; +}; + +enum ipa3_usb_state { + IPA_USB_INVALID, + IPA_USB_INITIALIZED, + IPA_USB_CONNECTED, + IPA_USB_STOPPED, + IPA_USB_SUSPEND_REQUESTED, + IPA_USB_SUSPENDED, + IPA_USB_SUSPENDED_NO_RWAKEUP, + IPA_USB_RESUME_IN_PROGRESS +}; + +enum ipa3_usb_transport_type { + IPA_USB_TRANSPORT_TETH, + IPA_USB_TRANSPORT_DPL, + IPA_USB_TRANSPORT_MAX +}; + +/* Get transport type from tethering protocol */ +#define IPA3_USB_GET_TTYPE(__teth_prot) \ + (((__teth_prot) == IPA_USB_DIAG) ? \ + IPA_USB_TRANSPORT_DPL : IPA_USB_TRANSPORT_TETH) + +/* Does the given transport type is DPL? 
*/ +#define IPA3_USB_IS_TTYPE_DPL(__ttype) \ + ((__ttype) == IPA_USB_TRANSPORT_DPL) + +struct ipa3_usb_teth_prot_conn_params { + u32 usb_to_ipa_clnt_hdl; + u32 ipa_to_usb_clnt_hdl; + struct ipa_usb_teth_prot_params params; +}; + +/** + * Transport type - could be either data tethering or DPL + * Each transport has it's own RM resources and statuses + */ +struct ipa3_usb_transport_type_ctx { + struct ipa3_usb_rm_context rm_ctx; + int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data); + void *user_data; + enum ipa3_usb_state state; + struct ipa_usb_xdci_chan_params ch_params; + struct ipa3_usb_teth_prot_conn_params teth_conn_params; +}; + +struct ipa3_usb_smmu_reg_map { + int cnt; + phys_addr_t addr; +}; + +struct ipa3_usb_context { + struct ipa3_usb_teth_prot_context + teth_prot_ctx[IPA_USB_MAX_TETH_PROT_SIZE]; + int num_init_prot; /* without dpl */ + struct teth_bridge_init_params teth_bridge_params; + struct completion dev_ready_comp; + u32 qmi_req_id; + spinlock_t state_lock; + bool dl_data_pending; + struct workqueue_struct *wq; + struct mutex general_mutex; + struct ipa3_usb_transport_type_ctx + ttype_ctx[IPA_USB_TRANSPORT_MAX]; + struct dentry *dfile_state_info; + struct dentry *dent; + struct ipa3_usb_smmu_reg_map smmu_reg_map; +}; + +enum ipa3_usb_op { + IPA_USB_OP_INIT_TETH_PROT, + IPA_USB_OP_REQUEST_CHANNEL, + IPA_USB_OP_CONNECT, + IPA_USB_OP_DISCONNECT, + IPA_USB_OP_RELEASE_CHANNEL, + IPA_USB_OP_DEINIT_TETH_PROT, + IPA_USB_OP_SUSPEND, + IPA_USB_OP_SUSPEND_NO_RWAKEUP, + IPA_USB_OP_RESUME +}; + +struct ipa3_usb_status_dbg_info { + const char *teth_state; + const char *dpl_state; + int num_init_prot; + const char *inited_prots[IPA_USB_MAX_TETH_PROT_SIZE]; + const char *teth_connected_prot; + const char *dpl_connected_prot; + const char *teth_cons_state; + const char *dpl_cons_state; +}; + +static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work); +static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work); +static DECLARE_WORK(ipa3_usb_notify_remote_wakeup_work, + ipa3_usb_wq_notify_remote_wakeup); +static DECLARE_WORK(ipa3_usb_dpl_notify_remote_wakeup_work, + ipa3_usb_wq_dpl_notify_remote_wakeup); + +struct ipa3_usb_context *ipa3_usb_ctx; + +static char *ipa3_usb_op_to_string(enum ipa3_usb_op op) +{ + switch (op) { + case IPA_USB_OP_INIT_TETH_PROT: + return "IPA_USB_OP_INIT_TETH_PROT"; + case IPA_USB_OP_REQUEST_CHANNEL: + return "IPA_USB_OP_REQUEST_CHANNEL"; + case IPA_USB_OP_CONNECT: + return "IPA_USB_OP_CONNECT"; + case IPA_USB_OP_DISCONNECT: + return "IPA_USB_OP_DISCONNECT"; + case IPA_USB_OP_RELEASE_CHANNEL: + return "IPA_USB_OP_RELEASE_CHANNEL"; + case IPA_USB_OP_DEINIT_TETH_PROT: + return "IPA_USB_OP_DEINIT_TETH_PROT"; + case IPA_USB_OP_SUSPEND: + return "IPA_USB_OP_SUSPEND"; + case IPA_USB_OP_SUSPEND_NO_RWAKEUP: + return "IPA_USB_OP_SUSPEND_NO_RWAKEUP"; + case IPA_USB_OP_RESUME: + return "IPA_USB_OP_RESUME"; + } + + return "UNSUPPORTED"; +} + +static char *ipa3_usb_state_to_string(enum ipa3_usb_state state) +{ + switch (state) { + case IPA_USB_INVALID: + return "IPA_USB_INVALID"; + case IPA_USB_INITIALIZED: + return "IPA_USB_INITIALIZED"; + case IPA_USB_CONNECTED: + return "IPA_USB_CONNECTED"; + case IPA_USB_STOPPED: + return "IPA_USB_STOPPED"; + case IPA_USB_SUSPEND_REQUESTED: + return "IPA_USB_SUSPEND_REQUESTED"; + case IPA_USB_SUSPENDED: + return "IPA_USB_SUSPENDED"; + case IPA_USB_SUSPENDED_NO_RWAKEUP: + return "IPA_USB_SUSPENDED_NO_RWAKEUP"; + case IPA_USB_RESUME_IN_PROGRESS: + return "IPA_USB_RESUME_IN_PROGRESS"; + } + + 
return "UNSUPPORTED"; +} + +static char *ipa3_usb_notify_event_to_string(enum ipa_usb_notify_event event) +{ + switch (event) { + case IPA_USB_DEVICE_READY: + return "IPA_USB_DEVICE_READY"; + case IPA_USB_REMOTE_WAKEUP: + return "IPA_USB_REMOTE_WAKEUP"; + case IPA_USB_SUSPEND_COMPLETED: + return "IPA_USB_SUSPEND_COMPLETED"; + } + + return "UNSUPPORTED"; +} + +static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit, + enum ipa3_usb_transport_type ttype) +{ + unsigned long flags; + int state_legal = false; + enum ipa3_usb_state state; + struct ipa3_usb_rm_context *rm_ctx; + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + state = ipa3_usb_ctx->ttype_ctx[ttype].state; + switch (new_state) { + case IPA_USB_INVALID: + if (state == IPA_USB_INITIALIZED) + state_legal = true; + break; + case IPA_USB_INITIALIZED: + if (state == IPA_USB_STOPPED || state == IPA_USB_INVALID || + ((!IPA3_USB_IS_TTYPE_DPL(ttype)) && + (state == IPA_USB_INITIALIZED))) + state_legal = true; + break; + case IPA_USB_CONNECTED: + if (state == IPA_USB_INITIALIZED || + state == IPA_USB_STOPPED || + state == IPA_USB_RESUME_IN_PROGRESS || + state == IPA_USB_SUSPENDED_NO_RWAKEUP || + /* + * In case of failure during suspend request + * handling, state is reverted to connected. + */ + (err_permit && state == IPA_USB_SUSPEND_REQUESTED)) + state_legal = true; + break; + case IPA_USB_STOPPED: + if (state == IPA_USB_CONNECTED || + state == IPA_USB_SUSPENDED || + state == IPA_USB_SUSPENDED_NO_RWAKEUP) + state_legal = true; + break; + case IPA_USB_SUSPEND_REQUESTED: + if (state == IPA_USB_CONNECTED) + state_legal = true; + break; + case IPA_USB_SUSPENDED: + if (state == IPA_USB_SUSPEND_REQUESTED || + /* + * In case of failure during resume, state is reverted + * to original, which could be suspended. Allow it + */ + (err_permit && state == IPA_USB_RESUME_IN_PROGRESS)) + state_legal = true; + break; + case IPA_USB_SUSPENDED_NO_RWAKEUP: + if (state == IPA_USB_CONNECTED) + state_legal = true; + break; + case IPA_USB_RESUME_IN_PROGRESS: + if (state == IPA_USB_SUSPENDED) + state_legal = true; + break; + default: + state_legal = false; + break; + + } + if (state_legal) { + if (state != new_state) { + IPA_USB_DBG("ipa_usb %s state changed %s -> %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
"DPL" : "", + ipa3_usb_state_to_string(state), + ipa3_usb_state_to_string(new_state)); + ipa3_usb_ctx->ttype_ctx[ttype].state = new_state; + } + } else { + IPA_USB_ERR("invalid state change %s -> %s\n", + ipa3_usb_state_to_string(state), + ipa3_usb_state_to_string(new_state)); + } + + if (state_legal && (new_state == IPA_USB_CONNECTED)) { + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + if ((rm_ctx->cons_state == IPA_USB_CONS_GRANTED) || + rm_ctx->cons_requested_released) { + rm_ctx->cons_requested = false; + rm_ctx->cons_requested_released = + false; + } + /* Notify RM that consumer is granted */ + if (rm_ctx->cons_requested) { + ipa_rm_notify_completion( + IPA_RM_RESOURCE_GRANTED, + rm_ctx->cons_params.name); + rm_ctx->cons_state = IPA_USB_CONS_GRANTED; + rm_ctx->cons_requested = false; + } + } + + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + return state_legal; +} + +static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op, + enum ipa3_usb_transport_type ttype) +{ + unsigned long flags; + bool is_legal = false; + enum ipa3_usb_state state; + bool is_dpl; + + if (ipa3_usb_ctx == NULL) { + IPA_USB_ERR("ipa_usb_ctx is not initialized!\n"); + return false; + } + + is_dpl = IPA3_USB_IS_TTYPE_DPL(ttype); + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + state = ipa3_usb_ctx->ttype_ctx[ttype].state; + switch (op) { + case IPA_USB_OP_INIT_TETH_PROT: + if (state == IPA_USB_INVALID || + (!is_dpl && state == IPA_USB_INITIALIZED)) + is_legal = true; + break; + case IPA_USB_OP_REQUEST_CHANNEL: + if (state == IPA_USB_INITIALIZED) + is_legal = true; + break; + case IPA_USB_OP_CONNECT: + if (state == IPA_USB_INITIALIZED || state == IPA_USB_STOPPED) + is_legal = true; + break; + case IPA_USB_OP_DISCONNECT: + if (state == IPA_USB_CONNECTED || + state == IPA_USB_SUSPENDED || + state == IPA_USB_SUSPENDED_NO_RWAKEUP) + is_legal = true; + break; + case IPA_USB_OP_RELEASE_CHANNEL: + /* when releasing 1st channel state will be changed already */ + if (state == IPA_USB_STOPPED || + (!is_dpl && state == IPA_USB_INITIALIZED)) + is_legal = true; + break; + case IPA_USB_OP_DEINIT_TETH_PROT: + /* + * For data tethering we should allow deinit an inited protocol + * always. E.g. rmnet is inited and rndis is connected. + * USB can deinit rmnet first and then disconnect rndis + * on cable disconnect. + */ + if (!is_dpl || state == IPA_USB_INITIALIZED) + is_legal = true; + break; + case IPA_USB_OP_SUSPEND: + if (state == IPA_USB_CONNECTED) + is_legal = true; + break; + case IPA_USB_OP_SUSPEND_NO_RWAKEUP: + if (state == IPA_USB_CONNECTED) + is_legal = true; + break; + case IPA_USB_OP_RESUME: + if (state == IPA_USB_SUSPENDED || + state == IPA_USB_SUSPENDED_NO_RWAKEUP) + is_legal = true; + break; + default: + is_legal = false; + break; + } + + if (!is_legal) { + IPA_USB_ERR("Illegal %s operation: state=%s operation=%s\n", + is_dpl ? "DPL" : "", + ipa3_usb_state_to_string(state), + ipa3_usb_op_to_string(op)); + } + + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + return is_legal; +} + +static void ipa3_usb_notify_do(enum ipa3_usb_transport_type ttype, + enum ipa_usb_notify_event event) +{ + int (*cb)(enum ipa_usb_notify_event, void *user_data); + void *user_data; + int res; + + IPA_USB_DBG("Trying to notify USB with %s\n", + ipa3_usb_notify_event_to_string(event)); + + cb = ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb; + user_data = ipa3_usb_ctx->ttype_ctx[ttype].user_data; + + if (cb) { + res = cb(event, user_data); + IPA_USB_DBG("Notified USB with %s. 
is_dpl=%d result=%d\n", + ipa3_usb_notify_event_to_string(event), + IPA3_USB_IS_TTYPE_DPL(ttype), res); + } +} + +/* + * This call-back is called from ECM or RNDIS drivers. + * Both drivers are data tethering drivers and not DPL + */ +void ipa3_usb_device_ready_notify_cb(void) +{ + IPA_USB_DBG_LOW("entry\n"); + ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, + IPA_USB_DEVICE_READY); + IPA_USB_DBG_LOW("exit\n"); +} + +static void ipa3_usb_prod_notify_cb_do(enum ipa_rm_event event, + enum ipa3_usb_transport_type ttype) +{ + struct ipa3_usb_rm_context *rm_ctx; + + IPA_USB_DBG_LOW("entry\n"); + + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + IPA_USB_DBG(":%s granted\n", + ipa_rm_resource_str(rm_ctx->prod_params.name)); + complete_all(&rm_ctx->prod_comp); + break; + case IPA_RM_RESOURCE_RELEASED: + IPA_USB_DBG(":%s released\n", + ipa_rm_resource_str(rm_ctx->prod_params.name)); + complete_all(&rm_ctx->prod_comp); + break; + } + IPA_USB_DBG_LOW("exit\n"); +} + +static void ipa3_usb_prod_notify_cb(void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH); +} + +static void ipa3_usb_dpl_dummy_prod_notify_cb(void *user_data, + enum ipa_rm_event event, unsigned long data) +{ + ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH); +} + +static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work) +{ + ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_REMOTE_WAKEUP); +} + +static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work) +{ + ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_REMOTE_WAKEUP); +} + +static int ipa3_usb_cons_request_resource_cb_do( + enum ipa3_usb_transport_type ttype, + struct work_struct *remote_wakeup_work) +{ + struct ipa3_usb_rm_context *rm_ctx; + unsigned long flags; + int result; + + IPA_USB_DBG_LOW("entry\n"); + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + IPA_USB_DBG("state is %s\n", + ipa3_usb_state_to_string( + ipa3_usb_ctx->ttype_ctx[ttype].state)); + switch (ipa3_usb_ctx->ttype_ctx[ttype].state) { + case IPA_USB_CONNECTED: + case IPA_USB_SUSPENDED_NO_RWAKEUP: + rm_ctx->cons_state = IPA_USB_CONS_GRANTED; + result = 0; + break; + case IPA_USB_SUSPEND_REQUESTED: + rm_ctx->cons_requested = true; + if (rm_ctx->cons_state == IPA_USB_CONS_GRANTED) + result = 0; + else + result = -EINPROGRESS; + break; + case IPA_USB_SUSPENDED: + if (!rm_ctx->cons_requested) { + rm_ctx->cons_requested = true; + queue_work(ipa3_usb_ctx->wq, remote_wakeup_work); + } + result = -EINPROGRESS; + break; + default: + rm_ctx->cons_requested = true; + result = -EINPROGRESS; + break; + } + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + IPA_USB_DBG_LOW("exit with %d\n", result); + return result; +} + +static int ipa3_usb_cons_request_resource_cb(void) +{ + return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_TETH, + &ipa3_usb_notify_remote_wakeup_work); +} + +static int ipa3_usb_dpl_cons_request_resource_cb(void) +{ + return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_DPL, + &ipa3_usb_dpl_notify_remote_wakeup_work); +} + +static int ipa3_usb_cons_release_resource_cb_do( + enum ipa3_usb_transport_type ttype) +{ + unsigned long flags; + struct ipa3_usb_rm_context *rm_ctx; + + IPA_USB_DBG_LOW("entry\n"); + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + IPA_USB_DBG("state is %s\n", + 
ipa3_usb_state_to_string( + ipa3_usb_ctx->ttype_ctx[ttype].state)); + switch (ipa3_usb_ctx->ttype_ctx[ttype].state) { + case IPA_USB_SUSPENDED: + /* Proceed with the suspend if no DL/DPL data */ + if (rm_ctx->cons_requested) + rm_ctx->cons_requested_released = true; + break; + case IPA_USB_SUSPEND_REQUESTED: + if (rm_ctx->cons_requested) + rm_ctx->cons_requested_released = true; + break; + case IPA_USB_STOPPED: + case IPA_USB_RESUME_IN_PROGRESS: + case IPA_USB_SUSPENDED_NO_RWAKEUP: + if (rm_ctx->cons_requested) + rm_ctx->cons_requested = false; + break; + case IPA_USB_CONNECTED: + case IPA_USB_INITIALIZED: + break; + default: + IPA_USB_ERR("received cons_release_cb in bad state: %s!\n", + ipa3_usb_state_to_string( + ipa3_usb_ctx->ttype_ctx[ttype].state)); + WARN_ON(1); + break; + } + + rm_ctx->cons_state = IPA_USB_CONS_RELEASED; + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + IPA_USB_DBG_LOW("exit\n"); + return 0; +} + +static int ipa3_usb_cons_release_resource_cb(void) +{ + return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_TETH); +} + +static int ipa3_usb_dpl_cons_release_resource_cb(void) +{ + return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_DPL); +} + +static char *ipa3_usb_teth_prot_to_string(enum ipa_usb_teth_prot teth_prot) +{ + switch (teth_prot) { + case IPA_USB_RNDIS: + return "rndis_ipa"; + case IPA_USB_ECM: + return "ecm_ipa"; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + return "teth_bridge"; + case IPA_USB_DIAG: + return "dpl"; + default: + break; + } + + return "unsupported"; +} + +static char *ipa3_usb_teth_bridge_prot_to_string( + enum ipa_usb_teth_prot teth_prot) +{ + switch (teth_prot) { + case IPA_USB_RMNET: + return "rmnet"; + case IPA_USB_MBIM: + return "mbim"; + default: + break; + } + + return "unsupported"; +} + +static int ipa3_usb_init_teth_bridge(void) +{ + int result; + + result = teth_bridge_init(&ipa3_usb_ctx->teth_bridge_params); + if (result) { + IPA_USB_ERR("Failed to initialize teth_bridge\n"); + return result; + } + + return 0; +} + +static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype) +{ + struct ipa3_usb_rm_context *rm_ctx; + int result = -EFAULT; + bool created = false; + + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + + /* create PROD */ + if (!rm_ctx->prod_valid) { + rm_ctx->prod_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ? + IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD : + IPA_RM_RESOURCE_USB_PROD; + rm_ctx->prod_params.floor_voltage = IPA_VOLTAGE_SVS; + rm_ctx->prod_params.reg_params.user_data = NULL; + rm_ctx->prod_params.reg_params.notify_cb = + IPA3_USB_IS_TTYPE_DPL(ttype) ? + ipa3_usb_dpl_dummy_prod_notify_cb : + ipa3_usb_prod_notify_cb; + rm_ctx->prod_params.request_resource = NULL; + rm_ctx->prod_params.release_resource = NULL; + result = ipa_rm_create_resource(&rm_ctx->prod_params); + if (result) { + IPA_USB_ERR("Failed to create %s RM resource\n", + ipa_rm_resource_str(rm_ctx->prod_params.name)); + return result; + } + rm_ctx->prod_valid = true; + created = true; + IPA_USB_DBG("Created %s RM resource\n", + ipa_rm_resource_str(rm_ctx->prod_params.name)); + } + + /* Create CONS */ + if (!rm_ctx->cons_valid) { + rm_ctx->cons_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ? + IPA_RM_RESOURCE_USB_DPL_CONS : + IPA_RM_RESOURCE_USB_CONS; + rm_ctx->cons_params.floor_voltage = IPA_VOLTAGE_SVS; + rm_ctx->cons_params.reg_params.user_data = NULL; + rm_ctx->cons_params.reg_params.notify_cb = NULL; + rm_ctx->cons_params.request_resource = + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
+ ipa3_usb_dpl_cons_request_resource_cb : + ipa3_usb_cons_request_resource_cb; + rm_ctx->cons_params.release_resource = + IPA3_USB_IS_TTYPE_DPL(ttype) ? + ipa3_usb_dpl_cons_release_resource_cb : + ipa3_usb_cons_release_resource_cb; + result = ipa_rm_create_resource(&rm_ctx->cons_params); + if (result) { + IPA_USB_ERR("Failed to create %s RM resource\n", + ipa_rm_resource_str(rm_ctx->cons_params.name)); + goto create_cons_rsc_fail; + } + rm_ctx->cons_valid = true; + IPA_USB_DBG("Created %s RM resource\n", + ipa_rm_resource_str(rm_ctx->cons_params.name)); + } + + return 0; + +create_cons_rsc_fail: + if (created) { + rm_ctx->prod_valid = false; + ipa_rm_delete_resource(rm_ctx->prod_params.name); + } + return result; +} + +int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot, + struct ipa_usb_teth_params *teth_params, + int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, + void *), + void *user_data) +{ + int result = -EFAULT; + enum ipa3_usb_transport_type ttype; + struct ipa3_usb_teth_prot_context *teth_prot_ptr; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE || + ((teth_prot == IPA_USB_RNDIS || teth_prot == IPA_USB_ECM) && + teth_params == NULL) || ipa_usb_notify_cb == NULL || + user_data == NULL) { + IPA_USB_ERR("bad parameters\n"); + result = -EINVAL; + goto bad_params; + } + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_INIT_TETH_PROT, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + result = -EPERM; + goto bad_params; + } + + /* Create IPA RM USB resources */ + teth_prot_ptr = &ipa3_usb_ctx->teth_prot_ctx[teth_prot]; + result = ipa3_usb_create_rm_resources(ttype); + if (result) { + IPA_USB_ERR("Failed creating IPA RM USB resources\n"); + goto bad_params; + } + + if (!ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb) { + ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = + ipa_usb_notify_cb; + } else if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + if (ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb != + ipa_usb_notify_cb) { + IPA_USB_ERR("Got different notify_cb\n"); + result = -EINVAL; + goto bad_params; + } + } else { + IPA_USB_ERR("Already has dpl_notify_cb\n"); + result = -EINVAL; + goto bad_params; + } + + /* Initialize tethering protocol */ + switch (teth_prot) { + case IPA_USB_RNDIS: + case IPA_USB_ECM: + if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state != + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_DBG("%s already initialized\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + result = -EPERM; + goto bad_params; + } + ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data; + if (teth_prot == IPA_USB_RNDIS) { + struct ipa_usb_init_params *rndis_ptr = + &teth_prot_ptr->teth_prot_params.rndis; + + rndis_ptr->device_ready_notify = + ipa3_usb_device_ready_notify_cb; + memcpy(rndis_ptr->host_ethaddr, + teth_params->host_ethaddr, + sizeof(teth_params->host_ethaddr)); + memcpy(rndis_ptr->device_ethaddr, + teth_params->device_ethaddr, + sizeof(teth_params->device_ethaddr)); + + result = rndis_ipa_init(rndis_ptr); + if (result) { + IPA_USB_ERR("Failed to initialize %s\n", + ipa3_usb_teth_prot_to_string( + teth_prot)); + goto teth_prot_init_fail; + } + } else { + struct ecm_ipa_params *ecm_ptr = + &teth_prot_ptr->teth_prot_params.ecm; + + ecm_ptr->device_ready_notify = + ipa3_usb_device_ready_notify_cb; + memcpy(ecm_ptr->host_ethaddr, + teth_params->host_ethaddr, + sizeof(teth_params->host_ethaddr)); + memcpy(ecm_ptr->device_ethaddr, + teth_params->device_ethaddr, + 
sizeof(teth_params->device_ethaddr)); + + result = ecm_ipa_init(ecm_ptr); + if (result) { + IPA_USB_ERR("Failed to initialize %s\n", + ipa3_usb_teth_prot_to_string( + teth_prot)); + goto teth_prot_init_fail; + } + } + teth_prot_ptr->state = + IPA_USB_TETH_PROT_INITIALIZED; + ipa3_usb_ctx->num_init_prot++; + IPA_USB_DBG("initialized %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state != + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_DBG("%s already initialized\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + result = -EPERM; + goto bad_params; + } + ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data; + result = ipa3_usb_init_teth_bridge(); + if (result) + goto teth_prot_init_fail; + ipa3_usb_ctx->teth_prot_ctx[teth_prot].state = + IPA_USB_TETH_PROT_INITIALIZED; + ipa3_usb_ctx->num_init_prot++; + IPA_USB_DBG("initialized %s %s\n", + ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + break; + case IPA_USB_DIAG: + if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state != + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_DBG("DPL already initialized\n"); + result = -EPERM; + goto bad_params; + } + ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data; + ipa3_usb_ctx->teth_prot_ctx[teth_prot].state = + IPA_USB_TETH_PROT_INITIALIZED; + IPA_USB_DBG("initialized DPL\n"); + break; + default: + IPA_USB_ERR("unexpected tethering protocol\n"); + result = -EINVAL; + goto bad_params; + } + + if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype)) + IPA_USB_ERR("failed to change state to initialized\n"); + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +teth_prot_init_fail: + if ((IPA3_USB_IS_TTYPE_DPL(ttype)) + || (ipa3_usb_ctx->num_init_prot == 0)) { + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = false; + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = false; + ipa_rm_delete_resource( + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name); + ipa_rm_delete_resource( + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name); + } +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; +} +EXPORT_SYMBOL(ipa_usb_init_teth_prot); + +void ipa3_usb_gsi_evt_err_cb(struct gsi_evt_err_notify *notify) +{ + IPA_USB_DBG_LOW("entry\n"); + if (!notify) + return; + IPA_USB_ERR("Received event error %d, description: %d\n", + notify->evt_id, notify->err_desc); + IPA_USB_DBG_LOW("exit\n"); +} + +void ipa3_usb_gsi_chan_err_cb(struct gsi_chan_err_notify *notify) +{ + IPA_USB_DBG_LOW("entry\n"); + if (!notify) + return; + IPA_USB_ERR("Received channel error %d, description: %d\n", + notify->evt_id, notify->err_desc); + IPA_USB_DBG_LOW("exit\n"); +} + +static bool ipa3_usb_check_chan_params(struct ipa_usb_xdci_chan_params *params) +{ + IPA_USB_DBG_LOW("gevntcount_low_addr = %x\n", + params->gevntcount_low_addr); + IPA_USB_DBG_LOW("gevntcount_hi_addr = %x\n", + params->gevntcount_hi_addr); + IPA_USB_DBG_LOW("dir = %d\n", params->dir); + IPA_USB_DBG_LOW("xfer_ring_len = %d\n", params->xfer_ring_len); + IPA_USB_DBG_LOW("xfer_ring_base_addr = %llx\n", + params->xfer_ring_base_addr); + IPA_USB_DBG_LOW("last_trb_addr_iova = %x\n", + params->xfer_scratch.last_trb_addr_iova); + IPA_USB_DBG_LOW("const_buffer_size = %d\n", + params->xfer_scratch.const_buffer_size); + IPA_USB_DBG_LOW("depcmd_low_addr = %x\n", + params->xfer_scratch.depcmd_low_addr); + IPA_USB_DBG_LOW("depcmd_hi_addr = %x\n", + 
params->xfer_scratch.depcmd_hi_addr); + + if (params->client >= IPA_CLIENT_MAX || + params->teth_prot > IPA_USB_MAX_TETH_PROT_SIZE || + params->xfer_ring_len % GSI_CHAN_RE_SIZE_16B || + params->xfer_scratch.const_buffer_size < 1 || + params->xfer_scratch.const_buffer_size > 31) { + IPA_USB_ERR("Invalid params\n"); + return false; + } + switch (params->teth_prot) { + case IPA_USB_DIAG: + if (!IPA_CLIENT_IS_CONS(params->client)) { + IPA_USB_ERR("DPL supports only DL channel\n"); + return false; + } + case IPA_USB_RNDIS: + case IPA_USB_ECM: + if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state == + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_ERR("%s is not initialized\n", + ipa3_usb_teth_prot_to_string( + params->teth_prot)); + return false; + } + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state == + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_ERR("%s is not initialized\n", + ipa3_usb_teth_bridge_prot_to_string( + params->teth_prot)); + return false; + } + break; + default: + IPA_USB_ERR("Unknown tethering protocol (%d)\n", + params->teth_prot); + return false; + } + return true; +} + +static int ipa3_usb_smmu_map_xdci_channel( + struct ipa_usb_xdci_chan_params *params, bool map) +{ + int result; + u32 gevntcount_r = rounddown(params->gevntcount_low_addr, PAGE_SIZE); + u32 xfer_scratch_r = + rounddown(params->xfer_scratch.depcmd_low_addr, PAGE_SIZE); + + if (gevntcount_r != xfer_scratch_r) { + IPA_USB_ERR("No support more than 1 page map for USB regs\n"); + WARN_ON(1); + return -EINVAL; + } + + if (map) { + if (ipa3_usb_ctx->smmu_reg_map.cnt == 0) { + ipa3_usb_ctx->smmu_reg_map.addr = gevntcount_r; + result = ipa3_smmu_map_peer_reg( + ipa3_usb_ctx->smmu_reg_map.addr, true); + if (result) { + IPA_USB_ERR("failed to map USB regs %d\n", + result); + return result; + } + } else { + if (gevntcount_r != ipa3_usb_ctx->smmu_reg_map.addr) { + IPA_USB_ERR( + "No support for map different reg\n"); + return -EINVAL; + } + } + ipa3_usb_ctx->smmu_reg_map.cnt++; + } else { + if (gevntcount_r != ipa3_usb_ctx->smmu_reg_map.addr) { + IPA_USB_ERR( + "No support for map different reg\n"); + return -EINVAL; + } + + if (ipa3_usb_ctx->smmu_reg_map.cnt == 1) { + result = ipa3_smmu_map_peer_reg( + ipa3_usb_ctx->smmu_reg_map.addr, false); + if (result) { + IPA_USB_ERR("failed to unmap USB regs %d\n", + result); + return result; + } + } + ipa3_usb_ctx->smmu_reg_map.cnt--; + } + + result = ipa3_smmu_map_peer_buff(params->xfer_ring_base_addr_iova, + params->xfer_ring_base_addr, params->xfer_ring_len, map); + if (result) { + IPA_USB_ERR("failed to map Xfer ring %d\n", result); + return result; + } + + result = ipa3_smmu_map_peer_buff(params->data_buff_base_addr_iova, + params->data_buff_base_addr, params->data_buff_base_len, map); + if (result) { + IPA_USB_ERR("failed to map TRBs buff %d\n", result); + return result; + } + + return 0; +} + +static int ipa3_usb_request_xdci_channel( + struct ipa_usb_xdci_chan_params *params, + struct ipa_req_chan_out_params *out_params) +{ + int result = -EFAULT; + struct ipa_request_gsi_channel_params chan_params; + enum ipa3_usb_transport_type ttype; + enum ipa_usb_teth_prot teth_prot; + struct ipa_usb_init_params *rndis_ptr; + struct ecm_ipa_params *ecm_ptr; + + IPA_USB_DBG_LOW("entry\n"); + if (params == NULL || out_params == NULL || + !ipa3_usb_check_chan_params(params)) { + IPA_USB_ERR("bad parameters\n"); + return -EINVAL; + } + + ttype = IPA3_USB_GET_TTYPE(params->teth_prot); + teth_prot = params->teth_prot; + + if 
(!ipa3_usb_check_legal_op(IPA_USB_OP_REQUEST_CHANNEL, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + return -EPERM; + } + + rndis_ptr = + &ipa3_usb_ctx->teth_prot_ctx[teth_prot].teth_prot_params.rndis; + ecm_ptr = + &ipa3_usb_ctx->teth_prot_ctx[teth_prot].teth_prot_params.ecm; + + memset(&chan_params, 0, sizeof(struct ipa_request_gsi_channel_params)); + memcpy(&chan_params.ipa_ep_cfg, ¶ms->ipa_ep_cfg, + sizeof(struct ipa_ep_cfg)); + chan_params.client = params->client; + switch (params->teth_prot) { + case IPA_USB_RNDIS: + chan_params.priv = rndis_ptr->private; + if (params->dir == GSI_CHAN_DIR_FROM_GSI) + chan_params.notify = rndis_ptr->ipa_tx_notify; + else + chan_params.notify = rndis_ptr->ipa_rx_notify; + chan_params.skip_ep_cfg = rndis_ptr->skip_ep_cfg; + break; + case IPA_USB_ECM: + chan_params.priv = ecm_ptr->private; + if (params->dir == GSI_CHAN_DIR_FROM_GSI) + chan_params.notify = ecm_ptr->ecm_ipa_tx_dp_notify; + else + chan_params.notify = ecm_ptr->ecm_ipa_rx_dp_notify; + chan_params.skip_ep_cfg = ecm_ptr->skip_ep_cfg; + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + chan_params.priv = + ipa3_usb_ctx->teth_bridge_params.private_data; + chan_params.notify = + ipa3_usb_ctx->teth_bridge_params.usb_notify_cb; + chan_params.skip_ep_cfg = + ipa3_usb_ctx->teth_bridge_params.skip_ep_cfg; + break; + case IPA_USB_DIAG: + chan_params.priv = NULL; + chan_params.notify = NULL; + chan_params.skip_ep_cfg = true; + break; + default: + break; + } + + result = ipa3_usb_smmu_map_xdci_channel(params, true); + if (result) { + IPA_USB_ERR("failed to smmu map %d\n", result); + return result; + } + + /* store channel params for SMMU unmap */ + ipa3_usb_ctx->ttype_ctx[ttype].ch_params = *params; + + chan_params.keep_ipa_awake = params->keep_ipa_awake; + chan_params.evt_ring_params.intf = GSI_EVT_CHTYPE_XDCI_EV; + chan_params.evt_ring_params.intr = GSI_INTR_IRQ; + chan_params.evt_ring_params.re_size = GSI_EVT_RING_RE_SIZE_16B; + chan_params.evt_ring_params.ring_len = params->xfer_ring_len - + chan_params.evt_ring_params.re_size; + chan_params.evt_ring_params.ring_base_addr = + params->xfer_ring_base_addr; + chan_params.evt_ring_params.ring_base_vaddr = NULL; + chan_params.evt_ring_params.int_modt = 0; + chan_params.evt_ring_params.int_modt = 0; + chan_params.evt_ring_params.intvec = 0; + chan_params.evt_ring_params.msi_addr = 0; + chan_params.evt_ring_params.rp_update_addr = 0; + chan_params.evt_ring_params.exclusive = true; + chan_params.evt_ring_params.err_cb = ipa3_usb_gsi_evt_err_cb; + chan_params.evt_ring_params.user_data = NULL; + chan_params.evt_scratch.xdci.gevntcount_low_addr = + params->gevntcount_low_addr; + chan_params.evt_scratch.xdci.gevntcount_hi_addr = + params->gevntcount_hi_addr; + chan_params.chan_params.prot = GSI_CHAN_PROT_XDCI; + chan_params.chan_params.dir = params->dir; + /* chan_id is set in ipa3_request_gsi_channel() */ + chan_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B; + chan_params.chan_params.ring_len = params->xfer_ring_len; + chan_params.chan_params.ring_base_addr = + params->xfer_ring_base_addr; + chan_params.chan_params.ring_base_vaddr = NULL; + chan_params.chan_params.use_db_eng = GSI_CHAN_DB_MODE; + chan_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG; + if (params->dir == GSI_CHAN_DIR_FROM_GSI) + chan_params.chan_params.low_weight = + IPA_USB_DL_CHAN_LOW_WEIGHT; + else + chan_params.chan_params.low_weight = + IPA_USB_UL_CHAN_LOW_WEIGHT; + chan_params.chan_params.xfer_cb = NULL; + chan_params.chan_params.err_cb = ipa3_usb_gsi_chan_err_cb; + 
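+ /*
+ * The xDCI channel scratch below passes USB controller context to GSI:
+ * the last TRB address, the constant buffer size and the DEPCMD register
+ * address pair, all taken from params->xfer_scratch. xferrscidx stays 0
+ * here and is programmed at connect time via ipa3_xdci_start(), and
+ * outstanding_threshold is two ring elements (one for MBIM).
+ */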
chan_params.chan_params.chan_user_data = NULL; + chan_params.chan_scratch.xdci.last_trb_addr = + params->xfer_scratch.last_trb_addr_iova; + /* xferrscidx will be updated later */ + chan_params.chan_scratch.xdci.xferrscidx = 0; + chan_params.chan_scratch.xdci.const_buffer_size = + params->xfer_scratch.const_buffer_size; + chan_params.chan_scratch.xdci.depcmd_low_addr = + params->xfer_scratch.depcmd_low_addr; + chan_params.chan_scratch.xdci.depcmd_hi_addr = + params->xfer_scratch.depcmd_hi_addr; + chan_params.chan_scratch.xdci.outstanding_threshold = + ((params->teth_prot == IPA_USB_MBIM) ? 1 : 2) * + chan_params.chan_params.re_size; + /* max_outstanding_tre is set in ipa3_request_gsi_channel() */ + result = ipa3_request_gsi_channel(&chan_params, out_params); + if (result) { + IPA_USB_ERR("failed to allocate GSI channel\n"); + ipa3_usb_smmu_map_xdci_channel(params, false); + return result; + } + + IPA_USB_DBG_LOW("exit\n"); + return 0; +} + +static int ipa3_usb_release_xdci_channel(u32 clnt_hdl, + enum ipa3_usb_transport_type ttype) +{ + int result = 0; + + IPA_USB_DBG_LOW("entry\n"); + if (ttype > IPA_USB_TRANSPORT_MAX) { + IPA_USB_ERR("bad parameter\n"); + return -EINVAL; + } + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_RELEASE_CHANNEL, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + return -EPERM; + } + + /* Release channel */ + result = ipa3_release_gsi_channel(clnt_hdl); + if (result) { + IPA_USB_ERR("failed to deallocate channel\n"); + return result; + } + + result = ipa3_usb_smmu_map_xdci_channel( + &ipa3_usb_ctx->ttype_ctx[ttype].ch_params, false); + + /* Change ipa_usb state to INITIALIZED */ + if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype)) + IPA_USB_ERR("failed to change state to initialized\n"); + + IPA_USB_DBG_LOW("exit\n"); + return 0; +} + +static int ipa3_usb_request_prod(enum ipa3_usb_transport_type ttype) +{ + int result; + struct ipa3_usb_rm_context *rm_ctx; + const char *rsrc_str; + + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name); + + IPA_USB_DBG_LOW("requesting %s\n", rsrc_str); + init_completion(&rm_ctx->prod_comp); + result = ipa_rm_request_resource(rm_ctx->prod_params.name); + if (result) { + if (result != -EINPROGRESS) { + IPA_USB_ERR("failed to request %s: %d\n", + rsrc_str, result); + return result; + } + result = wait_for_completion_timeout(&rm_ctx->prod_comp, + msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC)); + if (result == 0) { + IPA_USB_ERR("timeout request %s\n", rsrc_str); + return -ETIME; + } + } + + IPA_USB_DBG_LOW("%s granted\n", rsrc_str); + return 0; +} + +static int ipa3_usb_release_prod(enum ipa3_usb_transport_type ttype) +{ + int result; + struct ipa3_usb_rm_context *rm_ctx; + const char *rsrc_str; + + rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name); + + IPA_USB_DBG_LOW("releasing %s\n", rsrc_str); + + init_completion(&rm_ctx->prod_comp); + result = ipa_rm_release_resource(rm_ctx->prod_params.name); + if (result) { + if (result != -EINPROGRESS) { + IPA_USB_ERR("failed to release %s: %d\n", + rsrc_str, result); + return result; + } + result = wait_for_completion_timeout(&rm_ctx->prod_comp, + msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC)); + if (result == 0) { + IPA_USB_ERR("timeout release %s\n", rsrc_str); + return -ETIME; + } + } + + IPA_USB_DBG_LOW("%s released\n", rsrc_str); + return 0; +} + +static bool ipa3_usb_check_connect_params( + struct ipa_usb_xdci_connect_params_internal *params) +{ + IPA_USB_DBG_LOW("ul xferrscidx 
= %d\n", params->usb_to_ipa_xferrscidx); + IPA_USB_DBG_LOW("dl xferrscidx = %d\n", params->ipa_to_usb_xferrscidx); + IPA_USB_DBG_LOW("max_supported_bandwidth_mbps = %d\n", + params->max_supported_bandwidth_mbps); + + if (params->max_pkt_size < IPA_USB_HIGH_SPEED_512B || + params->max_pkt_size > IPA_USB_SUPER_SPEED_1024B || + params->ipa_to_usb_xferrscidx > 127 || + (params->teth_prot != IPA_USB_DIAG && + (params->usb_to_ipa_xferrscidx > 127)) || + params->teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) { + IPA_USB_ERR("Invalid params\n"); + return false; + } + + if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state == + IPA_USB_TETH_PROT_INVALID) { + IPA_USB_ERR("%s is not initialized\n", + ipa3_usb_teth_prot_to_string( + params->teth_prot)); + return false; + } + + return true; +} + +static int ipa3_usb_connect_teth_bridge( + struct teth_bridge_connect_params *params) +{ + int result; + + result = teth_bridge_connect(params); + if (result) { + IPA_USB_ERR("failed to connect teth_bridge (%s)\n", + params->tethering_mode == TETH_TETHERING_MODE_RMNET ? + "rmnet" : "mbim"); + return result; + } + + return 0; +} + +static int ipa3_usb_connect_dpl(void) +{ + int res = 0; + + /* + * Add DPL dependency to RM dependency graph, first add_dependency call + * is sync in order to make sure the IPA clocks are up before we + * continue and notify the USB driver it may continue. + */ + res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res < 0) { + IPA_USB_ERR("ipa_rm_add_dependency_sync() failed.\n"); + return res; + } + + /* + * this add_dependency call can't be sync since it will block until DPL + * status is connected (which can happen only later in the flow), + * the clocks are already up so the call doesn't need to block. 
+ */ + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_DPL_CONS); + if (res < 0 && res != -EINPROGRESS) { + IPA_USB_ERR("ipa_rm_add_dependency() failed.\n"); + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, + IPA_RM_RESOURCE_Q6_CONS); + return res; + } + + return 0; +} + +static int ipa3_usb_connect_teth_prot(enum ipa_usb_teth_prot teth_prot) +{ + int result; + struct teth_bridge_connect_params teth_bridge_params; + struct ipa3_usb_teth_prot_conn_params *teth_conn_params; + enum ipa3_usb_transport_type ttype; + struct ipa3_usb_teth_prot_context *teth_prot_ptr = + &ipa3_usb_ctx->teth_prot_ctx[teth_prot]; + + IPA_USB_DBG("connecting protocol = %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + teth_conn_params = &(ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params); + + switch (teth_prot) { + case IPA_USB_RNDIS: + if (teth_prot_ptr->state == + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is already connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + } + ipa3_usb_ctx->ttype_ctx[ttype].user_data = + teth_prot_ptr->user_data; + result = rndis_ipa_pipe_connect_notify( + teth_conn_params->usb_to_ipa_clnt_hdl, + teth_conn_params->ipa_to_usb_clnt_hdl, + teth_conn_params->params.max_xfer_size_bytes_to_dev, + teth_conn_params->params.max_packet_number_to_dev, + teth_conn_params->params.max_xfer_size_bytes_to_host, + teth_prot_ptr->teth_prot_params.rndis.private); + if (result) { + IPA_USB_ERR("failed to connect %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; + return result; + } + teth_prot_ptr->state = + IPA_USB_TETH_PROT_CONNECTED; + IPA_USB_DBG("%s is connected.\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + case IPA_USB_ECM: + if (teth_prot_ptr->state == + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is already connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + } + ipa3_usb_ctx->ttype_ctx[ttype].user_data = + teth_prot_ptr->user_data; + result = ecm_ipa_connect(teth_conn_params->usb_to_ipa_clnt_hdl, + teth_conn_params->ipa_to_usb_clnt_hdl, + teth_prot_ptr->teth_prot_params.ecm.private); + if (result) { + IPA_USB_ERR("failed to connect %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; + return result; + } + teth_prot_ptr->state = + IPA_USB_TETH_PROT_CONNECTED; + IPA_USB_DBG("%s is connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + if (teth_prot_ptr->state == + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is already connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + } + result = ipa3_usb_init_teth_bridge(); + if (result) + return result; + + ipa3_usb_ctx->ttype_ctx[ttype].user_data = + teth_prot_ptr->user_data; + teth_bridge_params.ipa_usb_pipe_hdl = + teth_conn_params->ipa_to_usb_clnt_hdl; + teth_bridge_params.usb_ipa_pipe_hdl = + teth_conn_params->usb_to_ipa_clnt_hdl; + teth_bridge_params.tethering_mode = + (teth_prot == IPA_USB_RMNET) ? 
+ (TETH_TETHERING_MODE_RMNET):(TETH_TETHERING_MODE_MBIM); + teth_bridge_params.client_type = IPA_CLIENT_USB_PROD; + result = ipa3_usb_connect_teth_bridge(&teth_bridge_params); + if (result) { + ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; + return result; + } + ipa3_usb_ctx->teth_prot_ctx[teth_prot].state = + IPA_USB_TETH_PROT_CONNECTED; + ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY); + IPA_USB_DBG("%s (%s) is connected\n", + ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + break; + case IPA_USB_DIAG: + if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state == + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is already connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + } + + ipa3_usb_ctx->ttype_ctx[ttype].user_data = + ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data; + result = ipa3_usb_connect_dpl(); + if (result) { + IPA_USB_ERR("Failed connecting DPL result=%d\n", + result); + ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; + return result; + } + ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state = + IPA_USB_TETH_PROT_CONNECTED; + ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY); + IPA_USB_DBG("%s is connected.\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + default: + IPA_USB_ERR("Invalid tethering protocol\n"); + return -EFAULT; + } + + return 0; +} + +static int ipa3_usb_disconnect_teth_bridge(void) +{ + int result; + + result = teth_bridge_disconnect(IPA_CLIENT_USB_PROD); + if (result) { + IPA_USB_ERR("failed to disconnect teth_bridge\n"); + return result; + } + + return 0; +} + +static int ipa3_usb_disconnect_dpl(void) +{ + int res; + + /* Remove DPL RM dependency */ + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res) + IPA_USB_ERR("deleting DPL_DUMMY_PROD rsrc dependency fail\n"); + + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_DPL_CONS); + if (res) + IPA_USB_ERR("deleting DPL_CONS rsrc dependencty fail\n"); + + return 0; +} + +static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot) +{ + int result = 0; + enum ipa3_usb_transport_type ttype; + struct ipa3_usb_teth_prot_context *teth_prot_ptr = + &ipa3_usb_ctx->teth_prot_ctx[teth_prot]; + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + switch (teth_prot) { + case IPA_USB_RNDIS: + case IPA_USB_ECM: + if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state != + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is not connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + return -EPERM; + } + if (teth_prot == IPA_USB_RNDIS) { + result = rndis_ipa_pipe_disconnect_notify( + teth_prot_ptr->teth_prot_params.rndis.private); + } else { + result = ecm_ipa_disconnect( + teth_prot_ptr->teth_prot_params.ecm.private); + } + if (result) { + IPA_USB_ERR("failed to disconnect %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + } + teth_prot_ptr->state = IPA_USB_TETH_PROT_INITIALIZED; + IPA_USB_DBG("disconnected %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + if (teth_prot_ptr->state != IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s (%s) is not connected\n", + ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + return -EPERM; + } + result = ipa3_usb_disconnect_teth_bridge(); + if (result) + break; + + teth_prot_ptr->state = IPA_USB_TETH_PROT_INITIALIZED; + IPA_USB_DBG("disconnected %s (%s)\n", + ipa3_usb_teth_prot_to_string(teth_prot), + 
ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + break; + case IPA_USB_DIAG: + if (teth_prot_ptr->state != IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_DBG("%s is not connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + return -EPERM; + } + result = ipa3_usb_disconnect_dpl(); + if (result) + break; + teth_prot_ptr->state = IPA_USB_TETH_PROT_INITIALIZED; + IPA_USB_DBG("disconnected %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + default: + break; + } + + ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; + return result; +} + +static int ipa3_usb_xdci_connect_internal( + struct ipa_usb_xdci_connect_params_internal *params) +{ + int result = -EFAULT; + struct ipa_rm_perf_profile profile; + enum ipa3_usb_transport_type ttype; + struct ipa3_usb_teth_prot_conn_params *teth_prot_ptr; + struct ipa3_usb_rm_context *rm_ctx_ptr; + + IPA_USB_DBG_LOW("entry\n"); + if (params == NULL || !ipa3_usb_check_connect_params(params)) { + IPA_USB_ERR("bad parameters\n"); + return -EINVAL; + } + + ttype = (params->teth_prot == IPA_USB_DIAG) ? IPA_USB_TRANSPORT_DPL : + IPA_USB_TRANSPORT_TETH; + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_CONNECT, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + return -EPERM; + } + + teth_prot_ptr = &ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params; + teth_prot_ptr->ipa_to_usb_clnt_hdl = params->ipa_to_usb_clnt_hdl; + rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx; + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) + teth_prot_ptr->usb_to_ipa_clnt_hdl = + params->usb_to_ipa_clnt_hdl; + teth_prot_ptr->params = params->teth_prot_params; + + /* Set EE xDCI specific scratch */ + result = ipa3_set_usb_max_packet_size(params->max_pkt_size); + if (result) { + IPA_USB_ERR("failed setting xDCI EE scratch field\n"); + return result; + } + + /* Set RM PROD & CONS perf profile */ + profile.max_supported_bandwidth_mbps = + params->max_supported_bandwidth_mbps; + result = ipa_rm_set_perf_profile( + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name, + &profile); + if (result) { + IPA_USB_ERR("failed to set %s perf profile\n", + ipa_rm_resource_str(rm_ctx_ptr->prod_params.name)); + return result; + } + result = ipa_rm_set_perf_profile( + rm_ctx_ptr->cons_params.name, &profile); + if (result) { + IPA_USB_ERR("failed to set %s perf profile\n", + ipa_rm_resource_str(rm_ctx_ptr->cons_params.name)); + return result; + } + + /* Request PROD */ + result = ipa3_usb_request_prod(ttype); + if (result) + return result; + + if (params->teth_prot != IPA_USB_DIAG) { + /* Start UL channel */ + result = ipa3_xdci_start(params->usb_to_ipa_clnt_hdl, + params->usb_to_ipa_xferrscidx, + params->usb_to_ipa_xferrscidx_valid); + if (result) { + IPA_USB_ERR("failed to connect UL channel\n"); + goto connect_ul_fail; + } + } + + /* Start DL/DPL channel */ + result = ipa3_xdci_start(params->ipa_to_usb_clnt_hdl, + params->ipa_to_usb_xferrscidx, + params->ipa_to_usb_xferrscidx_valid); + if (result) { + IPA_USB_ERR("failed to connect DL/DPL channel.\n"); + goto connect_dl_fail; + } + + /* Connect tethering protocol */ + result = ipa3_usb_connect_teth_prot(params->teth_prot); + if (result) { + IPA_USB_ERR("failed to connect teth protocol\n"); + goto connect_teth_prot_fail; + } + + if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) { + IPA_USB_ERR( + "failed to change state to connected\n"); + goto state_change_connected_fail; + } + + IPA_USB_DBG_LOW("exit\n"); + return 0; + +state_change_connected_fail: + ipa3_usb_disconnect_teth_prot(params->teth_prot); +connect_teth_prot_fail: + 
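+ /*
+ * Unwind in reverse order of the connect sequence above: stop and reset
+ * the DL/DPL channel and its event ring, then the UL channel for
+ * non-DPL transports, and finally release the PROD resource that was
+ * requested earlier.
+ */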
ipa3_xdci_disconnect(params->ipa_to_usb_clnt_hdl, false, -1); + ipa3_reset_gsi_channel(params->ipa_to_usb_clnt_hdl); + ipa3_reset_gsi_event_ring(params->ipa_to_usb_clnt_hdl); +connect_dl_fail: + if (params->teth_prot != IPA_USB_DIAG) { + ipa3_xdci_disconnect(params->usb_to_ipa_clnt_hdl, false, -1); + ipa3_reset_gsi_channel(params->usb_to_ipa_clnt_hdl); + ipa3_reset_gsi_event_ring(params->usb_to_ipa_clnt_hdl); + } +connect_ul_fail: + ipa3_usb_release_prod(ttype); + return result; +} + +#ifdef CONFIG_DEBUG_FS +static char dbg_buff[IPA_USB_MAX_MSG_LEN]; + +static char *ipa3_usb_cons_state_to_string(enum ipa3_usb_cons_state state) +{ + switch (state) { + case IPA_USB_CONS_GRANTED: + return "CONS_GRANTED"; + case IPA_USB_CONS_RELEASED: + return "CONS_RELEASED"; + } + + return "UNSUPPORTED"; +} + +static int ipa3_usb_get_status_dbg_info(struct ipa3_usb_status_dbg_info *status) +{ + int res; + int i; + unsigned long flags; + struct ipa3_usb_rm_context *rm_ctx_ptr; + + IPA_USB_DBG_LOW("entry\n"); + + if (ipa3_usb_ctx == NULL) { + IPA_USB_ERR("IPA USB was not inited yet\n"); + return -EFAULT; + } + + mutex_lock(&ipa3_usb_ctx->general_mutex); + + if (!status) { + IPA_USB_ERR("Invalid input\n"); + res = -EINVAL; + goto bail; + } + + memset(status, 0, sizeof(struct ipa3_usb_status_dbg_info)); + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].rm_ctx; + status->teth_state = ipa3_usb_state_to_string( + ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].state); + status->dpl_state = ipa3_usb_state_to_string( + ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].state); + if (rm_ctx_ptr->cons_valid) + status->teth_cons_state = ipa3_usb_cons_state_to_string( + rm_ctx_ptr->cons_state); + rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].rm_ctx; + if (rm_ctx_ptr->cons_valid) + status->dpl_cons_state = ipa3_usb_cons_state_to_string( + rm_ctx_ptr->cons_state); + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + + for (i = 0 ; i < IPA_USB_MAX_TETH_PROT_SIZE ; i++) { + if (ipa3_usb_ctx->teth_prot_ctx[i].state == + IPA_USB_TETH_PROT_INITIALIZED) { + if ((i == IPA_USB_RMNET) || (i == IPA_USB_MBIM)) + status->inited_prots[status->num_init_prot++] = + ipa3_usb_teth_bridge_prot_to_string(i); + else + status->inited_prots[status->num_init_prot++] = + ipa3_usb_teth_prot_to_string(i); + } else if (ipa3_usb_ctx->teth_prot_ctx[i].state == + IPA_USB_TETH_PROT_CONNECTED) { + switch (i) { + case IPA_USB_RMNET: + case IPA_USB_MBIM: + status->teth_connected_prot = + ipa3_usb_teth_bridge_prot_to_string(i); + break; + case IPA_USB_DIAG: + status->dpl_connected_prot = + ipa3_usb_teth_prot_to_string(i); + break; + default: + status->teth_connected_prot = + ipa3_usb_teth_prot_to_string(i); + } + } + } + + res = 0; + IPA_USB_DBG_LOW("exit\n"); +bail: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return res; +} + +static ssize_t ipa3_read_usb_state_info(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ipa3_usb_status_dbg_info status; + int result; + int nbytes; + int cnt = 0; + int i; + + result = ipa3_usb_get_status_dbg_info(&status); + if (result) { + nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN, + "Fail to read IPA USB status\n"); + cnt += nbytes; + } else { + nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN, + "Tethering Data State: %s\n" + "DPL State: %s\n" + "Protocols in Initialized State: ", + status.teth_state, + status.dpl_state); + cnt += nbytes; + + for (i = 0 ; i < status.num_init_prot ; i++) { + nbytes 
= scnprintf(dbg_buff + cnt, + IPA_USB_MAX_MSG_LEN - cnt, + "%s ", status.inited_prots[i]); + cnt += nbytes; + } + nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt, + status.num_init_prot ? "\n" : "None\n"); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt, + "Protocols in Connected State: "); + cnt += nbytes; + if (status.teth_connected_prot) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_USB_MAX_MSG_LEN - cnt, + "%s ", status.teth_connected_prot); + cnt += nbytes; + } + if (status.dpl_connected_prot) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_USB_MAX_MSG_LEN - cnt, + "%s ", status.dpl_connected_prot); + cnt += nbytes; + } + nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt, + (status.teth_connected_prot || + status.dpl_connected_prot) ? "\n" : "None\n"); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt, + "USB Tethering Consumer State: %s\n", + status.teth_cons_state ? + status.teth_cons_state : "Invalid"); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt, + "DPL Consumer State: %s\n", + status.dpl_cons_state ? status.dpl_cons_state : + "Invalid"); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +const struct file_operations ipa3_ipa_usb_ops = { + .read = ipa3_read_usb_state_info, +}; + +static void ipa_usb_debugfs_init(void) +{ + const mode_t read_only_mode = 0444; + + ipa3_usb_ctx->dent = debugfs_create_dir("ipa_usb", 0); + if (IS_ERR(ipa3_usb_ctx->dent)) { + pr_err("fail to create folder in debug_fs\n"); + return; + } + + ipa3_usb_ctx->dfile_state_info = debugfs_create_file("state_info", + read_only_mode, ipa3_usb_ctx->dent, 0, + &ipa3_ipa_usb_ops); + if (!ipa3_usb_ctx->dfile_state_info || + IS_ERR(ipa3_usb_ctx->dfile_state_info)) { + pr_err("failed to create file for state_info\n"); + goto fail; + } + + return; + +fail: + debugfs_remove_recursive(ipa3_usb_ctx->dent); + ipa3_usb_ctx->dent = NULL; +} + +static void ipa_usb_debugfs_remove(void) +{ + if (IS_ERR(ipa3_usb_ctx->dent)) { + IPA_USB_ERR("ipa_usb debugfs folder was not created\n"); + return; + } + + debugfs_remove_recursive(ipa3_usb_ctx->dent); +} +#else /* CONFIG_DEBUG_FS */ +static void ipa_usb_debugfs_init(void){} +static void ipa_usb_debugfs_remove(void){} +#endif /* CONFIG_DEBUG_FS */ + + + +int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params, + struct ipa_usb_xdci_chan_params *dl_chan_params, + struct ipa_req_chan_out_params *ul_out_params, + struct ipa_req_chan_out_params *dl_out_params, + struct ipa_usb_xdci_connect_params *connect_params) +{ + int result = -EFAULT; + struct ipa_usb_xdci_connect_params_internal conn_params; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + if (connect_params == NULL || dl_chan_params == NULL || + dl_out_params == NULL || + (connect_params->teth_prot != IPA_USB_DIAG && + (ul_chan_params == NULL || ul_out_params == NULL))) { + IPA_USB_ERR("bad parameters\n"); + result = -EINVAL; + goto bad_params; + } + + if (connect_params->teth_prot != IPA_USB_DIAG) { + result = ipa3_usb_request_xdci_channel(ul_chan_params, + ul_out_params); + if (result) { + IPA_USB_ERR("failed to allocate UL channel\n"); + goto bad_params; + } + } + + result = ipa3_usb_request_xdci_channel(dl_chan_params, dl_out_params); + if (result) { + IPA_USB_ERR("failed to allocate DL/DPL channel\n"); + goto alloc_dl_chan_fail; + } + + memset(&conn_params, 0, + sizeof(struct ipa_usb_xdci_connect_params_internal)); + 
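+ /*
+ * Pack the caller-facing channel and connect parameters into the
+ * internal connect structure. For DPL (IPA_USB_DIAG) only the DL
+ * channel exists, so the usb_to_ipa (UL) fields are left zeroed and the
+ * caller may pass NULL for ul_chan_params/ul_out_params, as checked at
+ * the top of this function. Illustrative call order for a tethering
+ * client (variable names are examples only):
+ *   ipa_usb_init_teth_prot(IPA_USB_RNDIS, &teth_params, notify_cb, priv);
+ *   ipa_usb_xdci_connect(&ul_chan, &dl_chan, &ul_out, &dl_out, &connect);
+ */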
conn_params.max_pkt_size = connect_params->max_pkt_size; + conn_params.ipa_to_usb_clnt_hdl = dl_out_params->clnt_hdl; + conn_params.ipa_to_usb_xferrscidx = + connect_params->ipa_to_usb_xferrscidx; + conn_params.ipa_to_usb_xferrscidx_valid = + connect_params->ipa_to_usb_xferrscidx_valid; + if (connect_params->teth_prot != IPA_USB_DIAG) { + conn_params.usb_to_ipa_clnt_hdl = ul_out_params->clnt_hdl; + conn_params.usb_to_ipa_xferrscidx = + connect_params->usb_to_ipa_xferrscidx; + conn_params.usb_to_ipa_xferrscidx_valid = + connect_params->usb_to_ipa_xferrscidx_valid; + } + conn_params.teth_prot = connect_params->teth_prot; + conn_params.teth_prot_params = connect_params->teth_prot_params; + conn_params.max_supported_bandwidth_mbps = + connect_params->max_supported_bandwidth_mbps; + result = ipa3_usb_xdci_connect_internal(&conn_params); + if (result) { + IPA_USB_ERR("failed to connect\n"); + goto connect_fail; + } + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +connect_fail: + ipa3_usb_release_xdci_channel(dl_out_params->clnt_hdl, + IPA3_USB_GET_TTYPE(dl_chan_params->teth_prot)); +alloc_dl_chan_fail: + if (connect_params->teth_prot != IPA_USB_DIAG) + ipa3_usb_release_xdci_channel(ul_out_params->clnt_hdl, + IPA3_USB_GET_TTYPE(ul_chan_params->teth_prot)); +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; +} +EXPORT_SYMBOL(ipa_usb_xdci_connect); + +static int ipa3_usb_check_disconnect_prot(enum ipa_usb_teth_prot teth_prot) +{ + if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) { + IPA_USB_ERR("bad parameter\n"); + return -EFAULT; + } + + if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state != + IPA_USB_TETH_PROT_CONNECTED) { + IPA_USB_ERR("%s is not connected\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + return -EFAULT; + } + + return 0; +} + +/* Assumes lock already acquired */ +static int ipa_usb_xdci_dismiss_channels(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = 0; + enum ipa3_usb_transport_type ttype; + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + IPA_USB_DBG_LOW("entry\n"); + + /* Reset DL channel */ + result = ipa3_reset_gsi_channel(dl_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset DL channel\n"); + return result; + } + + /* Reset DL event ring */ + result = ipa3_reset_gsi_event_ring(dl_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset DL event ring.\n"); + return result; + } + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + /* Reset UL channel */ + result = ipa3_reset_gsi_channel(ul_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset UL channel\n"); + return result; + } + + /* Reset UL event ring */ + result = ipa3_reset_gsi_event_ring(ul_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset UL event ring\n"); + return result; + } + } + + /* Change state to STOPPED */ + if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype)) + IPA_USB_ERR("failed to change state to stopped\n"); + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, ttype); + if (result) { + IPA_USB_ERR("failed to release UL channel\n"); + return result; + } + } + + result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, ttype); + if (result) { + IPA_USB_ERR("failed to release DL channel\n"); + return result; + } + + IPA_USB_DBG_LOW("exit\n"); + + return 0; +} + +int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = 0; + struct ipa_ep_cfg_holb holb_cfg; + unsigned long flags; + enum ipa3_usb_state 
orig_state; + enum ipa3_usb_transport_type ttype; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_DISCONNECT, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + result = -EPERM; + goto bad_params; + } + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + if (ipa3_usb_ctx->ttype_ctx[ttype].state == + IPA_USB_SUSPENDED_NO_RWAKEUP) { + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl, + teth_prot); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; + } + + if (ipa3_usb_check_disconnect_prot(teth_prot)) { + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + result = -EINVAL; + goto bad_params; + } + + if (ipa3_usb_ctx->ttype_ctx[ttype].state != IPA_USB_SUSPENDED) { + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + /* Stop DL/DPL channel */ + result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1); + if (result) { + IPA_USB_ERR("failed to disconnect DL/DPL channel\n"); + goto bad_params; + } + } else { + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_EN; + holb_cfg.tmr_val = 0; + ipa3_cfg_ep_holb(dl_clnt_hdl, &holb_cfg); + } + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + orig_state = ipa3_usb_ctx->ttype_ctx[ttype].state; + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + if (orig_state != IPA_USB_SUSPENDED) { + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, + flags); + /* Stop UL channel */ + result = ipa3_xdci_disconnect(ul_clnt_hdl, + true, + ipa3_usb_ctx->qmi_req_id); + if (result) { + IPA_USB_ERR("failed disconnect UL channel\n"); + goto bad_params; + } + ipa3_usb_ctx->qmi_req_id++; + } else + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, + flags); + } else + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + + result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl, + teth_prot); + if (result) + goto bad_params; + + /* Disconnect tethering protocol */ + result = ipa3_usb_disconnect_teth_prot(teth_prot); + if (result) + goto bad_params; + + if (orig_state != IPA_USB_SUSPENDED) { + result = ipa3_usb_release_prod(ttype); + if (result) { + IPA_USB_ERR("failed to release PROD\n"); + goto bad_params; + } + } + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; + +} +EXPORT_SYMBOL(ipa_usb_xdci_disconnect); + +int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot) +{ + int result = -EFAULT; + enum ipa3_usb_transport_type ttype; + struct ipa3_usb_teth_prot_context *teth_prot_ptr; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) { + IPA_USB_ERR("bad parameters\n"); + result = -EINVAL; + goto bad_params; + } + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_DEINIT_TETH_PROT, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + result = -EPERM; + goto bad_params; + } + + /* Clean-up tethering protocol */ + teth_prot_ptr = &ipa3_usb_ctx->teth_prot_ctx[teth_prot]; + + switch (teth_prot) { + case IPA_USB_RNDIS: + case IPA_USB_ECM: + if (teth_prot_ptr->state != + IPA_USB_TETH_PROT_INITIALIZED) { + IPA_USB_ERR("%s is not initialized\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + result = -EINVAL; + goto bad_params; + } + if (teth_prot == IPA_USB_RNDIS) 
+ rndis_ipa_cleanup( + teth_prot_ptr->teth_prot_params.rndis.private); + else + ecm_ipa_cleanup( + teth_prot_ptr->teth_prot_params.ecm.private); + teth_prot_ptr->user_data = NULL; + teth_prot_ptr->state = IPA_USB_TETH_PROT_INVALID; + ipa3_usb_ctx->num_init_prot--; + IPA_USB_DBG("deinitialized %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + case IPA_USB_RMNET: + case IPA_USB_MBIM: + if (teth_prot_ptr->state != + IPA_USB_TETH_PROT_INITIALIZED) { + IPA_USB_ERR("%s (%s) is not initialized\n", + ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + result = -EINVAL; + goto bad_params; + } + + teth_prot_ptr->user_data = NULL; + teth_prot_ptr->state = IPA_USB_TETH_PROT_INVALID; + ipa3_usb_ctx->num_init_prot--; + IPA_USB_DBG("deinitialized %s (%s)\n", + ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); + break; + case IPA_USB_DIAG: + if (teth_prot_ptr->state != + IPA_USB_TETH_PROT_INITIALIZED) { + IPA_USB_ERR("%s is not initialized\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + result = -EINVAL; + goto bad_params; + } + teth_prot_ptr->user_data = NULL; + teth_prot_ptr->state = IPA_USB_TETH_PROT_INVALID; + IPA_USB_DBG("deinitialized %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + break; + default: + IPA_USB_ERR("unexpected tethering protocol\n"); + result = -EINVAL; + goto bad_params; + } + + if (IPA3_USB_IS_TTYPE_DPL(ttype) || + (ipa3_usb_ctx->num_init_prot == 0)) { + if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype)) + IPA_USB_ERR("failed to change state to invalid\n"); + ipa_rm_delete_resource( + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name); + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = false; + ipa_rm_delete_resource( + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name); + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = false; + ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL; + } + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; +} +EXPORT_SYMBOL(ipa_usb_deinit_teth_prot); + +/* Assumes lock already acquired */ +static int ipa3_usb_suspend_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = 0; + enum ipa3_usb_transport_type ttype; + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_SUSPEND_NO_RWAKEUP, ttype)) { + IPA_USB_ERR("Illegal operation.\n"); + result = -EPERM; + goto fail_exit; + } + + IPA_USB_DBG("Start suspend with no remote wakeup sequence: %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
+ "DPL channel":"Data Tethering channels"); + + if (ipa3_usb_check_disconnect_prot(teth_prot)) { + result = -EINVAL; + goto fail_exit; + } + + /* Stop DL/DPL channel */ + result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1); + if (result) { + IPA_USB_ERR("failed to disconnect DL/DPL channel\n"); + goto fail_exit; + } + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + /* Stop UL channel */ + result = ipa3_xdci_disconnect(ul_clnt_hdl, true, + ipa3_usb_ctx->qmi_req_id); + if (result) { + IPA_USB_ERR("failed disconnect UL channel\n"); + goto start_dl; + } + ipa3_usb_ctx->qmi_req_id++; + } + + /* Disconnect tethering protocol */ + result = ipa3_usb_disconnect_teth_prot(teth_prot); + if (result) + goto start_ul; + + result = ipa3_usb_release_prod(ttype); + if (result) { + IPA_USB_ERR("failed to release PROD\n"); + goto connect_teth; + } + + /* Change ipa_usb state to SUSPENDED_NO_RWAKEUP */ + if (!ipa3_usb_set_state(IPA_USB_SUSPENDED_NO_RWAKEUP, false, ttype)) + IPA_USB_ERR("failed to change state to suspend no rwakeup\n"); + + IPA_USB_DBG_LOW("exit\n"); + return 0; + +connect_teth: + (void)ipa3_usb_connect_teth_prot(teth_prot); +start_ul: + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) + (void)ipa3_xdci_connect(ul_clnt_hdl); +start_dl: + (void)ipa3_xdci_connect(dl_clnt_hdl); +fail_exit: + return result; +} + +int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot, bool with_remote_wakeup) +{ + int result = 0; + unsigned long flags; + enum ipa3_usb_transport_type ttype; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + + if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) { + IPA_USB_ERR("bad parameters\n"); + result = -EINVAL; + goto bad_params; + } + + if (!with_remote_wakeup) { + result = ipa3_usb_suspend_no_remote_wakeup(ul_clnt_hdl, + dl_clnt_hdl, teth_prot); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; + } + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_SUSPEND, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + result = -EPERM; + goto bad_params; + } + + IPA_USB_DBG("Start suspend sequence: %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? + "DPL channel":"Data Tethering channels"); + + /* Change state to SUSPEND_REQUESTED */ + if (!ipa3_usb_set_state(IPA_USB_SUSPEND_REQUESTED, false, ttype)) { + IPA_USB_ERR( + "fail changing state to suspend_req\n"); + result = -EFAULT; + goto bad_params; + } + + /* Stop UL channel & suspend DL/DPL EP */ + result = ipa3_xdci_suspend(ul_clnt_hdl, dl_clnt_hdl, + true, + ipa3_usb_ctx->qmi_req_id, IPA3_USB_IS_TTYPE_DPL(ttype)); + if (result) { + IPA_USB_ERR("failed to suspend\n"); + goto suspend_fail; + } + ipa3_usb_ctx->qmi_req_id++; + + result = ipa3_usb_release_prod(ttype); + if (result) { + IPA_USB_ERR("failed to release PROD\n"); + goto release_prod_fail; + } + + /* Check if DL/DPL data pending */ + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state == + IPA_USB_CONS_GRANTED && + ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) { + + IPA_USB_DBG("DL/DPL data pending, invoke remote wakeup\n"); + queue_work(ipa3_usb_ctx->wq, + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
+ &ipa3_usb_dpl_notify_remote_wakeup_work : + &ipa3_usb_notify_remote_wakeup_work); + } + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + + /* Change state to SUSPENDED */ + if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype)) + IPA_USB_ERR("failed to change state to suspended\n"); + + /* Check if DL/DPL data pending */ + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) { + IPA_USB_DBG_LOW( + "DL/DPL data is pending, invoking remote wakeup\n"); + queue_work(ipa3_usb_ctx->wq, IPA3_USB_IS_TTYPE_DPL(ttype) ? + &ipa3_usb_dpl_notify_remote_wakeup_work : + &ipa3_usb_notify_remote_wakeup_work); + } + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +release_prod_fail: + ipa3_xdci_resume(ul_clnt_hdl, dl_clnt_hdl, + IPA3_USB_IS_TTYPE_DPL(ttype)); +suspend_fail: + /* Change state back to CONNECTED */ + if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true, ttype)) + IPA_USB_ERR("failed to change state back to connected\n"); +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; +} +EXPORT_SYMBOL(ipa_usb_xdci_suspend); + +/* Assumes lock already acquired */ +static int ipa3_usb_resume_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = -EFAULT; + enum ipa3_usb_transport_type ttype; + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + IPA_USB_DBG("Start resume with no remote wakeup sequence: %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? + "DPL channel":"Data Tethering channels"); + + /* Request USB_PROD */ + result = ipa3_usb_request_prod(ttype); + if (result) + goto fail_exit; + + /* Connect tethering protocol */ + result = ipa3_usb_connect_teth_prot(teth_prot); + if (result) { + IPA_USB_ERR("failed to connect teth protocol\n"); + goto release_prod; + } + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + /* Start UL channel */ + result = ipa3_xdci_connect(ul_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to start UL channel\n"); + goto disconn_teth; + } + } + + /* Start DL/DPL channel */ + result = ipa3_xdci_connect(dl_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to start DL/DPL channel\n"); + goto stop_ul; + } + + /* Change state to CONNECTED */ + if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) { + IPA_USB_ERR("failed to change state to connected\n"); + result = -EFAULT; + goto stop_dl; + } + + return 0; + +stop_dl: + (void)ipa3_xdci_disconnect(dl_clnt_hdl, false, -1); +stop_ul: + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + (void)ipa3_xdci_disconnect(ul_clnt_hdl, true, + ipa3_usb_ctx->qmi_req_id); + ipa3_usb_ctx->qmi_req_id++; + } +disconn_teth: + (void)ipa3_usb_disconnect_teth_prot(teth_prot); +release_prod: + (void)ipa3_usb_release_prod(ttype); +fail_exit: + return result; +} + +int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = -EFAULT; + enum ipa3_usb_state prev_state; + unsigned long flags; + enum ipa3_usb_transport_type ttype; + + mutex_lock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG_LOW("entry\n"); + + if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) { + IPA_USB_ERR("bad parameters\n"); + result = -EINVAL; + goto bad_params; + } + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_RESUME, ttype)) { + IPA_USB_ERR("Illegal operation\n"); + result = -EPERM; + goto bad_params; + } + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + prev_state = 
ipa3_usb_ctx->ttype_ctx[ttype].state; + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + if (prev_state == IPA_USB_SUSPENDED_NO_RWAKEUP) { + result = ipa3_usb_resume_no_remote_wakeup(ul_clnt_hdl, + dl_clnt_hdl, teth_prot); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; + } + + IPA_USB_DBG("Start resume sequence: %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? + "DPL channel" : "Data Tethering channels"); + + /* Change state to RESUME_IN_PROGRESS */ + if (!ipa3_usb_set_state(IPA_USB_RESUME_IN_PROGRESS, false, ttype)) { + IPA_USB_ERR("failed to change state to resume_in_progress\n"); + result = -EFAULT; + goto bad_params; + } + + /* Request USB_PROD */ + result = ipa3_usb_request_prod(ttype); + if (result) + goto prod_req_fail; + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + /* Start UL channel */ + result = ipa3_start_gsi_channel(ul_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to start UL channel\n"); + goto start_ul_fail; + } + } + + /* Start DL/DPL channel */ + result = ipa3_start_gsi_channel(dl_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to start DL/DPL channel\n"); + goto start_dl_fail; + } + + /* Change state to CONNECTED */ + if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) { + IPA_USB_ERR("failed to change state to connected\n"); + result = -EFAULT; + goto state_change_connected_fail; + } + + IPA_USB_DBG_LOW("exit\n"); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return 0; + +state_change_connected_fail: + result = ipa3_stop_gsi_channel(dl_clnt_hdl); + if (result) + IPA_USB_ERR("Error stopping DL/DPL channel: %d\n", + result); +start_dl_fail: + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + result = ipa3_stop_gsi_channel(ul_clnt_hdl); + if (result) + IPA_USB_ERR("Error stopping UL channel: %d\n", result); + } +start_ul_fail: + ipa3_usb_release_prod(ttype); +prod_req_fail: + /* Change state back to prev_state */ + if (!ipa3_usb_set_state(prev_state, true, ttype)) + IPA_USB_ERR("failed to change state back to %s\n", + ipa3_usb_state_to_string(prev_state)); +bad_params: + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; +} +EXPORT_SYMBOL(ipa_usb_xdci_resume); + +static int __init ipa3_usb_init(void) +{ + int i; + unsigned long flags; + int res; + + pr_debug("entry\n"); + ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL); + if (ipa3_usb_ctx == NULL) { + pr_err(":ipa_usb init failed\n"); + return -EFAULT; + } + memset(ipa3_usb_ctx, 0, sizeof(struct ipa3_usb_context)); + + for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++) + ipa3_usb_ctx->teth_prot_ctx[i].state = + IPA_USB_TETH_PROT_INVALID; + ipa3_usb_ctx->num_init_prot = 0; + init_completion(&ipa3_usb_ctx->dev_ready_comp); + ipa3_usb_ctx->qmi_req_id = 0; + spin_lock_init(&ipa3_usb_ctx->state_lock); + ipa3_usb_ctx->dl_data_pending = false; + mutex_init(&ipa3_usb_ctx->general_mutex); + + for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) { + ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_valid = false; + ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_valid = false; + init_completion(&ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_comp); + ipa3_usb_ctx->ttype_ctx[i].user_data = NULL; + } + + spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) { + ipa3_usb_ctx->ttype_ctx[i].state = IPA_USB_INVALID; + ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_state = + IPA_USB_CONS_RELEASED; + } + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + + ipa3_usb_ctx->wq = create_singlethread_workqueue("ipa_usb_wq"); + if (!ipa3_usb_ctx->wq) { + pr_err("failed to create workqueue\n"); + 
res = -EFAULT; + goto ipa_usb_workqueue_fail; + } + + ipa_usb_debugfs_init(); + + pr_info("exit: IPA_USB init success!\n"); + + return 0; + +ipa_usb_workqueue_fail: + pr_err("init failed (%d)\n", -res); + kfree(ipa3_usb_ctx); + return res; +} + +static void ipa3_usb_exit(void) +{ + IPA_USB_DBG_LOW("IPA_USB exit\n"); + ipa_usb_debugfs_remove(); + kfree(ipa3_usb_ctx); +} + +arch_initcall(ipa3_usb_init); +module_exit(ipa3_usb_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA USB client driver"); diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c new file mode 100644 index 000000000000..e6e044efd37e --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c @@ -0,0 +1,1538 @@ +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_common_i.h" + +#define ODU_BRIDGE_DRV_NAME "odu_ipa_bridge" + +#define ODU_BRIDGE_DBG(fmt, args...) \ + do { \ + pr_debug(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) +#define ODU_BRIDGE_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) +#define ODU_BRIDGE_ERR(fmt, args...) 
\
+ do { \
+ pr_err(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
+#define ODU_BRIDGE_FUNC_ENTRY() \
+ ODU_BRIDGE_DBG_LOW("ENTRY\n")
+#define ODU_BRIDGE_FUNC_EXIT() \
+ ODU_BRIDGE_DBG_LOW("EXIT\n")
+
+
+#define ODU_BRIDGE_IS_QMI_ADDR(daddr) \
+ (memcmp(&(daddr), &odu_bridge_ctx->llv6_addr, sizeof((daddr))) \
+ == 0)
+
+#define ODU_BRIDGE_IPV4_HDR_NAME "odu_br_ipv4"
+#define ODU_BRIDGE_IPV6_HDR_NAME "odu_br_ipv6"
+
+#define IPA_ODU_SYS_DESC_FIFO_SZ 0x800
+
+#ifdef CONFIG_COMPAT
+#define ODU_BRIDGE_IOC_SET_LLV6_ADDR32 _IOW(ODU_BRIDGE_IOC_MAGIC, \
+ ODU_BRIDGE_IOCTL_SET_LLV6_ADDR, \
+ compat_uptr_t)
+#endif
+
+#define IPA_ODU_VER_CHECK() \
+ do { \
+ ret = 0;\
+ if (ipa_get_hw_type() == IPA_HW_None) { \
+ pr_err("IPA HW is unknown\n"); \
+ ret = -EFAULT; \
+ } \
+ else if (ipa_get_hw_type() < IPA_HW_v3_0) \
+ ret = 1; \
+ } while (0)
+
+/**
+ * struct stats - driver statistics, viewable using debugfs
+ * @num_ul_packets: number of packets bridged in uplink direction
+ * @num_dl_packets: number of packets bridged in downlink direction
+ * @num_lan_packets: number of packets bridged to APPS on bridge mode
+ */
+struct stats {
+ u64 num_ul_packets;
+ u64 num_dl_packets;
+ u64 num_lan_packets;
+};
+
+/**
+ * struct odu_bridge_ctx - ODU bridge driver context information
+ * @class: kernel class pointer
+ * @dev_num: kernel device number
+ * @dev: kernel device struct pointer
+ * @cdev: kernel character device struct
+ * @netdev_name: network interface name
+ * @device_ethaddr: network interface ethernet address
+ * @priv: client's private data, to be used in client's callbacks
+ * @tx_dp_notify: client callback for handling IPA ODU_PROD callback
+ * @send_dl_skb: client callback for sending skb in downlink direction
+ * @stats: statistics, how many packets were transmitted using the SW bridge
+ * @is_connected: is the bridge connected?
+ * @is_suspended: is the bridge suspended? 
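+ * @ipa_sys_desc_size: descriptor FIFO size used for the sys pipes in router mode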
+ * @mode: ODU mode (router/bridge) + * @lock: for the initialization, connect and disconnect synchronization + * @llv6_addr: link local IPv6 address of ODU network interface + * @odu_br_ipv4_hdr_hdl: handle for partial ipv4 ethernet header + * @odu_br_ipv6_hdr_hdl: handle for partial ipv6 ethernet header + * @odu_prod_hdl: handle for IPA_CLIENT_ODU_PROD pipe + * @odu_emb_cons_hdl: handle for IPA_CLIENT_ODU_EMB_CONS pipe + * @odu_teth_cons_hdl: handle for IPA_CLIENT_ODU_TETH_CONS pipe + * @rm_comp: completion object for IP RM + * @wakeup_request: client callback to wakeup + */ +struct odu_bridge_ctx { + struct class *class; + dev_t dev_num; + struct device *dev; + struct cdev cdev; + char netdev_name[IPA_RESOURCE_NAME_MAX]; + u8 device_ethaddr[ETH_ALEN]; + void *priv; + ipa_notify_cb tx_dp_notify; + int (*send_dl_skb)(void *priv, struct sk_buff *skb); + struct stats stats; + bool is_connected; + bool is_suspended; + enum odu_bridge_mode mode; + struct mutex lock; + struct in6_addr llv6_addr; + uint32_t odu_br_ipv4_hdr_hdl; + uint32_t odu_br_ipv6_hdr_hdl; + u32 odu_prod_hdl; + u32 odu_emb_cons_hdl; + u32 odu_teth_cons_hdl; + u32 ipa_sys_desc_size; + void *logbuf; + void *logbuf_low; + struct completion rm_comp; + void (*wakeup_request)(void *); +}; +static struct odu_bridge_ctx *odu_bridge_ctx; + +#ifdef CONFIG_DEBUG_FS +#define ODU_MAX_MSG_LEN 512 +static char dbg_buff[ODU_MAX_MSG_LEN]; +#endif + +static void odu_bridge_emb_cons_cb(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + ODU_BRIDGE_FUNC_ENTRY(); + if (evt != IPA_RECEIVE) { + ODU_BRIDGE_ERR("unexpected event\n"); + WARN_ON(1); + return; + } + odu_bridge_ctx->send_dl_skb(priv, (struct sk_buff *)data); + odu_bridge_ctx->stats.num_dl_packets++; + ODU_BRIDGE_FUNC_EXIT(); +} + +static void odu_bridge_teth_cons_cb(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct ipv6hdr *ipv6hdr; + struct sk_buff *skb = (struct sk_buff *)data; + struct sk_buff *skb_copied; + + ODU_BRIDGE_FUNC_ENTRY(); + if (evt != IPA_RECEIVE) { + ODU_BRIDGE_ERR("unexpected event\n"); + WARN_ON(1); + return; + } + + ipv6hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN); + if (ipv6hdr->version == 6 && + ipv6_addr_is_multicast(&ipv6hdr->daddr)) { + ODU_BRIDGE_DBG_LOW("Multicast pkt, send to APPS and adapter\n"); + skb_copied = skb_clone(skb, GFP_KERNEL); + if (skb_copied) { + odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv, + IPA_RECEIVE, + (unsigned long) skb_copied); + odu_bridge_ctx->stats.num_lan_packets++; + } else { + ODU_BRIDGE_ERR("No memory\n"); + } + } + + odu_bridge_ctx->send_dl_skb(priv, skb); + odu_bridge_ctx->stats.num_dl_packets++; + ODU_BRIDGE_FUNC_EXIT(); +} + +static int odu_bridge_connect_router(void) +{ + struct ipa_sys_connect_params odu_prod_params; + struct ipa_sys_connect_params odu_emb_cons_params; + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + memset(&odu_prod_params, 0, sizeof(odu_prod_params)); + memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params)); + + /* configure RX (ODU->IPA) EP */ + odu_prod_params.client = IPA_CLIENT_ODU_PROD; + odu_prod_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN; + odu_prod_params.ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT; + odu_prod_params.desc_fifo_sz = odu_bridge_ctx->ipa_sys_desc_size; + odu_prod_params.priv = odu_bridge_ctx->priv; + odu_prod_params.notify = odu_bridge_ctx->tx_dp_notify; + res = ipa_setup_sys_pipe(&odu_prod_params, + &odu_bridge_ctx->odu_prod_hdl); + if (res) { + ODU_BRIDGE_ERR("fail to setup sys pipe ODU_PROD %d\n", res); + goto fail_odu_prod; + } + + /* configure 
TX (IPA->ODU) EP */ + odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS; + odu_emb_cons_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN; + odu_emb_cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT; + odu_emb_cons_params.desc_fifo_sz = odu_bridge_ctx->ipa_sys_desc_size; + odu_emb_cons_params.priv = odu_bridge_ctx->priv; + odu_emb_cons_params.notify = odu_bridge_emb_cons_cb; + res = ipa_setup_sys_pipe(&odu_emb_cons_params, + &odu_bridge_ctx->odu_emb_cons_hdl); + if (res) { + ODU_BRIDGE_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res); + goto fail_odu_emb_cons; + } + + ODU_BRIDGE_DBG("odu_prod_hdl = %d, odu_emb_cons_hdl = %d\n", + odu_bridge_ctx->odu_prod_hdl, odu_bridge_ctx->odu_emb_cons_hdl); + + ODU_BRIDGE_FUNC_EXIT(); + + return 0; + +fail_odu_emb_cons: + ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl); + odu_bridge_ctx->odu_prod_hdl = 0; +fail_odu_prod: + return res; +} + +static int odu_bridge_connect_bridge(void) +{ + struct ipa_sys_connect_params odu_prod_params; + struct ipa_sys_connect_params odu_emb_cons_params; + struct ipa_sys_connect_params odu_teth_cons_params; + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + memset(&odu_prod_params, 0, sizeof(odu_prod_params)); + memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params)); + + /* Build IPA Resource manager dependency graph */ + ODU_BRIDGE_DBG_LOW("build dependency graph\n"); + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res && res != -EINPROGRESS) { + ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n"); + goto fail_add_dependency_1; + } + + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_ODU_ADAPT_CONS); + if (res && res != -EINPROGRESS) { + ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n"); + goto fail_add_dependency_2; + } + + /* configure RX (ODU->IPA) EP */ + odu_prod_params.client = IPA_CLIENT_ODU_PROD; + odu_prod_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ; + odu_prod_params.priv = odu_bridge_ctx->priv; + odu_prod_params.notify = odu_bridge_ctx->tx_dp_notify; + odu_prod_params.skip_ep_cfg = true; + res = ipa_setup_sys_pipe(&odu_prod_params, + &odu_bridge_ctx->odu_prod_hdl); + if (res) { + ODU_BRIDGE_ERR("fail to setup sys pipe ODU_PROD %d\n", res); + goto fail_odu_prod; + } + + /* configure TX tethered (IPA->ODU) EP */ + odu_teth_cons_params.client = IPA_CLIENT_ODU_TETH_CONS; + odu_teth_cons_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ; + odu_teth_cons_params.priv = odu_bridge_ctx->priv; + odu_teth_cons_params.notify = odu_bridge_teth_cons_cb; + odu_teth_cons_params.skip_ep_cfg = true; + res = ipa_setup_sys_pipe(&odu_teth_cons_params, + &odu_bridge_ctx->odu_teth_cons_hdl); + if (res) { + ODU_BRIDGE_ERR("fail to setup sys pipe ODU_TETH_CONS %d\n", + res); + goto fail_odu_teth_cons; + } + + /* configure TX embedded(IPA->ODU) EP */ + odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS; + odu_emb_cons_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN; + odu_emb_cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT; + odu_emb_cons_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ; + odu_emb_cons_params.priv = odu_bridge_ctx->priv; + odu_emb_cons_params.notify = odu_bridge_emb_cons_cb; + res = ipa_setup_sys_pipe(&odu_emb_cons_params, + &odu_bridge_ctx->odu_emb_cons_hdl); + if (res) { + ODU_BRIDGE_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res); + goto fail_odu_emb_cons; + } + + ODU_BRIDGE_DBG_LOW("odu_prod_hdl = %d, odu_emb_cons_hdl = %d\n", + odu_bridge_ctx->odu_prod_hdl, odu_bridge_ctx->odu_emb_cons_hdl); + ODU_BRIDGE_DBG_LOW("odu_teth_cons_hdl = %d\n", + 
odu_bridge_ctx->odu_teth_cons_hdl); + + ODU_BRIDGE_FUNC_EXIT(); + + return 0; + +fail_odu_emb_cons: + ipa_teardown_sys_pipe(odu_bridge_ctx->odu_teth_cons_hdl); + odu_bridge_ctx->odu_teth_cons_hdl = 0; +fail_odu_teth_cons: + ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl); + odu_bridge_ctx->odu_prod_hdl = 0; +fail_odu_prod: + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_ODU_ADAPT_CONS); +fail_add_dependency_2: + ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD, + IPA_RM_RESOURCE_Q6_CONS); +fail_add_dependency_1: + return res; +} + +static int odu_bridge_disconnect_router(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl); + if (res) + ODU_BRIDGE_ERR("teardown ODU PROD failed\n"); + odu_bridge_ctx->odu_prod_hdl = 0; + + res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_emb_cons_hdl); + if (res) + ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n"); + odu_bridge_ctx->odu_emb_cons_hdl = 0; + + ODU_BRIDGE_FUNC_EXIT(); + + return 0; +} + +static int odu_bridge_disconnect_bridge(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl); + if (res) + ODU_BRIDGE_ERR("teardown ODU PROD failed\n"); + odu_bridge_ctx->odu_prod_hdl = 0; + + res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_teth_cons_hdl); + if (res) + ODU_BRIDGE_ERR("teardown ODU TETH CONS failed\n"); + odu_bridge_ctx->odu_teth_cons_hdl = 0; + + res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_emb_cons_hdl); + if (res) + ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n"); + odu_bridge_ctx->odu_emb_cons_hdl = 0; + + /* Delete IPA Resource manager dependency graph */ + ODU_BRIDGE_DBG("deleting dependency graph\n"); + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res && res != -EINPROGRESS) + ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n"); + + res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_ODU_ADAPT_CONS); + if (res && res != -EINPROGRESS) + ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n"); + + return 0; +} + +/** + * odu_bridge_disconnect() - Disconnect odu bridge + * + * Disconnect all pipes and deletes IPA RM dependencies on bridge mode + * + * Return codes: 0- success, error otherwise + */ +int odu_bridge_disconnect(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + if (!odu_bridge_ctx) { + ODU_BRIDGE_ERR("Not initialized\n"); + return -EFAULT; + } + + if (!odu_bridge_ctx->is_connected) { + ODU_BRIDGE_ERR("Not connected\n"); + return -EFAULT; + } + + mutex_lock(&odu_bridge_ctx->lock); + if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) { + res = odu_bridge_disconnect_router(); + if (res) { + ODU_BRIDGE_ERR("disconnect_router failed %d\n", res); + goto out; + } + } else { + res = odu_bridge_disconnect_bridge(); + if (res) { + ODU_BRIDGE_ERR("disconnect_bridge failed %d\n", res); + goto out; + } + } + + odu_bridge_ctx->is_connected = false; + res = 0; +out: + mutex_unlock(&odu_bridge_ctx->lock); + ODU_BRIDGE_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(odu_bridge_disconnect); + +/** + * odu_bridge_connect() - Connect odu bridge. 
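+ *
+ * Illustrative call sequence (informal sketch, not an excerpt from any
+ * client driver; error handling is abbreviated): a client that completed
+ * odu_bridge_init() successfully is expected to pair connect/disconnect
+ * around the lifetime of the data path:
+ *
+ *	ret = odu_bridge_connect();
+ *	if (ret)
+ *		pr_err("bridge connect failed %d\n", ret);
+ *	...
+ *	ret = odu_bridge_disconnect();
+ *	if (ret)
+ *		pr_err("bridge disconnect failed %d\n", ret);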
+ * + * Call to the mode-specific connect function for connection IPA pipes + * and adding IPA RM dependencies + + * Return codes: 0: success + * -EINVAL: invalid parameters + * -EPERM: Operation not permitted as the bridge is already + * connected + */ +int odu_bridge_connect(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + if (!odu_bridge_ctx) { + ODU_BRIDGE_ERR("Not initialized\n"); + return -EFAULT; + } + + if (odu_bridge_ctx->is_connected) { + ODU_BRIDGE_ERR("already connected\n"); + return -EFAULT; + } + + mutex_lock(&odu_bridge_ctx->lock); + if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) { + res = odu_bridge_connect_router(); + if (res) { + ODU_BRIDGE_ERR("connect_router failed\n"); + goto bail; + } + } else { + res = odu_bridge_connect_bridge(); + if (res) { + ODU_BRIDGE_ERR("connect_bridge failed\n"); + goto bail; + } + } + + odu_bridge_ctx->is_connected = true; + res = 0; +bail: + mutex_unlock(&odu_bridge_ctx->lock); + ODU_BRIDGE_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(odu_bridge_connect); + +/** + * odu_bridge_set_mode() - Set bridge mode to Router/Bridge + * @mode: mode to be set + */ +static int odu_bridge_set_mode(enum odu_bridge_mode mode) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + if (mode < 0 || mode >= ODU_BRIDGE_MODE_MAX) { + ODU_BRIDGE_ERR("Unsupported mode: %d\n", mode); + return -EFAULT; + } + + ODU_BRIDGE_DBG_LOW("setting mode: %d\n", mode); + mutex_lock(&odu_bridge_ctx->lock); + + if (odu_bridge_ctx->mode == mode) { + ODU_BRIDGE_DBG_LOW("same mode\n"); + res = 0; + goto bail; + } + + if (odu_bridge_ctx->is_connected) { + /* first disconnect the old configuration */ + if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) { + res = odu_bridge_disconnect_router(); + if (res) { + ODU_BRIDGE_ERR("disconnect_router failed\n"); + goto bail; + } + } else { + res = odu_bridge_disconnect_bridge(); + if (res) { + ODU_BRIDGE_ERR("disconnect_bridge failed\n"); + goto bail; + } + } + + /* connect the new configuration */ + if (mode == ODU_BRIDGE_MODE_ROUTER) { + res = odu_bridge_connect_router(); + if (res) { + ODU_BRIDGE_ERR("connect_router failed\n"); + goto bail; + } + } else { + res = odu_bridge_connect_bridge(); + if (res) { + ODU_BRIDGE_ERR("connect_bridge failed\n"); + goto bail; + } + } + } + odu_bridge_ctx->mode = mode; + res = 0; +bail: + mutex_unlock(&odu_bridge_ctx->lock); + ODU_BRIDGE_FUNC_EXIT(); + return res; +}; + +/** + * odu_bridge_set_llv6_addr() - Set link local ipv6 address + * @llv6_addr: odu network interface link local address + * + * This function sets the link local ipv6 address provided by IOCTL + */ +static int odu_bridge_set_llv6_addr(struct in6_addr *llv6_addr) +{ + struct in6_addr llv6_addr_host; + + ODU_BRIDGE_FUNC_ENTRY(); + + llv6_addr_host.s6_addr32[0] = ntohl(llv6_addr->s6_addr32[0]); + llv6_addr_host.s6_addr32[1] = ntohl(llv6_addr->s6_addr32[1]); + llv6_addr_host.s6_addr32[2] = ntohl(llv6_addr->s6_addr32[2]); + llv6_addr_host.s6_addr32[3] = ntohl(llv6_addr->s6_addr32[3]); + + memcpy(&odu_bridge_ctx->llv6_addr, &llv6_addr_host, + sizeof(odu_bridge_ctx->llv6_addr)); + ODU_BRIDGE_DBG_LOW("LLV6 addr: %pI6c\n", &odu_bridge_ctx->llv6_addr); + + ODU_BRIDGE_FUNC_EXIT(); + + return 0; +}; + +static long odu_bridge_ioctl(struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + int res = 0; + struct in6_addr llv6_addr; + + ODU_BRIDGE_DBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd)); + + if ((_IOC_TYPE(cmd) != ODU_BRIDGE_IOC_MAGIC) || + (_IOC_NR(cmd) >= ODU_BRIDGE_IOCTL_MAX)) { + ODU_BRIDGE_ERR("Invalid ioctl\n"); + return -ENOIOCTLCMD; + 
} + + switch (cmd) { + case ODU_BRIDGE_IOC_SET_MODE: + ODU_BRIDGE_DBG("ODU_BRIDGE_IOC_SET_MODE ioctl called\n"); + res = odu_bridge_set_mode(arg); + if (res) { + ODU_BRIDGE_ERR("Error, res = %d\n", res); + break; + } + break; + + case ODU_BRIDGE_IOC_SET_LLV6_ADDR: + ODU_BRIDGE_DBG("ODU_BRIDGE_IOC_SET_LLV6_ADDR ioctl called\n"); + res = copy_from_user(&llv6_addr, + (struct in6_addr *)arg, + sizeof(llv6_addr)); + if (res) { + ODU_BRIDGE_ERR("Error, res = %d\n", res); + res = -EFAULT; + break; + } + + res = odu_bridge_set_llv6_addr(&llv6_addr); + if (res) { + ODU_BRIDGE_ERR("Error, res = %d\n", res); + break; + } + break; + + default: + ODU_BRIDGE_ERR("Unknown ioctl: %d\n", cmd); + WARN_ON(1); + } + + return res; +} + +#ifdef CONFIG_COMPAT +static long compat_odu_bridge_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case ODU_BRIDGE_IOC_SET_LLV6_ADDR32: + cmd = ODU_BRIDGE_IOC_SET_LLV6_ADDR; + break; + case ODU_BRIDGE_IOC_SET_MODE: + break; + default: + return -ENOIOCTLCMD; + } + return odu_bridge_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +#ifdef CONFIG_DEBUG_FS +static struct dentry *dent; +static struct dentry *dfile_stats; +static struct dentry *dfile_mode; + +static ssize_t odu_debugfs_stats(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int nbytes = 0; + + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "UL packets: %lld\n", + odu_bridge_ctx->stats.num_ul_packets); + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "DL packets: %lld\n", + odu_bridge_ctx->stats.num_dl_packets); + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "LAN packets: %lld\n", + odu_bridge_ctx->stats.num_lan_packets); + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t odu_debugfs_hw_bridge_mode_write(struct file *file, + const char __user *ubuf, + size_t count, + loff_t *ppos) +{ + unsigned long missing; + enum odu_bridge_mode mode; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + if (count > 0) + dbg_buff[count-1] = '\0'; + + if (strcmp(dbg_buff, "router") == 0) { + mode = ODU_BRIDGE_MODE_ROUTER; + } else if (strcmp(dbg_buff, "bridge") == 0) { + mode = ODU_BRIDGE_MODE_BRIDGE; + } else { + ODU_BRIDGE_ERR("Bad mode, got %s,\n" + "Use or .\n", dbg_buff); + return count; + } + + odu_bridge_set_mode(mode); + return count; +} + +static ssize_t odu_debugfs_hw_bridge_mode_read(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + int nbytes = 0; + + switch (odu_bridge_ctx->mode) { + case ODU_BRIDGE_MODE_ROUTER: + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "router\n"); + break; + case ODU_BRIDGE_MODE_BRIDGE: + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "bridge\n"); + break; + default: + nbytes += scnprintf(&dbg_buff[nbytes], + ODU_MAX_MSG_LEN - nbytes, + "mode error\n"); + break; + + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +const struct file_operations odu_stats_ops = { + .read = odu_debugfs_stats, +}; + +const struct file_operations odu_hw_bridge_mode_ops = { + .read = odu_debugfs_hw_bridge_mode_read, + .write = odu_debugfs_hw_bridge_mode_write, +}; + +static void odu_debugfs_init(void) +{ + const mode_t read_only_mode = 0444; + const mode_t read_write_mode = 0666; + + dent = debugfs_create_dir("odu_ipa_bridge", 0); + if 
(IS_ERR(dent)) { + ODU_BRIDGE_ERR("fail to create folder odu_ipa_bridge\n"); + return; + } + + dfile_stats = + debugfs_create_file("stats", read_only_mode, dent, + 0, &odu_stats_ops); + if (!dfile_stats || IS_ERR(dfile_stats)) { + ODU_BRIDGE_ERR("fail to create file stats\n"); + goto fail; + } + + dfile_mode = + debugfs_create_file("mode", read_write_mode, + dent, 0, &odu_hw_bridge_mode_ops); + if (!dfile_mode || + IS_ERR(dfile_mode)) { + ODU_BRIDGE_ERR("fail to create file dfile_mode\n"); + goto fail; + } + + return; +fail: + debugfs_remove_recursive(dent); +} + +static void odu_debugfs_destroy(void) +{ + debugfs_remove_recursive(dent); +} + +#else +static void odu_debugfs_init(void) {} +static void odu_debugfs_destroy(void) {} +#endif /* CONFIG_DEBUG_FS */ + + +static const struct file_operations odu_bridge_drv_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = odu_bridge_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_odu_bridge_ioctl, +#endif +}; + +/** + * odu_bridge_tx_dp() - Send skb to ODU bridge + * @skb: skb to send + * @metadata: metadata on packet + * + * This function handles an uplink packet. + * In Router Mode: + * packet is sent directly to IPA. + * In Bridge Mode: + * packet is classified if it should arrive to network stack. + * QMI IP packet should arrive to APPS network stack + * IPv6 Multicast packet should arrive to APPS network stack and Q6 + * + * Return codes: 0- success, error otherwise + */ +int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata) +{ + struct sk_buff *skb_copied = NULL; + struct ipv6hdr *ipv6hdr; + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + switch (odu_bridge_ctx->mode) { + case ODU_BRIDGE_MODE_ROUTER: + /* Router mode - pass skb to IPA */ + res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata); + if (res) { + ODU_BRIDGE_DBG("tx dp failed %d\n", res); + goto out; + } + odu_bridge_ctx->stats.num_ul_packets++; + goto out; + + case ODU_BRIDGE_MODE_BRIDGE: + ipv6hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN); + if (ipv6hdr->version == 6 && + ODU_BRIDGE_IS_QMI_ADDR(ipv6hdr->daddr)) { + ODU_BRIDGE_DBG_LOW("QMI packet\n"); + skb_copied = skb_clone(skb, GFP_KERNEL); + if (!skb_copied) { + ODU_BRIDGE_ERR("No memory\n"); + return -ENOMEM; + } + odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv, + IPA_RECEIVE, + (unsigned long)skb_copied); + odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv, + IPA_WRITE_DONE, + (unsigned long)skb); + odu_bridge_ctx->stats.num_ul_packets++; + odu_bridge_ctx->stats.num_lan_packets++; + res = 0; + goto out; + } + + if (ipv6hdr->version == 6 && + ipv6_addr_is_multicast(&ipv6hdr->daddr)) { + ODU_BRIDGE_DBG_LOW( + "Multicast pkt, send to APPS and IPA\n"); + skb_copied = skb_clone(skb, GFP_KERNEL); + if (!skb_copied) { + ODU_BRIDGE_ERR("No memory\n"); + return -ENOMEM; + } + + res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata); + if (res) { + ODU_BRIDGE_DBG("tx dp failed %d\n", res); + dev_kfree_skb(skb_copied); + goto out; + } + + odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv, + IPA_RECEIVE, + (unsigned long)skb_copied); + odu_bridge_ctx->stats.num_ul_packets++; + odu_bridge_ctx->stats.num_lan_packets++; + goto out; + } + + res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata); + if (res) { + ODU_BRIDGE_DBG("tx dp failed %d\n", res); + goto out; + } + odu_bridge_ctx->stats.num_ul_packets++; + goto out; + + default: + ODU_BRIDGE_ERR("Unsupported mode: %d\n", odu_bridge_ctx->mode); + WARN_ON(1); + res = -EFAULT; + + } +out: + ODU_BRIDGE_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(odu_bridge_tx_dp); + +static
int odu_bridge_add_hdrs(void) +{ + struct ipa_ioc_add_hdr *hdrs; + struct ipa_hdr_add *ipv4_hdr; + struct ipa_hdr_add *ipv6_hdr; + struct ethhdr *eth_ipv4; + struct ethhdr *eth_ipv6; + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + hdrs = kzalloc(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr), + GFP_KERNEL); + if (!hdrs) { + ODU_BRIDGE_ERR("no mem\n"); + res = -ENOMEM; + goto out; + } + ipv4_hdr = &hdrs->hdr[0]; + eth_ipv4 = (struct ethhdr *)(ipv4_hdr->hdr); + ipv6_hdr = &hdrs->hdr[1]; + eth_ipv6 = (struct ethhdr *)(ipv6_hdr->hdr); + strlcpy(ipv4_hdr->name, ODU_BRIDGE_IPV4_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + memcpy(eth_ipv4->h_source, odu_bridge_ctx->device_ethaddr, ETH_ALEN); + eth_ipv4->h_proto = htons(ETH_P_IP); + ipv4_hdr->hdr_len = ETH_HLEN; + ipv4_hdr->is_partial = 1; + ipv4_hdr->is_eth2_ofst_valid = 1; + ipv4_hdr->eth2_ofst = 0; + strlcpy(ipv6_hdr->name, ODU_BRIDGE_IPV6_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + memcpy(eth_ipv6->h_source, odu_bridge_ctx->device_ethaddr, ETH_ALEN); + eth_ipv6->h_proto = htons(ETH_P_IPV6); + ipv6_hdr->hdr_len = ETH_HLEN; + ipv6_hdr->is_partial = 1; + ipv6_hdr->is_eth2_ofst_valid = 1; + ipv6_hdr->eth2_ofst = 0; + hdrs->commit = 1; + hdrs->num_hdrs = 2; + res = ipa_add_hdr(hdrs); + if (res) { + ODU_BRIDGE_ERR("Fail on Header-Insertion(%d)\n", res); + goto out_free_mem; + } + if (ipv4_hdr->status) { + ODU_BRIDGE_ERR("Fail on Header-Insertion ipv4(%d)\n", + ipv4_hdr->status); + res = ipv4_hdr->status; + goto out_free_mem; + } + if (ipv6_hdr->status) { + ODU_BRIDGE_ERR("Fail on Header-Insertion ipv6(%d)\n", + ipv6_hdr->status); + res = ipv6_hdr->status; + goto out_free_mem; + } + odu_bridge_ctx->odu_br_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl; + odu_bridge_ctx->odu_br_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl; + + res = 0; +out_free_mem: + kfree(hdrs); +out: + ODU_BRIDGE_FUNC_EXIT(); + return res; +} + +static void odu_bridge_del_hdrs(void) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *ipv4; + struct ipa_hdr_del *ipv6; + int result; + + del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) + + sizeof(*ipv6), GFP_KERNEL); + if (!del_hdr) + return; + del_hdr->commit = 1; + del_hdr->num_hdls = 2; + ipv4 = &del_hdr->hdl[0]; + ipv4->hdl = odu_bridge_ctx->odu_br_ipv4_hdr_hdl; + ipv6 = &del_hdr->hdl[1]; + ipv6->hdl = odu_bridge_ctx->odu_br_ipv6_hdr_hdl; + result = ipa_del_hdr(del_hdr); + if (result || ipv4->status || ipv6->status) + ODU_BRIDGE_ERR("ipa_del_hdr failed"); + kfree(del_hdr); +} + +/** + * odu_bridge_register_properties() - set Tx/Rx properties for ipacm + * + * Register the network interface interface with Tx and Rx properties + * Tx properties are for data flowing from IPA to adapter, they + * have Header-Insertion properties both for Ipv4 and Ipv6 Ethernet framing. + * Rx properties are for data flowing from adapter to IPA, they have + * simple rule which always "hit". 
+ * + */ +static int odu_bridge_register_properties(void) +{ + struct ipa_tx_intf tx_properties = {0}; + struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} }; + struct ipa_ioc_tx_intf_prop *ipv4_property; + struct ipa_ioc_tx_intf_prop *ipv6_property; + struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} }; + struct ipa_rx_intf rx_properties = {0}; + struct ipa_ioc_rx_intf_prop *rx_ipv4_property; + struct ipa_ioc_rx_intf_prop *rx_ipv6_property; + int res = 0; + + ODU_BRIDGE_FUNC_ENTRY(); + + tx_properties.prop = properties; + ipv4_property = &tx_properties.prop[0]; + ipv4_property->ip = IPA_IP_v4; + ipv4_property->dst_pipe = IPA_CLIENT_ODU_EMB_CONS; + ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + strlcpy(ipv4_property->hdr_name, ODU_BRIDGE_IPV4_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + ipv6_property = &tx_properties.prop[1]; + ipv6_property->ip = IPA_IP_v6; + ipv6_property->dst_pipe = IPA_CLIENT_ODU_EMB_CONS; + ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + strlcpy(ipv6_property->hdr_name, ODU_BRIDGE_IPV6_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + tx_properties.num_props = 2; + + rx_properties.prop = rx_ioc_properties; + rx_ipv4_property = &rx_properties.prop[0]; + rx_ipv4_property->ip = IPA_IP_v4; + rx_ipv4_property->attrib.attrib_mask = 0; + rx_ipv4_property->src_pipe = IPA_CLIENT_ODU_PROD; + rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_ipv6_property = &rx_properties.prop[1]; + rx_ipv6_property->ip = IPA_IP_v6; + rx_ipv6_property->attrib.attrib_mask = 0; + rx_ipv6_property->src_pipe = IPA_CLIENT_ODU_PROD; + rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_properties.num_props = 2; + + res = ipa_register_intf(odu_bridge_ctx->netdev_name, &tx_properties, + &rx_properties); + if (res) { + ODU_BRIDGE_ERR("fail on Tx/Rx properties registration %d\n", + res); + } + + ODU_BRIDGE_FUNC_EXIT(); + + return res; +} + +static void odu_bridge_deregister_properties(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + res = ipa_deregister_intf(odu_bridge_ctx->netdev_name); + if (res) + ODU_BRIDGE_ERR("Fail on Tx prop deregister %d\n", res); + ODU_BRIDGE_FUNC_EXIT(); +} + +/** + * odu_bridge_init() - Initialize the ODU bridge driver + * @params: initialization parameters + * + * This function initialize all bridge internal data and register odu bridge to + * kernel for IOCTL and debugfs. + * Header addition and properties are registered to IPA driver. 
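+ *
+ * Illustrative initialization sketch (informal; the fields shown follow the
+ * way this function consumes struct odu_bridge_params, whose layout is
+ * defined in include/linux/ipa_odu_bridge.h, and my_* identifiers are
+ * hypothetical):
+ *
+ *	struct odu_bridge_params params;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.netdev_name = "odu0";
+ *	params.priv = my_ctx;
+ *	params.tx_dp_notify = my_odu_prod_notify_cb;
+ *	params.send_dl_skb = my_send_dl_skb_cb;
+ *	memcpy(params.device_ethaddr, my_dev_mac, ETH_ALEN);
+ *	params.ipa_desc_size = IPA_ODU_SYS_DESC_FIFO_SZ;
+ *	ret = odu_bridge_init(&params);
+ *	if (ret)
+ *		pr_err("odu_bridge_init failed %d\n", ret);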
+ * + * Return codes: 0: success, + * -EINVAL - Bad parameter + * Other negative value - Failure + */ +int odu_bridge_init(struct odu_bridge_params *params) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + if (!params) { + ODU_BRIDGE_ERR("null pointer params\n"); + return -EINVAL; + } + if (!params->netdev_name) { + ODU_BRIDGE_ERR("null pointer params->netdev_name\n"); + return -EINVAL; + } + if (!params->tx_dp_notify) { + ODU_BRIDGE_ERR("null pointer params->tx_dp_notify\n"); + return -EINVAL; + } + if (!params->send_dl_skb) { + ODU_BRIDGE_ERR("null pointer params->send_dl_skb\n"); + return -EINVAL; + } + if (odu_bridge_ctx) { + ODU_BRIDGE_ERR("Already initialized\n"); + return -EFAULT; + } + if (!ipa_is_ready()) { + ODU_BRIDGE_ERR("IPA is not ready\n"); + return -EFAULT; + } + + ODU_BRIDGE_DBG("device_ethaddr=%pM\n", params->device_ethaddr); + + odu_bridge_ctx = kzalloc(sizeof(*odu_bridge_ctx), GFP_KERNEL); + if (!odu_bridge_ctx) { + ODU_BRIDGE_ERR("kzalloc err.\n"); + return -ENOMEM; + } + + odu_bridge_ctx->class = class_create(THIS_MODULE, ODU_BRIDGE_DRV_NAME); + if (!odu_bridge_ctx->class) { + ODU_BRIDGE_ERR("Class_create err.\n"); + res = -ENODEV; + goto fail_class_create; + } + + res = alloc_chrdev_region(&odu_bridge_ctx->dev_num, 0, 1, + ODU_BRIDGE_DRV_NAME); + if (res) { + ODU_BRIDGE_ERR("alloc_chrdev_region err.\n"); + res = -ENODEV; + goto fail_alloc_chrdev_region; + } + + odu_bridge_ctx->dev = device_create(odu_bridge_ctx->class, NULL, + odu_bridge_ctx->dev_num, odu_bridge_ctx, ODU_BRIDGE_DRV_NAME); + if (IS_ERR(odu_bridge_ctx->dev)) { + ODU_BRIDGE_ERR(":device_create err.\n"); + res = -ENODEV; + goto fail_device_create; + } + + cdev_init(&odu_bridge_ctx->cdev, &odu_bridge_drv_fops); + odu_bridge_ctx->cdev.owner = THIS_MODULE; + odu_bridge_ctx->cdev.ops = &odu_bridge_drv_fops; + + res = cdev_add(&odu_bridge_ctx->cdev, odu_bridge_ctx->dev_num, 1); + if (res) { + ODU_BRIDGE_ERR(":cdev_add err=%d\n", -res); + res = -ENODEV; + goto fail_cdev_add; + } + + odu_debugfs_init(); + + strlcpy(odu_bridge_ctx->netdev_name, params->netdev_name, + IPA_RESOURCE_NAME_MAX); + odu_bridge_ctx->priv = params->priv; + odu_bridge_ctx->tx_dp_notify = params->tx_dp_notify; + odu_bridge_ctx->send_dl_skb = params->send_dl_skb; + memcpy(odu_bridge_ctx->device_ethaddr, params->device_ethaddr, + ETH_ALEN); + odu_bridge_ctx->ipa_sys_desc_size = params->ipa_desc_size; + odu_bridge_ctx->mode = ODU_BRIDGE_MODE_ROUTER; + + mutex_init(&odu_bridge_ctx->lock); + + res = odu_bridge_add_hdrs(); + if (res) { + ODU_BRIDGE_ERR("fail on odu_bridge_add_hdr %d\n", res); + goto fail_add_hdrs; + } + + res = odu_bridge_register_properties(); + if (res) { + ODU_BRIDGE_ERR("fail on register properties %d\n", res); + goto fail_register_properties; + } + + ODU_BRIDGE_FUNC_EXIT(); + return 0; + +fail_register_properties: + odu_bridge_del_hdrs(); +fail_add_hdrs: + odu_debugfs_destroy(); +fail_cdev_add: + device_destroy(odu_bridge_ctx->class, odu_bridge_ctx->dev_num); +fail_device_create: + unregister_chrdev_region(odu_bridge_ctx->dev_num, 1); +fail_alloc_chrdev_region: + class_destroy(odu_bridge_ctx->class); +fail_class_create: + kfree(odu_bridge_ctx); + odu_bridge_ctx = NULL; + return res; +} +EXPORT_SYMBOL(odu_bridge_init); + +/** + * odu_bridge_cleanup() - De-Initialize the ODU bridge driver + * + * Return codes: 0: success, + * -EINVAL - Bad parameter + * Other negative value - Failure + */ +int odu_bridge_cleanup(void) +{ + ODU_BRIDGE_FUNC_ENTRY(); + + if (!odu_bridge_ctx) { + ODU_BRIDGE_ERR("Not initialized\n"); + return 
-EFAULT; + } + + if (odu_bridge_ctx->is_connected) { + ODU_BRIDGE_ERR("cannot deinit while bridge is connected\n"); + return -EFAULT; + } + + odu_bridge_deregister_properties(); + odu_bridge_del_hdrs(); + odu_debugfs_destroy(); + cdev_del(&odu_bridge_ctx->cdev); + device_destroy(odu_bridge_ctx->class, odu_bridge_ctx->dev_num); + unregister_chrdev_region(odu_bridge_ctx->dev_num, 1); + class_destroy(odu_bridge_ctx->class); + ipc_log_context_destroy(odu_bridge_ctx->logbuf); + ipc_log_context_destroy(odu_bridge_ctx->logbuf_low); + kfree(odu_bridge_ctx); + odu_bridge_ctx = NULL; + + ODU_BRIDGE_FUNC_EXIT(); + return 0; +} +EXPORT_SYMBOL(odu_bridge_cleanup); + +/* IPA Bridge implementation */ +#ifdef CONFIG_IPA3 + +static void ipa_br_rm_notify(void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + if (event == IPA_RM_RESOURCE_GRANTED) + complete(&odu_bridge_ctx->rm_comp); +} + +static int ipa_br_request_prod(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + reinit_completion(&odu_bridge_ctx->rm_comp); + ODU_BRIDGE_DBG("requesting odu prod\n"); + res = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD); + if (res) { + if (res != -EINPROGRESS) { + ODU_BRIDGE_ERR("failed to request prod %d\n", res); + return res; + } + wait_for_completion(&odu_bridge_ctx->rm_comp); + } + + ODU_BRIDGE_FUNC_EXIT(); + return 0; + +} + +static int ipa_br_release_prod(void) +{ + int res; + + ODU_BRIDGE_FUNC_ENTRY(); + + reinit_completion(&odu_bridge_ctx->rm_comp); + ODU_BRIDGE_DBG("releasing odu prod\n"); + res = ipa_rm_release_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD); + if (res) { + ODU_BRIDGE_ERR("failed to release prod %d\n", res); + return res; + } + + ODU_BRIDGE_FUNC_EXIT(); + return 0; + +} + +static int ipa_br_cons_request(void) +{ + ODU_BRIDGE_FUNC_ENTRY(); + if (odu_bridge_ctx->is_suspended) + odu_bridge_ctx->wakeup_request(odu_bridge_ctx->priv); + ODU_BRIDGE_FUNC_EXIT(); + return 0; +} + +static int ipa_br_cons_release(void) +{ + ODU_BRIDGE_FUNC_ENTRY(); + ODU_BRIDGE_FUNC_EXIT(); + return 0; +} + +/* IPA Bridge API is the new API which will replace the old odu_bridge API */ +int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl) +{ + int ret; + struct ipa_rm_create_params create_params; + + if (!params || !params->wakeup_request || !hdl) { + ODU_BRIDGE_ERR("NULL arg\n"); + return -EINVAL; + } + + + ret = odu_bridge_init(&params->info); + if (ret) + return ret; + + odu_bridge_ctx->wakeup_request = params->wakeup_request; + + /* create IPA RM resources for power management */ + init_completion(&odu_bridge_ctx->rm_comp); + memset(&create_params, 0, sizeof(create_params)); + create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD; + create_params.reg_params.user_data = odu_bridge_ctx; + create_params.reg_params.notify_cb = ipa_br_rm_notify; + create_params.floor_voltage = IPA_VOLTAGE_SVS; + ret = ipa_rm_create_resource(&create_params); + if (ret) { + ODU_BRIDGE_ERR("failed to create RM prod %d\n", ret); + goto fail_rm_prod; + } + + ret = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_ODU_ADAPT_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (ret) { + ODU_BRIDGE_ERR("failed to add ODU->APPS dependency %d\n", ret); + goto fail_add_dep; + } + + memset(&create_params, 0, sizeof(create_params)); + create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS; + create_params.request_resource = ipa_br_cons_request; + create_params.release_resource = ipa_br_cons_release; + create_params.floor_voltage = IPA_VOLTAGE_SVS; + ret = ipa_rm_create_resource(&create_params); + if (ret) { + ODU_BRIDGE_ERR("failed to create RM
cons %d\n", ret); + goto fail_rm_cons; + } + + /* handle is ignored for now */ + *hdl = 0; + + return 0; + +fail_rm_cons: + ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD, + IPA_RM_RESOURCE_APPS_CONS); +fail_add_dep: + ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD); +fail_rm_prod: + odu_bridge_cleanup(); + return ret; +} +EXPORT_SYMBOL(ipa_bridge_init); + +int ipa_bridge_connect(u32 hdl) +{ + int ret; + + if (!odu_bridge_ctx) { + ODU_BRIDGE_ERR("Not initialized\n"); + return -EFAULT; + } + + if (odu_bridge_ctx->is_connected) { + ODU_BRIDGE_ERR("already connected\n"); + return -EFAULT; + } + + ret = ipa_br_request_prod(); + if (ret) + return ret; + + return odu_bridge_connect(); +} +EXPORT_SYMBOL(ipa_bridge_connect); + +int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth) +{ + struct ipa_rm_perf_profile profile = {0}; + int ret; + + profile.max_supported_bandwidth_mbps = bandwidth; + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_PROD, &profile); + if (ret) { + ODU_BRIDGE_ERR("failed to set perf profile to prod %d\n", ret); + return ret; + } + + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_CONS, &profile); + if (ret) { + ODU_BRIDGE_ERR("failed to set perf profile to cons %d\n", ret); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(ipa_bridge_set_perf_profile); + +int ipa_bridge_disconnect(u32 hdl) +{ + int ret; + + ret = odu_bridge_disconnect(); + if (ret) + return ret; + + ret = ipa_br_release_prod(); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL(ipa_bridge_disconnect); + +int ipa_bridge_suspend(u32 hdl) +{ + int ret; + + if (!odu_bridge_ctx) { + ODU_BRIDGE_ERR("Not initialized\n"); + return -EFAULT; + } + + if (!odu_bridge_ctx->is_connected) { + ODU_BRIDGE_ERR("bridge is disconnected\n"); + return -EFAULT; + } + + if (odu_bridge_ctx->is_suspended) { + ODU_BRIDGE_ERR("bridge is already suspended\n"); + return -EFAULT; + } + + /* stop cons channel to prevent downlink data during suspend */ + ret = ipa_stop_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl); + if (ret) { + ODU_BRIDGE_ERR("failed to stop CONS channel %d\n", ret); + return ret; + } + + ret = ipa_br_release_prod(); + if (ret) { + ODU_BRIDGE_ERR("failed to release prod %d\n", ret); + ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl); + return ret; + } + odu_bridge_ctx->is_suspended = true; + + return 0; +} +EXPORT_SYMBOL(ipa_bridge_suspend); + +int ipa_bridge_resume(u32 hdl) +{ + int ret; + + if (!odu_bridge_ctx) { + ODU_BRIDGE_ERR("Not initialized\n"); + return -EFAULT; + } + + if (!odu_bridge_ctx->is_connected) { + ODU_BRIDGE_ERR("bridge is disconnected\n"); + return -EFAULT; + } + + if (!odu_bridge_ctx->is_suspended) { + ODU_BRIDGE_ERR("bridge is not suspended\n"); + return -EFAULT; + } + + ret = ipa_br_request_prod(); + if (ret) + return ret; + + ret = ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl); + if (ret) { + ODU_BRIDGE_ERR("failed to start CONS channel %d\n", ret); + return ret; + } + odu_bridge_ctx->is_suspended = false; + + return 0; +} +EXPORT_SYMBOL(ipa_bridge_resume); + +int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb, + struct ipa_tx_meta *metadata) +{ + return odu_bridge_tx_dp(skb, metadata); +} +EXPORT_SYMBOL(ipa_bridge_tx_dp); + +int ipa_bridge_cleanup(u32 hdl) +{ + ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD, + IPA_RM_RESOURCE_APPS_CONS); + ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD); + ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS); + return odu_bridge_cleanup(); +} +EXPORT_SYMBOL(ipa_bridge_cleanup); + +#endif /* 
CONFIG_IPA3 */ + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ODU bridge driver"); diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c new file mode 100644 index 000000000000..9149ba710aa6 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c @@ -0,0 +1,2479 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include "rndis_ipa_trace.h" + +#define DRV_NAME "RNDIS_IPA" +#define DEBUGFS_DIR_NAME "rndis_ipa" +#define DEBUGFS_AGGR_DIR_NAME "rndis_ipa_aggregation" +#define NETDEV_NAME "rndis" +#define DRV_RESOURCE_ID IPA_RM_RESOURCE_RNDIS_PROD +#define IPV4_HDR_NAME "rndis_eth_ipv4" +#define IPV6_HDR_NAME "rndis_eth_ipv6" +#define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS +#define INACTIVITY_MSEC_DELAY 100 +#define DEFAULT_OUTSTANDING_HIGH 64 +#define DEFAULT_OUTSTANDING_LOW 32 +#define DEBUGFS_TEMP_BUF_SIZE 4 +#define RNDIS_IPA_PKT_TYPE 0x00000001 +#define RNDIS_IPA_DFLT_RT_HDL 0 +#define FROM_IPA_TO_USB_BAMDMA 4 +#define FROM_USB_TO_IPA_BAMDMA 5 +#define BAM_DMA_MAX_PKT_NUMBER 10 +#define BAM_DMA_DATA_FIFO_SIZE \ + (BAM_DMA_MAX_PKT_NUMBER * \ + (ETH_FRAME_LEN + sizeof(struct rndis_pkt_hdr))) +#define BAM_DMA_DESC_FIFO_SIZE \ + (BAM_DMA_MAX_PKT_NUMBER * (sizeof(struct sps_iovec))) +#define TX_TIMEOUT (5 * HZ) +#define MIN_TX_ERROR_SLEEP_PERIOD 500 +#define DEFAULT_AGGR_TIME_LIMIT 1 +#define DEFAULT_AGGR_PKT_LIMIT 0 + +#define RNDIS_IPA_ERROR(fmt, args...) \ + pr_err(DRV_NAME "@%s@%d@ctx:%s: "\ + fmt, __func__, __LINE__, current->comm, ## args) +#define RNDIS_IPA_DEBUG(fmt, args...) \ + pr_debug("ctx: %s, "fmt, current->comm, ## args) + +#define NULL_CHECK_RETVAL(ptr) \ + do { \ + if (!(ptr)) { \ + RNDIS_IPA_ERROR("null pointer #ptr\n"); \ + ret = -EINVAL; \ + } \ + } \ + while (0) + +#define RNDIS_HDR_OFST(field) offsetof(struct rndis_pkt_hdr, field) +#define RNDIS_IPA_LOG_ENTRY() RNDIS_IPA_DEBUG("begin\n") +#define RNDIS_IPA_LOG_EXIT() RNDIS_IPA_DEBUG("end\n") + +/** + * enum rndis_ipa_state - specify the current driver internal state + * which is guarded by a state machine. + * + * The driver internal state changes due to its external API usage. + * The driver saves its internal state to guard from caller illegal + * call sequence. + * states: + * UNLOADED is the first state which is the default one and is also the state + * after the driver gets unloaded(cleanup). + * INITIALIZED is the driver state once it finished registering + * the network device and all internal data struct were initialized + * CONNECTED is the driver state once the USB pipes were connected to IPA + * UP is the driver state after the interface mode was set to UP but the + * pipes are not connected yet - this state is meta-stable state. 
+ * CONNECTED_AND_UP is the driver state when the pipe were connected and + * the interface got UP request from the network stack. this is the driver + * idle operation state which allows it to transmit/receive data. + * INVALID is a state which is not allowed. + */ +enum rndis_ipa_state { + RNDIS_IPA_UNLOADED = 0, + RNDIS_IPA_INITIALIZED = 1, + RNDIS_IPA_CONNECTED = 2, + RNDIS_IPA_UP = 3, + RNDIS_IPA_CONNECTED_AND_UP = 4, + RNDIS_IPA_INVALID = 5, +}; + +/** + * enum rndis_ipa_operation - enumerations used to describe the API operation + * + * Those enums are used as input for the driver state machine. + */ +enum rndis_ipa_operation { + RNDIS_IPA_INITIALIZE, + RNDIS_IPA_CONNECT, + RNDIS_IPA_OPEN, + RNDIS_IPA_STOP, + RNDIS_IPA_DISCONNECT, + RNDIS_IPA_CLEANUP, +}; + +#define RNDIS_IPA_STATE_DEBUG(ctx) \ + RNDIS_IPA_DEBUG("Driver state: %s\n",\ + rndis_ipa_state_string((ctx)->state)) + + +/** + * struct rndis_ipa_dev - main driver context parameters + * + * @net: network interface struct implemented by this driver + * @directory: debugfs directory for various debugging switches + * @tx_filter: flag that enable/disable Tx path to continue to IPA + * @tx_dropped: number of filtered out Tx packets + * @tx_dump_enable: dump all Tx packets + * @rx_filter: flag that enable/disable Rx path to continue to IPA + * @rx_dropped: number of filtered out Rx packets + * @rx_dump_enable: dump all Rx packets + * @icmp_filter: allow all ICMP packet to pass through the filters + * @rm_enable: flag that enable/disable Resource manager request prior to Tx + * @deaggregation_enable: enable/disable IPA HW deaggregation logic + * @during_xmit_error: flags that indicate that the driver is in a middle + * of error handling in Tx path + * @directory: holds all debug flags used by the driver to allow cleanup + * for driver unload + * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table + * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table + * @usb_to_ipa_hdl: save handle for IPA pipe operations + * @ipa_to_usb_hdl: save handle for IPA pipe operations + * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed + * @outstanding_high: number of outstanding packets allowed + * @outstanding_low: number of outstanding packets which shall cause + * to netdev queue start (after stopped due to outstanding_high reached) + * @error_msec_sleep_time: number of msec for sleeping in case of Tx error + * @state: current state of the driver + * @host_ethaddr: holds the tethered PC ethernet address + * @device_ethaddr: holds the device ethernet address + * @device_ready_notify: callback supplied by USB core driver + * This callback shall be called by the Netdev once the Netdev internal + * state is changed to RNDIS_IPA_CONNECTED_AND_UP + * @xmit_error_delayed_work: work item for cases where IPA driver Tx fails + * @state_lock: used to protect the state variable. 
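+ *
+ * Note: this context is allocated by alloc_etherdev() as the netdev private
+ * area, so the driver callbacks recover it with, for example:
+ *
+ *	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);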
+ */ +struct rndis_ipa_dev { + struct net_device *net; + bool tx_filter; + u32 tx_dropped; + bool tx_dump_enable; + bool rx_filter; + u32 rx_dropped; + bool rx_dump_enable; + bool icmp_filter; + bool rm_enable; + bool deaggregation_enable; + bool during_xmit_error; + struct dentry *directory; + u32 eth_ipv4_hdr_hdl; + u32 eth_ipv6_hdr_hdl; + u32 usb_to_ipa_hdl; + u32 ipa_to_usb_hdl; + atomic_t outstanding_pkts; + u32 outstanding_high; + u32 outstanding_low; + u32 error_msec_sleep_time; + enum rndis_ipa_state state; + u8 host_ethaddr[ETH_ALEN]; + u8 device_ethaddr[ETH_ALEN]; + void (*device_ready_notify)(void); + struct delayed_work xmit_error_delayed_work; + spinlock_t state_lock; /* Spinlock for the state variable.*/ +}; + +/** + * rndis_pkt_hdr - RNDIS_IPA representation of REMOTE_NDIS_PACKET_MSG + * @msg_type: for REMOTE_NDIS_PACKET_MSG this value should be 1 + * @msg_len: total message length in bytes, including RNDIS header an payload + * @data_ofst: offset in bytes from start of the data_ofst to payload + * @data_len: payload size in bytes + * @zeroes: OOB place holder - not used for RNDIS_IPA. + */ +struct rndis_pkt_hdr { + __le32 msg_type; + __le32 msg_len; + __le32 data_ofst; + __le32 data_len; + __le32 zeroes[7]; +} __packed__; + +static int rndis_ipa_open(struct net_device *net); +static void rndis_ipa_packet_receive_notify + (void *private, enum ipa_dp_evt_type evt, unsigned long data); +static void rndis_ipa_tx_complete_notify + (void *private, enum ipa_dp_evt_type evt, unsigned long data); +static void rndis_ipa_tx_timeout(struct net_device *net); +static int rndis_ipa_stop(struct net_device *net); +static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx); +static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb); +static void rndis_ipa_xmit_error(struct sk_buff *skb); +static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work); +static void rndis_ipa_prepare_header_insertion + (int eth_type, + const char *hdr_name, struct ipa_hdr_add *add_hdr, + const void *dst_mac, const void *src_mac); +static int rndis_ipa_hdrs_cfg + (struct rndis_ipa_dev *rndis_ipa_ctx, + const void *dst_mac, const void *src_mac); +static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx); +static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net); +static int rndis_ipa_register_properties(char *netdev_name); +static int rndis_ipa_deregister_properties(char *netdev_name); +static void rndis_ipa_rm_notify + (void *user_data, enum ipa_rm_event event, + unsigned long data); +static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx); +static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx); +static bool rx_filter(struct sk_buff *skb); +static bool tx_filter(struct sk_buff *skb); +static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx); +static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx); +static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx); +static netdev_tx_t rndis_ipa_start_xmit + (struct sk_buff *skb, struct net_device *net); +static int rndis_ipa_debugfs_atomic_open + (struct inode *inode, struct file *file); +static int rndis_ipa_debugfs_aggr_open + (struct inode *inode, struct file *file); +static ssize_t rndis_ipa_debugfs_aggr_write + (struct file *file, + const char __user *buf, size_t count, loff_t *ppos); +static ssize_t rndis_ipa_debugfs_atomic_read + (struct file *file, + char __user *ubuf, size_t count, loff_t *ppos); +static void 
rndis_ipa_dump_skb(struct sk_buff *skb); +static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx); +static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx); +static int rndis_ipa_ep_registers_cfg + (u32 usb_to_ipa_hdl, + u32 ipa_to_usb_hdl, u32 max_xfer_size_bytes_to_dev, + u32 max_xfer_size_bytes_to_host, u32 mtu, + bool deaggr_enable); +static int rndis_ipa_set_device_ethernet_addr + (u8 *dev_ethaddr, + u8 device_ethaddr[]); +static enum rndis_ipa_state rndis_ipa_next_state + (enum rndis_ipa_state current_state, + enum rndis_ipa_operation operation); +static const char *rndis_ipa_state_string(enum rndis_ipa_state state); +static int rndis_ipa_init_module(void); +static void rndis_ipa_cleanup_module(void); + +struct rndis_ipa_dev *rndis_ipa; + +static const struct net_device_ops rndis_ipa_netdev_ops = { + .ndo_open = rndis_ipa_open, + .ndo_stop = rndis_ipa_stop, + .ndo_start_xmit = rndis_ipa_start_xmit, + .ndo_tx_timeout = rndis_ipa_tx_timeout, + .ndo_get_stats = rndis_ipa_get_stats, + .ndo_set_mac_address = eth_mac_addr, +}; + +const struct file_operations rndis_ipa_debugfs_atomic_ops = { + .open = rndis_ipa_debugfs_atomic_open, + .read = rndis_ipa_debugfs_atomic_read, +}; + +const struct file_operations rndis_ipa_aggr_ops = { + .open = rndis_ipa_debugfs_aggr_open, + .write = rndis_ipa_debugfs_aggr_write, +}; + +static struct ipa_ep_cfg ipa_to_usb_ep_cfg = { + .mode = { + .mode = IPA_BASIC, + .dst = IPA_CLIENT_APPS_LAN_CONS, + }, + .hdr = { + .hdr_len = ETH_HLEN + sizeof(struct rndis_pkt_hdr), + .hdr_ofst_metadata_valid = false, + .hdr_ofst_metadata = 0, + .hdr_additional_const_len = ETH_HLEN, + .hdr_ofst_pkt_size_valid = true, + .hdr_ofst_pkt_size = 3 * sizeof(u32), + .hdr_a5_mux = false, + .hdr_remove_additional = false, + .hdr_metadata_reg_valid = false, + }, + .hdr_ext = { + .hdr_pad_to_alignment = 0, + .hdr_total_len_or_pad_offset = 1 * sizeof(u32), + .hdr_payload_len_inc_padding = false, + .hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN, + .hdr_total_len_or_pad_valid = true, + .hdr_little_endian = true, + }, + .aggr = { + .aggr_en = IPA_ENABLE_AGGR, + .aggr = IPA_GENERIC, + .aggr_byte_limit = 4, + .aggr_time_limit = DEFAULT_AGGR_TIME_LIMIT, + .aggr_pkt_limit = DEFAULT_AGGR_PKT_LIMIT + }, + .deaggr = { + .deaggr_hdr_len = 0, + .packet_offset_valid = 0, + .packet_offset_location = 0, + .max_packet_len = 0, + }, + .route = { + .rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL, + }, + .nat = { + .nat_en = IPA_SRC_NAT, + }, +}; + +static struct ipa_ep_cfg usb_to_ipa_ep_cfg_deaggr_dis = { + .mode = { + .mode = IPA_BASIC, + .dst = IPA_CLIENT_APPS_LAN_CONS, + }, + .hdr = { + .hdr_len = ETH_HLEN + sizeof(struct rndis_pkt_hdr), + .hdr_ofst_metadata_valid = false, + .hdr_ofst_metadata = 0, + .hdr_additional_const_len = 0, + .hdr_ofst_pkt_size_valid = true, + .hdr_ofst_pkt_size = 3 * sizeof(u32) + + sizeof(struct rndis_pkt_hdr), + .hdr_a5_mux = false, + .hdr_remove_additional = false, + .hdr_metadata_reg_valid = false, + }, + .hdr_ext = { + .hdr_pad_to_alignment = 0, + .hdr_total_len_or_pad_offset = 1 * sizeof(u32), + .hdr_payload_len_inc_padding = false, + .hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN, + .hdr_total_len_or_pad_valid = true, + .hdr_little_endian = true, + }, + + .aggr = { + .aggr_en = IPA_BYPASS_AGGR, + .aggr = 0, + .aggr_byte_limit = 0, + .aggr_time_limit = 0, + .aggr_pkt_limit = 0, + }, + .deaggr = { + .deaggr_hdr_len = 0, + .packet_offset_valid = false, + .packet_offset_location = 0, + .max_packet_len = 0, + }, + + .route = { + .rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL, 
+ }, + .nat = { + .nat_en = IPA_BYPASS_NAT, + }, +}; + +static struct ipa_ep_cfg usb_to_ipa_ep_cfg_deaggr_en = { + .mode = { + .mode = IPA_BASIC, + .dst = IPA_CLIENT_APPS_LAN_CONS, + }, + .hdr = { + .hdr_len = ETH_HLEN, + .hdr_ofst_metadata_valid = false, + .hdr_ofst_metadata = 0, + .hdr_additional_const_len = 0, + .hdr_ofst_pkt_size_valid = true, + .hdr_ofst_pkt_size = 3 * sizeof(u32), + .hdr_a5_mux = false, + .hdr_remove_additional = false, + .hdr_metadata_reg_valid = false, + }, + .hdr_ext = { + .hdr_pad_to_alignment = 0, + .hdr_total_len_or_pad_offset = 1 * sizeof(u32), + .hdr_payload_len_inc_padding = false, + .hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN, + .hdr_total_len_or_pad_valid = true, + .hdr_little_endian = true, + }, + .aggr = { + .aggr_en = IPA_ENABLE_DEAGGR, + .aggr = IPA_GENERIC, + .aggr_byte_limit = 0, + .aggr_time_limit = 0, + .aggr_pkt_limit = 0, + }, + .deaggr = { + .deaggr_hdr_len = sizeof(struct rndis_pkt_hdr), + .packet_offset_valid = true, + .packet_offset_location = 8, + .max_packet_len = 8192, /* Will be overridden*/ + }, + .route = { + .rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL, + }, + .nat = { + .nat_en = IPA_BYPASS_NAT, + }, +}; + +/** + * rndis_template_hdr - RNDIS template structure for RNDIS_IPA SW insertion + * @msg_type: set for REMOTE_NDIS_PACKET_MSG (0x00000001) + * this value will be used for all data packets + * @msg_len: will add the skb length to get final size + * @data_ofst: this field value will not be changed + * @data_len: set as skb length to get final size + * @zeroes: make sure all OOB data is not used + */ +struct rndis_pkt_hdr rndis_template_hdr = { + .msg_type = RNDIS_IPA_PKT_TYPE, + .msg_len = sizeof(struct rndis_pkt_hdr), + .data_ofst = sizeof(struct rndis_pkt_hdr) - RNDIS_HDR_OFST(data_ofst), + .data_len = 0, + .zeroes = {0}, +}; + +/** + * rndis_ipa_init() - create network device and initialize internal + * data structures + * @params: in/out parameters required for initialization, + * see "struct ipa_usb_init_params" for more details + * + * Shall be called prior to pipe connection. 
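+ *
+ * Illustrative usage sketch (informal; only the ipa_usb_init_params fields
+ * that this function reads and writes are shown, and my_* identifiers are
+ * hypothetical placeholders supplied by the USB function driver):
+ *
+ *	struct ipa_usb_init_params init_params;
+ *
+ *	memset(&init_params, 0, sizeof(init_params));
+ *	memcpy(init_params.host_ethaddr, my_host_mac, ETH_ALEN);
+ *	memcpy(init_params.device_ethaddr, my_dev_mac, ETH_ALEN);
+ *	init_params.device_ready_notify = my_ready_cb;
+ *	ret = rndis_ipa_init(&init_params);
+ *	if (ret)
+ *		return ret;
+ *	my_rx_notify = init_params.ipa_rx_notify;
+ *	my_tx_notify = init_params.ipa_tx_notify;
+ *	my_priv = init_params.private;
+ *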
+ * Detailed description: + * - allocate the network device + * - set default values for driver internal switches and stash them inside + * the netdev private field + * - set needed headroom for RNDIS header + * - create debugfs folder and files + * - create IPA resource manager client + * - set the ethernet address for the netdev to be added on SW Tx path + * - add header insertion rules for IPA driver (based on host/device Ethernet + * addresses given in input params and on RNDIS data template struct) + * - register tx/rx properties to IPA driver (will be later used + * by IPA configuration manager to configure rest of the IPA rules) + * - set the carrier state to "off" (until connect is called) + * - register the network device + * - set the out parameters + * - change driver internal state to INITIALIZED + * + * Returns negative errno, or zero on success + */ +int rndis_ipa_init(struct ipa_usb_init_params *params) +{ + int result = 0; + struct net_device *net; + struct rndis_ipa_dev *rndis_ipa_ctx; + int ret; + + RNDIS_IPA_LOG_ENTRY(); + RNDIS_IPA_DEBUG("%s initializing\n", DRV_NAME); + ret = 0; + NULL_CHECK_RETVAL(params); + if (ret) + return ret; + + RNDIS_IPA_DEBUG + ("host_ethaddr=%pM, device_ethaddr=%pM\n", + params->host_ethaddr, + params->device_ethaddr); + + net = alloc_etherdev(sizeof(struct rndis_ipa_dev)); + if (!net) { + result = -ENOMEM; + RNDIS_IPA_ERROR("fail to allocate Ethernet device\n"); + goto fail_alloc_etherdev; + } + RNDIS_IPA_DEBUG("network device was successfully allocated\n"); + + rndis_ipa_ctx = netdev_priv(net); + if (!rndis_ipa_ctx) { + result = -ENOMEM; + RNDIS_IPA_ERROR("fail to extract netdev priv\n"); + goto fail_netdev_priv; + } + memset(rndis_ipa_ctx, 0, sizeof(*rndis_ipa_ctx)); + RNDIS_IPA_DEBUG("rndis_ipa_ctx (private)=%pK\n", rndis_ipa_ctx); + + spin_lock_init(&rndis_ipa_ctx->state_lock); + + rndis_ipa_ctx->net = net; + rndis_ipa_ctx->tx_filter = false; + rndis_ipa_ctx->rx_filter = false; + rndis_ipa_ctx->icmp_filter = true; + rndis_ipa_ctx->rm_enable = true; + rndis_ipa_ctx->tx_dropped = 0; + rndis_ipa_ctx->rx_dropped = 0; + rndis_ipa_ctx->tx_dump_enable = false; + rndis_ipa_ctx->rx_dump_enable = false; + rndis_ipa_ctx->deaggregation_enable = false; + rndis_ipa_ctx->outstanding_high = DEFAULT_OUTSTANDING_HIGH; + rndis_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW; + atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0); + memcpy + (rndis_ipa_ctx->device_ethaddr, params->device_ethaddr, + sizeof(rndis_ipa_ctx->device_ethaddr)); + memcpy + (rndis_ipa_ctx->host_ethaddr, params->host_ethaddr, + sizeof(rndis_ipa_ctx->host_ethaddr)); + INIT_DELAYED_WORK + (&rndis_ipa_ctx->xmit_error_delayed_work, + rndis_ipa_xmit_error_aftercare_wq); + rndis_ipa_ctx->error_msec_sleep_time = + MIN_TX_ERROR_SLEEP_PERIOD; + RNDIS_IPA_DEBUG("internal data structures were set\n"); + + if (!params->device_ready_notify) + RNDIS_IPA_DEBUG("device_ready_notify() was not supplied\n"); + rndis_ipa_ctx->device_ready_notify = params->device_ready_notify; + + snprintf(net->name, sizeof(net->name), "%s%%d", NETDEV_NAME); + RNDIS_IPA_DEBUG + ("Setting network interface driver name to: %s\n", + net->name); + + net->netdev_ops = &rndis_ipa_netdev_ops; + net->watchdog_timeo = TX_TIMEOUT; + + net->needed_headroom = sizeof(rndis_template_hdr); + RNDIS_IPA_DEBUG + ("Needed headroom for RNDIS header set to %d\n", + net->needed_headroom); + + rndis_ipa_debugfs_init(rndis_ipa_ctx); + + result = rndis_ipa_set_device_ethernet_addr + (net->dev_addr, rndis_ipa_ctx->device_ethaddr); + if (result) { + 
RNDIS_IPA_ERROR("set device MAC failed\n"); + goto fail_set_device_ethernet; + } + RNDIS_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr); + + result = rndis_ipa_hdrs_cfg + (rndis_ipa_ctx, + params->host_ethaddr, + params->device_ethaddr); + if (result) { + RNDIS_IPA_ERROR("fail on ipa hdrs set\n"); + goto fail_hdrs_cfg; + } + RNDIS_IPA_DEBUG("IPA header-insertion configed for Ethernet+RNDIS\n"); + + result = rndis_ipa_register_properties(net->name); + if (result) { + RNDIS_IPA_ERROR("fail on properties set\n"); + goto fail_register_tx; + } + RNDIS_IPA_DEBUG("2 TX and 2 RX properties were registered\n"); + + netif_carrier_off(net); + RNDIS_IPA_DEBUG("set carrier off until pipes are connected\n"); + + result = register_netdev(net); + if (result) { + RNDIS_IPA_ERROR("register_netdev failed: %d\n", result); + goto fail_register_netdev; + } + RNDIS_IPA_DEBUG + ("netdev:%s registration succeeded, index=%d\n", + net->name, net->ifindex); + + rndis_ipa = rndis_ipa_ctx; + params->ipa_rx_notify = rndis_ipa_packet_receive_notify; + params->ipa_tx_notify = rndis_ipa_tx_complete_notify; + params->private = rndis_ipa_ctx; + params->skip_ep_cfg = false; + rndis_ipa_ctx->state = RNDIS_IPA_INITIALIZED; + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + pr_info("RNDIS_IPA NetDev was initialized"); + + RNDIS_IPA_LOG_EXIT(); + + return 0; + +fail_register_netdev: + rndis_ipa_deregister_properties(net->name); +fail_register_tx: + rndis_ipa_hdrs_destroy(rndis_ipa_ctx); +fail_set_device_ethernet: +fail_hdrs_cfg: + rndis_ipa_debugfs_destroy(rndis_ipa_ctx); +fail_netdev_priv: + free_netdev(net); +fail_alloc_etherdev: + return result; +} +EXPORT_SYMBOL(rndis_ipa_init); + +/** + * rndis_ipa_pipe_connect_notify() - notify rndis_ipa Netdev that the USB pipes + * were connected + * @usb_to_ipa_hdl: handle from IPA driver client for USB->IPA + * @ipa_to_usb_hdl: handle from IPA driver client for IPA->USB + * @private: same value that was set by init(), this parameter holds the + * network device pointer. + * @max_transfer_byte_size: RNDIS protocol specific, the maximum size that + * the host expect + * @max_packet_number: RNDIS protocol specific, the maximum packet number + * that the host expects + * + * Once USB driver finishes the pipe connection between IPA core + * and USB core this method shall be called in order to + * allow the driver to complete the data path configurations. + * Detailed description: + * - configure the IPA end-points register + * - notify the Linux kernel for "carrier_on" + * - change the driver internal state + * + * After this function is done the driver state changes to "Connected" or + * Connected and Up. + * This API is expected to be called after initialization() or + * after a call to disconnect(). 
+ * + * Returns negative errno, or zero on success + */ +int rndis_ipa_pipe_connect_notify( + u32 usb_to_ipa_hdl, + u32 ipa_to_usb_hdl, + u32 max_xfer_size_bytes_to_dev, + u32 max_packet_number_to_dev, + u32 max_xfer_size_bytes_to_host, + void *private) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = private; + int next_state; + int result; + int ret; + unsigned long flags; + + RNDIS_IPA_LOG_ENTRY(); + + ret = 0; + NULL_CHECK_RETVAL(private); + + if (ret) + return ret; + + RNDIS_IPA_DEBUG + ("usb_to_ipa_hdl=%d, ipa_to_usb_hdl=%d, private=0x%pK\n", + usb_to_ipa_hdl, ipa_to_usb_hdl, private); + RNDIS_IPA_DEBUG + ("max_xfer_sz_to_dev=%d, max_pkt_num_to_dev=%d\n", + max_xfer_size_bytes_to_dev, + max_packet_number_to_dev); + RNDIS_IPA_DEBUG + ("max_xfer_sz_to_host=%d\n", + max_xfer_size_bytes_to_host); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + next_state = rndis_ipa_next_state + (rndis_ipa_ctx->state, + RNDIS_IPA_CONNECT); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("use init()/disconnect() before connect()\n"); + return -EPERM; + } + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + if (usb_to_ipa_hdl >= IPA_CLIENT_MAX) { + RNDIS_IPA_ERROR + ("usb_to_ipa_hdl(%d) - not valid ipa handle\n", + usb_to_ipa_hdl); + return -EINVAL; + } + if (ipa_to_usb_hdl >= IPA_CLIENT_MAX) { + RNDIS_IPA_ERROR + ("ipa_to_usb_hdl(%d) - not valid ipa handle\n", + ipa_to_usb_hdl); + return -EINVAL; + } + + result = rndis_ipa_create_rm_resource(rndis_ipa_ctx); + if (result) { + RNDIS_IPA_ERROR("fail on RM create\n"); + goto fail_create_rm; + } + RNDIS_IPA_DEBUG("RM resource was created\n"); + + rndis_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl; + rndis_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl; + if (max_packet_number_to_dev > 1) + rndis_ipa_ctx->deaggregation_enable = true; + else + rndis_ipa_ctx->deaggregation_enable = false; + result = rndis_ipa_ep_registers_cfg + (usb_to_ipa_hdl, + ipa_to_usb_hdl, + max_xfer_size_bytes_to_dev, + max_xfer_size_bytes_to_host, + rndis_ipa_ctx->net->mtu, + rndis_ipa_ctx->deaggregation_enable); + if (result) { + RNDIS_IPA_ERROR("fail on ep cfg\n"); + goto fail; + } + RNDIS_IPA_DEBUG("end-points configured\n"); + + netif_stop_queue(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("netif_stop_queue() was called\n"); + + netif_carrier_on(rndis_ipa_ctx->net); + if (!netif_carrier_ok(rndis_ipa_ctx->net)) { + RNDIS_IPA_ERROR("netif_carrier_ok error\n"); + result = -EBUSY; + goto fail; + } + RNDIS_IPA_DEBUG("netif_carrier_on() was called\n"); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, + RNDIS_IPA_CONNECT); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("use init()/disconnect() before connect()\n"); + return -EPERM; + } + rndis_ipa_ctx->state = next_state; + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + + if (next_state == RNDIS_IPA_CONNECTED_AND_UP) + rndis_ipa_enable_data_path(rndis_ipa_ctx); + else + RNDIS_IPA_DEBUG("queue shall be started after open()\n"); + + pr_info("RNDIS_IPA NetDev pipes were connected\n"); + + RNDIS_IPA_LOG_EXIT(); + + return 0; + +fail: + rndis_ipa_destroy_rm_resource(rndis_ipa_ctx); +fail_create_rm: + return result; +} +EXPORT_SYMBOL(rndis_ipa_pipe_connect_notify); + +/** + * rndis_ipa_open() - notify Linux network stack to start sending packets + * @net: the network interface 
supplied by the network stack + * + * Linux uses this API to notify the driver that the network interface + * transitions to the up state. + * The driver will instruct the Linux network stack to start + * delivering data packets. + * The driver internal state shall be changed to Up or Connected and Up + * + * Returns negative errno, or zero on success + */ +static int rndis_ipa_open(struct net_device *net) +{ + struct rndis_ipa_dev *rndis_ipa_ctx; + int next_state; + unsigned long flags; + + RNDIS_IPA_LOG_ENTRY(); + + rndis_ipa_ctx = netdev_priv(net); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + + next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, RNDIS_IPA_OPEN); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("can't bring driver up before initialize\n"); + return -EPERM; + } + + rndis_ipa_ctx->state = next_state; + + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + + if (next_state == RNDIS_IPA_CONNECTED_AND_UP) + rndis_ipa_enable_data_path(rndis_ipa_ctx); + else + RNDIS_IPA_DEBUG("queue shall be started after connect()\n"); + + pr_info("RNDIS_IPA NetDev was opened\n"); + + RNDIS_IPA_LOG_EXIT(); + + return 0; +} + +/** + * rndis_ipa_start_xmit() - send data from APPs to USB core via IPA core + * using SW path (Tx data path) + * Tx path for this Netdev is Apps-processor->IPA->USB + * @skb: packet received from Linux network stack destined for tethered PC + * @net: the network device being used to send this packet (rndis0) + * + * Several conditions needed in order to send the packet to IPA: + * - Transmit queue for the network driver is currently + * in "started" state + * - The driver internal state is in Connected and Up state. + * - Filters Tx switch are turned off + * - The IPA resource manager state for the driver producer client + * is "Granted" which implies that all the resources in the dependency + * graph are valid for data flow. + * - outstanding high boundary was not reached. + * + * In case the outstanding packets high boundary is reached, the driver will + * stop the send queue until enough packets are processed by + * the IPA core (based on calls to rndis_ipa_tx_complete_notify). + * + * In case all of the conditions are met, the network driver shall: + * - encapsulate the Ethernet packet with RNDIS header (REMOTE_NDIS_PACKET_MSG) + * - send the packet by using IPA Driver SW path (IP_PACKET_INIT) + * - Netdev status fields shall be updated based on the current Tx packet + * + * Returns NETDEV_TX_BUSY if retry should be made later, + * or NETDEV_TX_OK on success. 
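+ *
+ * For orientation, a rough sketch of the encapsulation step (RNDIS field
+ * names follow the RNDIS specification and are not necessarily the field
+ * names used by this file): a REMOTE_NDIS_PACKET_MSG header starts with a
+ * message-type word of 0x00000001 followed by message-length, data-offset
+ * and data-length words; rndis_encapsulate_skb() prepends the pre-built
+ * rndis_template_hdr and only adds the payload length to msg_len/data_len.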
+ */ +static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb, + struct net_device *net) +{ + int ret; + netdev_tx_t status = NETDEV_TX_BUSY; + struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net); + + netif_trans_update(net); + + RNDIS_IPA_DEBUG + ("Tx, len=%d, skb->protocol=%d, outstanding=%d\n", + skb->len, skb->protocol, + atomic_read(&rndis_ipa_ctx->outstanding_pkts)); + + if (unlikely(netif_queue_stopped(net))) { + RNDIS_IPA_ERROR("interface queue is stopped\n"); + goto out; + } + + if (unlikely(rndis_ipa_ctx->tx_dump_enable)) + rndis_ipa_dump_skb(skb); + + if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) { + RNDIS_IPA_ERROR("Missing pipe connected and/or iface up\n"); + return NETDEV_TX_BUSY; + } + + if (unlikely(tx_filter(skb))) { + dev_kfree_skb_any(skb); + RNDIS_IPA_DEBUG("packet got filtered out on Tx path\n"); + rndis_ipa_ctx->tx_dropped++; + status = NETDEV_TX_OK; + goto out; + } + + ret = resource_request(rndis_ipa_ctx); + if (ret) { + RNDIS_IPA_DEBUG("Waiting to resource\n"); + netif_stop_queue(net); + goto resource_busy; + } + + if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) >= + rndis_ipa_ctx->outstanding_high) { + RNDIS_IPA_DEBUG("Outstanding high boundary reached (%d)\n", + rndis_ipa_ctx->outstanding_high); + netif_stop_queue(net); + RNDIS_IPA_DEBUG("send queue was stopped\n"); + status = NETDEV_TX_BUSY; + goto out; + } + + skb = rndis_encapsulate_skb(skb); + trace_rndis_tx_dp(skb->protocol); + ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL); + if (ret) { + RNDIS_IPA_ERROR("ipa transmit failed (%d)\n", ret); + goto fail_tx_packet; + } + + atomic_inc(&rndis_ipa_ctx->outstanding_pkts); + + status = NETDEV_TX_OK; + goto out; + +fail_tx_packet: + rndis_ipa_xmit_error(skb); +out: + resource_release(rndis_ipa_ctx); +resource_busy: + RNDIS_IPA_DEBUG + ("packet Tx done - %s\n", + (status == NETDEV_TX_OK) ? "OK" : "FAIL"); + + return status; +} + +/** + * rndis_ipa_tx_complete_notify() - notification for Netdev that the + * last packet was successfully sent + * @private: driver context stashed by IPA driver upon pipe connect + * @evt: event type (expected to be write-done event) + * @data: data provided with event (this is actually the skb that + * holds the sent packet) + * + * This function will be called on interrupt bottom halve deferred context. + * outstanding packets counter shall be decremented. + * Network stack send queue will be re-started in case low outstanding + * boundary is reached and queue was stopped before. + * At the end the skb shall be freed. 
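+ *
+ * Worked example (illustrative numbers only, not the driver defaults): with
+ * outstanding_high = 64 and outstanding_low = 32, rndis_ipa_start_xmit()
+ * stops the send queue once 64 packets are in flight toward IPA, and this
+ * completion handler wakes the queue only after the in-flight count drops
+ * below 32, providing hysteresis rather than toggling on every packet.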
+ */
+static void rndis_ipa_tx_complete_notify(
+	void *private,
+	enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int ret;
+
+	ret = 0;
+	NULL_CHECK_RETVAL(private);
+	if (ret)
+		return;
+
+	trace_rndis_status_rcvd(skb->protocol);
+
+	RNDIS_IPA_DEBUG
+		("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts));
+
+	if (unlikely((evt != IPA_WRITE_DONE))) {
+		RNDIS_IPA_ERROR("unsupported event on TX call-back\n");
+		return;
+	}
+
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_DEBUG
+			("dropping Tx-complete pkt, state=%s\n",
+			rndis_ipa_state_string(rndis_ipa_ctx->state));
+		goto out;
+	}
+
+	rndis_ipa_ctx->net->stats.tx_packets++;
+	rndis_ipa_ctx->net->stats.tx_bytes += skb->len;
+
+	atomic_dec(&rndis_ipa_ctx->outstanding_pkts);
+	if (netif_queue_stopped(rndis_ipa_ctx->net) &&
+		netif_carrier_ok(rndis_ipa_ctx->net) &&
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts) <
+					(rndis_ipa_ctx->outstanding_low)) {
+		RNDIS_IPA_DEBUG("outstanding low boundary reached (%d)\n",
+			rndis_ipa_ctx->outstanding_low);
+		netif_wake_queue(rndis_ipa_ctx->net);
+		RNDIS_IPA_DEBUG("send queue was awakened\n");
+	}
+
+out:
+	dev_kfree_skb_any(skb);
+}
+
+static void rndis_ipa_tx_timeout(struct net_device *net)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
+	int outstanding = atomic_read(&rndis_ipa_ctx->outstanding_pkts);
+
+	RNDIS_IPA_ERROR
+		("possible IPA stall was detected, %d outstanding\n",
+		outstanding);
+
+	net->stats.tx_errors++;
+}
+
+/**
+ * rndis_ipa_rm_notify() - callback supplied to IPA resource manager
+ * for grant/release events
+ * @user_data: the driver context supplied to IPA resource manager during call
+ * to ipa_rm_create_resource().
+ * @event: the event notified to us by IPA resource manager (Release/Grant)
+ * @data: reserved field supplied by IPA resource manager
+ *
+ * This callback shall be called based on resource request/release sent
+ * to the IPA resource manager.
+ * In case the queue was stopped during EINPROGRESS for Tx path and the
+ * event received is Grant then the queue shall be restarted.
+ * In case the event notified is a release notification the netdev discards it.
+ */
+static void rndis_ipa_rm_notify(
+	void *user_data, enum ipa_rm_event event,
+	unsigned long data)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = user_data;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	if (event == IPA_RM_RESOURCE_RELEASED) {
+		RNDIS_IPA_DEBUG("Resource Released\n");
+		return;
+	}
+
+	if (event != IPA_RM_RESOURCE_GRANTED) {
+		RNDIS_IPA_ERROR
+			("Unexpected event received from RM (%d)\n", event);
+		return;
+	}
+	RNDIS_IPA_DEBUG("Resource Granted\n");
+
+	if (netif_queue_stopped(rndis_ipa_ctx->net)) {
+		RNDIS_IPA_DEBUG("starting queue\n");
+		netif_start_queue(rndis_ipa_ctx->net);
+	} else {
+		RNDIS_IPA_DEBUG("queue already awake\n");
+	}
+
+	RNDIS_IPA_LOG_EXIT();
+}
+
+/**
+ * rndis_ipa_packet_receive_notify() - Rx notify for packet sent from
+ *  tethered PC (USB->IPA). Rx path for this Netdev
+ *  is USB->IPA->Apps-processor
+ * @private: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Once IPA driver receives a packet from USB client this callback will be
+ * called from bottom-half interrupt handling context (ipa Rx workqueue).
+ * + * Packets that shall be sent to Apps processor may be of two types: + * 1) Packets that are destined for Apps (e.g: WEBSERVER running on Apps) + * 2) Exception packets that need special handling (based on IPA core + * configuration, e.g: new TCP session or any other packets that IPA core + * can't handle) + * If the next conditions are met, the packet shall be sent up to the + * Linux network stack: + * - Driver internal state is Connected and Up + * - Notification received from IPA driver meets the expected type + * for Rx packet + * -Filters Rx switch are turned off + * + * Prior to the sending to the network stack: + * - Netdev struct shall be stashed to the skb as required by the network stack + * - Ethernet header shall be removed (skb->data shall point to the Ethernet + * payload, Ethernet still stashed under MAC header). + * - The skb->pkt_protocol shall be set based on the ethernet destination + * address, Can be Broadcast, Multicast or Other-Host, The later + * pkt-types packets shall be dropped in case the Netdev is not + * in promisc mode. + * - Set the skb protocol field based on the EtherType field + * + * Netdev status fields shall be updated based on the current Rx packet + */ +static void rndis_ipa_packet_receive_notify( + void *private, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + struct rndis_ipa_dev *rndis_ipa_ctx = private; + int result; + unsigned int packet_len = skb->len; + + RNDIS_IPA_DEBUG + ("packet Rx, len=%d\n", + skb->len); + + if (unlikely(rndis_ipa_ctx->rx_dump_enable)) + rndis_ipa_dump_skb(skb); + + if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) { + RNDIS_IPA_DEBUG("use connect()/up() before receive()\n"); + RNDIS_IPA_DEBUG("packet dropped (length=%d)\n", + skb->len); + return; + } + + if (evt != IPA_RECEIVE) { + RNDIS_IPA_ERROR("a none IPA_RECEIVE event in driver RX\n"); + return; + } + + if (!rndis_ipa_ctx->deaggregation_enable) + skb_pull(skb, sizeof(struct rndis_pkt_hdr)); + + skb->dev = rndis_ipa_ctx->net; + skb->protocol = eth_type_trans(skb, rndis_ipa_ctx->net); + + if (rx_filter(skb)) { + RNDIS_IPA_DEBUG("packet got filtered out on RX path\n"); + rndis_ipa_ctx->rx_dropped++; + dev_kfree_skb_any(skb); + return; + } + + trace_rndis_netif_ni(skb->protocol); + result = netif_rx_ni(skb); + if (result) + RNDIS_IPA_ERROR("fail on netif_rx_ni\n"); + rndis_ipa_ctx->net->stats.rx_packets++; + rndis_ipa_ctx->net->stats.rx_bytes += packet_len; +} + +/** rndis_ipa_stop() - notify the network interface to stop + * sending/receiving data + * @net: the network device being stopped. + * + * This API is used by Linux network stack to notify the network driver that + * its state was changed to "down" + * The driver will stop the "send" queue and change its internal + * state to "Connected". + * The Netdev shall be returned to be "Up" after rndis_ipa_open(). 
+ */ +static int rndis_ipa_stop(struct net_device *net) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net); + int next_state; + unsigned long flags; + + RNDIS_IPA_LOG_ENTRY(); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + + next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, RNDIS_IPA_STOP); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_DEBUG("can't do network interface down without up\n"); + return -EPERM; + } + + rndis_ipa_ctx->state = next_state; + + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + netif_stop_queue(net); + pr_info("RNDIS_IPA NetDev queue is stopped\n"); + + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + + RNDIS_IPA_LOG_EXIT(); + + return 0; +} + +/** rndis_ipa_disconnect() - notify rndis_ipa Netdev that the USB pipes + * were disconnected + * @private: same value that was set by init(), this parameter holds the + * network device pointer. + * + * USB shall notify the Netdev after disconnecting the pipe. + * - The internal driver state shall returned to its previous + * state (Up or Initialized). + * - Linux network stack shall be informed for carrier off to notify + * user space for pipe disconnect + * - send queue shall be stopped + * During the transition between the pipe disconnection to + * the Netdev notification packets + * are expected to be dropped by IPA driver or IPA core. + */ +int rndis_ipa_pipe_disconnect_notify(void *private) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = private; + int next_state; + int outstanding_dropped_pkts; + int retval; + int ret; + unsigned long flags; + + RNDIS_IPA_LOG_ENTRY(); + + ret = 0; + NULL_CHECK_RETVAL(rndis_ipa_ctx); + if (ret) + return ret; + RNDIS_IPA_DEBUG("private=0x%pK\n", private); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + + next_state = rndis_ipa_next_state + (rndis_ipa_ctx->state, + RNDIS_IPA_DISCONNECT); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("can't disconnect before connect\n"); + return -EPERM; + } + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + if (rndis_ipa_ctx->during_xmit_error) { + RNDIS_IPA_DEBUG("canceling xmit-error delayed work\n"); + cancel_delayed_work_sync( + &rndis_ipa_ctx->xmit_error_delayed_work); + rndis_ipa_ctx->during_xmit_error = false; + } + + netif_carrier_off(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("carrier_off notification was sent\n"); + + netif_stop_queue(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("queue stopped\n"); + + outstanding_dropped_pkts = + atomic_read(&rndis_ipa_ctx->outstanding_pkts); + + rndis_ipa_ctx->net->stats.tx_dropped += outstanding_dropped_pkts; + atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0); + + retval = rndis_ipa_destroy_rm_resource(rndis_ipa_ctx); + if (retval) { + RNDIS_IPA_ERROR("Fail to clean RM\n"); + return retval; + } + RNDIS_IPA_DEBUG("RM was successfully destroyed\n"); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, + RNDIS_IPA_DISCONNECT); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("can't disconnect before connect\n"); + return -EPERM; + } + rndis_ipa_ctx->state = next_state; + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + + pr_info("RNDIS_IPA NetDev pipes disconnected (%d outstanding clr)\n", + outstanding_dropped_pkts); + + RNDIS_IPA_LOG_EXIT(); + 
+ return 0; +} +EXPORT_SYMBOL(rndis_ipa_pipe_disconnect_notify); + +/** + * rndis_ipa_cleanup() - unregister the network interface driver and free + * internal data structs. + * @private: same value that was set by init(), this + * parameter holds the network device pointer. + * + * This function shall be called once the network interface is not + * needed anymore, e.g: when the USB composition does not support it. + * This function shall be called after the pipes were disconnected. + * Detailed description: + * - remove header-insertion headers from IPA core + * - delete the driver dependency defined for IPA resource manager and + * destroy the producer resource. + * - remove the debugfs entries + * - deregister the network interface from Linux network stack + * - free all internal data structs + * + * It is assumed that no packets shall be sent through HW bridging + * during cleanup to avoid packets trying to add an header that is + * removed during cleanup (IPA configuration manager should have + * removed them at this point) + */ +void rndis_ipa_cleanup(void *private) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = private; + int next_state; + int ret; + unsigned long flags; + + RNDIS_IPA_LOG_ENTRY(); + + RNDIS_IPA_DEBUG("private=0x%pK\n", private); + + ret = 0; + NULL_CHECK_RETVAL(rndis_ipa_ctx); + if (ret) + return; + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + next_state = rndis_ipa_next_state + (rndis_ipa_ctx->state, + RNDIS_IPA_CLEANUP); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("use disconnect()before clean()\n"); + return; + } + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + + RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx); + + ret = rndis_ipa_deregister_properties(rndis_ipa_ctx->net->name); + if (ret) { + RNDIS_IPA_ERROR("Fail to deregister Tx/Rx properties\n"); + return; + } + RNDIS_IPA_DEBUG("deregister Tx/Rx properties was successful\n"); + + ret = rndis_ipa_hdrs_destroy(rndis_ipa_ctx); + if (ret) + RNDIS_IPA_ERROR( + "Failed removing RNDIS headers from IPA core. 
Continue anyway\n"); + else + RNDIS_IPA_DEBUG("RNDIS headers were removed from IPA core\n"); + + rndis_ipa_debugfs_destroy(rndis_ipa_ctx); + RNDIS_IPA_DEBUG("debugfs remove was done\n"); + + unregister_netdev(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("netdev unregistered\n"); + + spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); + next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, + RNDIS_IPA_CLEANUP); + if (next_state == RNDIS_IPA_INVALID) { + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + RNDIS_IPA_ERROR("use disconnect()before clean()\n"); + return; + } + rndis_ipa_ctx->state = next_state; + spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags); + free_netdev(rndis_ipa_ctx->net); + pr_info("RNDIS_IPA NetDev was cleaned\n"); + + RNDIS_IPA_LOG_EXIT(); +} +EXPORT_SYMBOL(rndis_ipa_cleanup); + +static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + if (rndis_ipa_ctx->device_ready_notify) { + rndis_ipa_ctx->device_ready_notify(); + RNDIS_IPA_DEBUG("USB device_ready_notify() was called\n"); + } else { + RNDIS_IPA_DEBUG("device_ready_notify() not supplied\n"); + } + + netif_start_queue(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("netif_start_queue() was called\n"); +} + +static void rndis_ipa_xmit_error(struct sk_buff *skb) +{ + bool retval; + struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev); + unsigned long delay_jiffies; + u8 rand_dealy_msec; + + RNDIS_IPA_LOG_ENTRY(); + + RNDIS_IPA_DEBUG("starting Tx-queue backoff\n"); + + netif_stop_queue(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("netif_stop_queue was called\n"); + + skb_pull(skb, sizeof(rndis_template_hdr)); + rndis_ipa_ctx->net->stats.tx_errors++; + + get_random_bytes(&rand_dealy_msec, sizeof(rand_dealy_msec)); + delay_jiffies = msecs_to_jiffies( + rndis_ipa_ctx->error_msec_sleep_time + rand_dealy_msec); + + retval = schedule_delayed_work( + &rndis_ipa_ctx->xmit_error_delayed_work, delay_jiffies); + if (!retval) { + RNDIS_IPA_ERROR("fail to schedule delayed work\n"); + netif_start_queue(rndis_ipa_ctx->net); + } else { + RNDIS_IPA_DEBUG + ("work scheduled to start Tx-queue in %d msec\n", + rndis_ipa_ctx->error_msec_sleep_time + + rand_dealy_msec); + rndis_ipa_ctx->during_xmit_error = true; + } + + RNDIS_IPA_LOG_EXIT(); +} + +static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work) +{ + struct rndis_ipa_dev *rndis_ipa_ctx; + struct delayed_work *delayed_work; + + RNDIS_IPA_LOG_ENTRY(); + + RNDIS_IPA_DEBUG("Starting queue after xmit error\n"); + + delayed_work = to_delayed_work(work); + rndis_ipa_ctx = container_of + (delayed_work, struct rndis_ipa_dev, + xmit_error_delayed_work); + + if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) { + RNDIS_IPA_ERROR + ("error aftercare handling in bad state (%d)", + rndis_ipa_ctx->state); + return; + } + + rndis_ipa_ctx->during_xmit_error = false; + + netif_start_queue(rndis_ipa_ctx->net); + RNDIS_IPA_DEBUG("netif_start_queue() was called\n"); + + RNDIS_IPA_LOG_EXIT(); +} + +/** + * rndis_ipa_prepare_header_insertion() - prepare the header insertion request + * for IPA driver + * eth_type: the Ethernet type for this header-insertion header + * hdr_name: string that shall represent this header in IPA data base + * add_hdr: output for caller to be used with ipa_add_hdr() to configure + * the IPA core + * dst_mac: tethered PC MAC (Ethernet) address to be added to packets + * for IPA->USB pipe + * src_mac: device MAC (Ethernet) address to be added to packets + * for IPA->USB pipe + * + * This function shall build the 
header-insertion block request for a + * single Ethernet+RNDIS header) + * this header shall be inserted for packets processed by IPA + * and destined for USB client. + * This header shall be used for HW bridging for packets destined for + * tethered PC. + * For SW data-path, this header won't be used. + */ +static void rndis_ipa_prepare_header_insertion( + int eth_type, + const char *hdr_name, struct ipa_hdr_add *add_hdr, + const void *dst_mac, const void *src_mac) +{ + struct ethhdr *eth_hdr; + + add_hdr->hdr_len = sizeof(rndis_template_hdr); + add_hdr->is_partial = false; + strlcpy(add_hdr->name, hdr_name, IPA_RESOURCE_NAME_MAX); + + memcpy(add_hdr->hdr, &rndis_template_hdr, sizeof(rndis_template_hdr)); + eth_hdr = (struct ethhdr *)(add_hdr->hdr + sizeof(rndis_template_hdr)); + memcpy(eth_hdr->h_dest, dst_mac, ETH_ALEN); + memcpy(eth_hdr->h_source, src_mac, ETH_ALEN); + eth_hdr->h_proto = htons(eth_type); + add_hdr->hdr_len += ETH_HLEN; + add_hdr->is_eth2_ofst_valid = true; + add_hdr->eth2_ofst = sizeof(rndis_template_hdr); + add_hdr->type = IPA_HDR_L2_ETHERNET_II; +} + +/** + * rndis_ipa_hdrs_cfg() - configure header insertion block in IPA core + * to allow HW bridging + * @rndis_ipa_ctx: main driver context + * @dst_mac: destination MAC address (tethered PC) + * @src_mac: source MAC address (MDM device) + * + * This function shall add 2 headers. + * One header for Ipv4 and one header for Ipv6. + * Both headers shall contain Ethernet header and RNDIS header, the only + * difference shall be in the EtherTye field. + * Headers will be committed to HW + * + * Returns negative errno, or zero on success + */ +static int rndis_ipa_hdrs_cfg( + struct rndis_ipa_dev *rndis_ipa_ctx, + const void *dst_mac, const void *src_mac) +{ + struct ipa_ioc_add_hdr *hdrs; + struct ipa_hdr_add *ipv4_hdr; + struct ipa_hdr_add *ipv6_hdr; + int result = 0; + + RNDIS_IPA_LOG_ENTRY(); + + hdrs = kzalloc + (sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr), + GFP_KERNEL); + if (!hdrs) { + result = -ENOMEM; + goto fail_mem; + } + + ipv4_hdr = &hdrs->hdr[0]; + ipv6_hdr = &hdrs->hdr[1]; + rndis_ipa_prepare_header_insertion + (ETH_P_IP, IPV4_HDR_NAME, + ipv4_hdr, dst_mac, src_mac); + rndis_ipa_prepare_header_insertion + (ETH_P_IPV6, IPV6_HDR_NAME, + ipv6_hdr, dst_mac, src_mac); + + hdrs->commit = 1; + hdrs->num_hdrs = 2; + result = ipa_add_hdr(hdrs); + if (result) { + RNDIS_IPA_ERROR("Fail on Header-Insertion(%d)\n", result); + goto fail_add_hdr; + } + if (ipv4_hdr->status) { + RNDIS_IPA_ERROR("Fail on Header-Insertion ipv4(%d)\n", + ipv4_hdr->status); + result = ipv4_hdr->status; + goto fail_add_hdr; + } + if (ipv6_hdr->status) { + RNDIS_IPA_ERROR("Fail on Header-Insertion ipv6(%d)\n", + ipv6_hdr->status); + result = ipv6_hdr->status; + goto fail_add_hdr; + } + rndis_ipa_ctx->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl; + rndis_ipa_ctx->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl; + + RNDIS_IPA_LOG_EXIT(); + +fail_add_hdr: + kfree(hdrs); +fail_mem: + return result; +} + +/** + * rndis_ipa_hdrs_destroy() - remove the IPA core configuration done for + * the driver data path bridging. + * @rndis_ipa_ctx: the driver context + * + * Revert the work done on rndis_ipa_hdrs_cfg(), which is, + * remove 2 headers for Ethernet+RNDIS. 
+ */ +static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *ipv4; + struct ipa_hdr_del *ipv6; + int result; + + del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) + + sizeof(*ipv6), GFP_KERNEL); + if (!del_hdr) + return -ENOMEM; + + del_hdr->commit = 1; + del_hdr->num_hdls = 2; + + ipv4 = &del_hdr->hdl[0]; + ipv4->hdl = rndis_ipa_ctx->eth_ipv4_hdr_hdl; + ipv6 = &del_hdr->hdl[1]; + ipv6->hdl = rndis_ipa_ctx->eth_ipv6_hdr_hdl; + + result = ipa_del_hdr(del_hdr); + if (result || ipv4->status || ipv6->status) + RNDIS_IPA_ERROR("ipa_del_hdr failed\n"); + else + RNDIS_IPA_DEBUG("hdrs deletion done\n"); + + kfree(del_hdr); + return result; +} + +static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net) +{ + return &net->stats; +} + +/** + * rndis_ipa_register_properties() - set Tx/Rx properties needed + * by IPA configuration manager + * @netdev_name: a string with the name of the network interface device + * + * Register Tx/Rx properties to allow user space configuration (IPA + * Configuration Manager): + * + * - Two Tx properties (IPA->USB): specify the header names and pipe number + * that shall be used by user space for header-addition configuration + * for ipv4/ipv6 packets flowing from IPA to USB for HW bridging data. + * That header-addition header is added by the Netdev and used by user + * space to close the the HW bridge by adding filtering and routing rules + * that point to this header. + * + * - Two Rx properties (USB->IPA): these properties shall be used by user space + * to configure the IPA core to identify the packets destined + * for Apps-processor by configuring the unicast rules destined for + * the Netdev IP address. + * This rules shall be added based on the attribute mask supplied at + * this function, that is, always hit rule. 
+ */ +static int rndis_ipa_register_properties(char *netdev_name) +{ + struct ipa_tx_intf tx_properties = {0}; + struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} }; + struct ipa_ioc_tx_intf_prop *ipv4_property; + struct ipa_ioc_tx_intf_prop *ipv6_property; + struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} }; + struct ipa_rx_intf rx_properties = {0}; + struct ipa_ioc_rx_intf_prop *rx_ipv4_property; + struct ipa_ioc_rx_intf_prop *rx_ipv6_property; + int result = 0; + + RNDIS_IPA_LOG_ENTRY(); + + tx_properties.prop = properties; + ipv4_property = &tx_properties.prop[0]; + ipv4_property->ip = IPA_IP_v4; + ipv4_property->dst_pipe = IPA_TO_USB_CLIENT; + strlcpy + (ipv4_property->hdr_name, IPV4_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + ipv6_property = &tx_properties.prop[1]; + ipv6_property->ip = IPA_IP_v6; + ipv6_property->dst_pipe = IPA_TO_USB_CLIENT; + strlcpy + (ipv6_property->hdr_name, IPV6_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + tx_properties.num_props = 2; + + rx_properties.prop = rx_ioc_properties; + rx_ipv4_property = &rx_properties.prop[0]; + rx_ipv4_property->ip = IPA_IP_v4; + rx_ipv4_property->attrib.attrib_mask = 0; + rx_ipv4_property->src_pipe = IPA_CLIENT_USB_PROD; + rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_ipv6_property = &rx_properties.prop[1]; + rx_ipv6_property->ip = IPA_IP_v6; + rx_ipv6_property->attrib.attrib_mask = 0; + rx_ipv6_property->src_pipe = IPA_CLIENT_USB_PROD; + rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II; + rx_properties.num_props = 2; + + result = ipa_register_intf("rndis0", &tx_properties, &rx_properties); + if (result) + RNDIS_IPA_ERROR("fail on Tx/Rx properties registration\n"); + else + RNDIS_IPA_DEBUG("Tx/Rx properties registration done\n"); + + RNDIS_IPA_LOG_EXIT(); + + return result; +} + +/** + * rndis_ipa_deregister_properties() - remove the 2 Tx and 2 Rx properties + * @netdev_name: a string with the name of the network interface device + * + * This function revert the work done on rndis_ipa_register_properties(). + */ +static int rndis_ipa_deregister_properties(char *netdev_name) +{ + int result; + + RNDIS_IPA_LOG_ENTRY(); + + result = ipa_deregister_intf(netdev_name); + if (result) { + RNDIS_IPA_DEBUG("Fail on Tx prop deregister\n"); + return result; + } + RNDIS_IPA_LOG_EXIT(); + + return 0; +} + +/** + * rndis_ipa_create_rm_resource() -creates the resource representing + * this Netdev and supply notification callback for resource event + * such as Grant/Release + * @rndis_ipa_ctx: this driver context + * + * In order make sure all needed resources are available during packet + * transmit this Netdev shall use Request/Release mechanism of + * the IPA resource manager. + * This mechanism shall iterate over a dependency graph and make sure + * all dependent entities are ready to for packet Tx + * transfer (Apps->IPA->USB). + * In this function the resource representing the Netdev is created + * in addition to the basic dependency between the Netdev and the USB client. + * Hence, USB client, is a dependency for the Netdev and may be notified in + * case of packet transmit from this Netdev to tethered Host. + * As implied from the "may" in the above sentence there is a scenario where + * the USB is not notified. This is done thanks to the IPA resource manager + * inactivity timer. 
+ * The inactivity timer allows the Release requests to be delayed in order
+ * to prevent ping-pong with the USB and other dependencies.
+ */
+static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	struct ipa_rm_create_params create_params = {0};
+	struct ipa_rm_perf_profile profile;
+	int result;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	create_params.name = DRV_RESOURCE_ID;
+	create_params.reg_params.user_data = rndis_ipa_ctx;
+	create_params.reg_params.notify_cb = rndis_ipa_rm_notify;
+	result = ipa_rm_create_resource(&create_params);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail on ipa_rm_create_resource\n");
+		goto fail_rm_create;
+	}
+	RNDIS_IPA_DEBUG("RM client was created\n");
+
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ipa_rm_set_perf_profile(DRV_RESOURCE_ID, &profile);
+
+	result = ipa_rm_inactivity_timer_init
+		(DRV_RESOURCE_ID,
+		INACTIVITY_MSEC_DELAY);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n");
+		goto fail_inactivity_timer;
+	}
+
+	RNDIS_IPA_DEBUG("rm_it client was created\n");
+
+	result = ipa_rm_add_dependency_sync
+		(DRV_RESOURCE_ID,
+		IPA_RM_RESOURCE_USB_CONS);
+
+	if (result && result != -EINPROGRESS)
+		RNDIS_IPA_ERROR("unable to add RNDIS/USB dependency (%d)\n",
+			result);
+	else
+		RNDIS_IPA_DEBUG("RNDIS/USB dependency was set\n");
+
+	result = ipa_rm_add_dependency_sync
+		(IPA_RM_RESOURCE_USB_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+	if (result && result != -EINPROGRESS)
+		RNDIS_IPA_ERROR("unable to add USB/APPS dependency (%d)\n",
+			result);
+	else
+		RNDIS_IPA_DEBUG("USB/APPS dependency was set\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+
+fail_inactivity_timer:
+fail_rm_create:
+	return result;
+}
+
+/**
+ * rndis_ipa_destroy_rm_resource() - delete the dependency and destroy
+ * the resource created in rndis_ipa_create_rm_resource()
+ * @rndis_ipa_ctx: this driver context
+ *
+ * This function shall delete the dependency created between
+ * the Netdev and the USB.
+ * In addition the inactivity timer shall be destroyed and the resource shall
+ * be deleted.
+ */
+static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	int result;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	result = ipa_rm_delete_dependency
+		(DRV_RESOURCE_ID,
+		IPA_RM_RESOURCE_USB_CONS);
+	if (result && result != -EINPROGRESS) {
+		RNDIS_IPA_ERROR("Fail to delete RNDIS/USB dependency\n");
+		goto bail;
+	}
+	RNDIS_IPA_DEBUG("RNDIS/USB dependency was successfully deleted\n");
+
+	result = ipa_rm_delete_dependency
+		(IPA_RM_RESOURCE_USB_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+	if (result == -EINPROGRESS) {
+		RNDIS_IPA_DEBUG("RM dependency deletion is in progress\n");
+	} else if (result) {
+		RNDIS_IPA_ERROR("Fail to delete USB/APPS dependency\n");
+		goto bail;
+	} else {
+		RNDIS_IPA_DEBUG("USB/APPS dependency was deleted\n");
+	}
+
+	result = ipa_rm_inactivity_timer_destroy(DRV_RESOURCE_ID);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail to destroy inactivity timer\n");
+		goto bail;
+	}
+	RNDIS_IPA_DEBUG("RM inactivity timer was successfully destroyed\n");
+
+	result = ipa_rm_delete_resource(DRV_RESOURCE_ID);
+	if (result) {
+		RNDIS_IPA_ERROR("resource deletion failed\n");
+		goto bail;
+	}
+	RNDIS_IPA_DEBUG
+		("Netdev RM resource was deleted (resid:%d)\n",
+		DRV_RESOURCE_ID);
+
+	RNDIS_IPA_LOG_EXIT();
+
+bail:
+	return result;
+}
+
+/**
+ * resource_request() - request for the Netdev resource
+ * @rndis_ipa_ctx: main driver context
+ *
+ * This function shall send the IPA resource manager inactivity timer a request
+ * to Grant the Netdev producer.
+ * In case the resource is already Granted the function shall return
+ * immediately and "pet" the inactivity timer.
+ * In case the resource was not already Granted this function shall
+ * return EINPROGRESS and the Netdev shall stop the send queue until
+ * the IPA resource manager notifies it that the resource is
+ * granted (done in a deferred context)
+ */
+static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	int result = 0;
+
+	if (!rm_enabled(rndis_ipa_ctx))
+		goto out;
+	result = ipa_rm_inactivity_timer_request_resource(
+			DRV_RESOURCE_ID);
+out:
+	return result;
+}
+
+/**
+ * resource_release() - release the Netdev resource
+ * @rndis_ipa_ctx: main driver context
+ *
+ * Start the inactivity timer countdown by using the IPA resource
+ * manager inactivity timer.
+ * The actual resource release shall occur only if no request is made
+ * during the INACTIVITY_MSEC_DELAY period.
+ */
+static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	if (!rm_enabled(rndis_ipa_ctx))
+		goto out;
+	ipa_rm_inactivity_timer_release_resource(DRV_RESOURCE_ID);
+out:
+	return;
+}
+
+/**
+ * rndis_encapsulate_skb() - encapsulate the given Ethernet skb with
+ *  an RNDIS header
+ * @skb: packet to be encapsulated with the RNDIS header
+ *
+ * Shall use a template header for RNDIS and update it with the given
+ * skb values.
+ * The packet is expected to already be encapsulated with an Ethernet header.
+ */
+static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb)
+{
+	struct rndis_pkt_hdr *rndis_hdr;
+	int payload_byte_len = skb->len;
+
+	/* if there is no room in this skb, allocate a new one */
+	if (unlikely(skb_headroom(skb) < sizeof(rndis_template_hdr))) {
+		struct sk_buff *new_skb = skb_copy_expand(skb,
+			sizeof(rndis_template_hdr), 0, GFP_ATOMIC);
+		if (!new_skb) {
+			RNDIS_IPA_ERROR("no memory for skb expand\n");
+			return skb;
+		}
+		RNDIS_IPA_DEBUG("skb expanded. old %pK new %pK\n",
+			skb, new_skb);
+		dev_kfree_skb_any(skb);
+		skb = new_skb;
+	}
+
+	/* make room at the head of the SKB to put the RNDIS header */
+	rndis_hdr = (struct rndis_pkt_hdr *)skb_push(skb,
+					sizeof(rndis_template_hdr));
+
+	memcpy(rndis_hdr, &rndis_template_hdr, sizeof(*rndis_hdr));
+	rndis_hdr->msg_len += payload_byte_len;
+	rndis_hdr->data_len += payload_byte_len;
+
+	return skb;
+}
+
+/**
+ * rx_filter() - logic that decides if the current skb is to be filtered out
+ * @skb: skb that may be sent up to the network stack
+ *
+ * This function shall do Rx packet filtering on the Netdev level.
+ */
+static bool rx_filter(struct sk_buff *skb)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev);
+
+	return rndis_ipa_ctx->rx_filter;
+}
+
+/**
+ * tx_filter() - logic that decides if the current skb is to be filtered out
+ * @skb: skb that may be sent to the USB core
+ *
+ * This function shall do Tx packet filtering on the Netdev level.
+ * ICMP filter bypass is possible to allow only ICMP packet to be + * sent (pings and etc) + */ + +static bool tx_filter(struct sk_buff *skb) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev); + bool is_icmp; + + if (likely(!rndis_ipa_ctx->tx_filter)) + return false; + + is_icmp = (skb->protocol == htons(ETH_P_IP) && + ip_hdr(skb)->protocol == IPPROTO_ICMP); + + if ((!rndis_ipa_ctx->icmp_filter) && is_icmp) + return false; + + return true; +} + +/** + * rm_enabled() - allow the use of resource manager Request/Release to + * be bypassed + * @rndis_ipa_ctx: main driver context + * + * By disabling the resource manager flag the Request for the Netdev resource + * shall be bypassed and the packet shall be sent. + * accordingly, Release request shall be bypass as well. + */ +static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + return rndis_ipa_ctx->rm_enable; +} + +/** + * rndis_ipa_ep_registers_cfg() - configure the USB endpoints + * @usb_to_ipa_hdl: handle received from ipa_connect which represents + * the USB to IPA end-point + * @ipa_to_usb_hdl: handle received from ipa_connect which represents + * the IPA to USB end-point + * @max_xfer_size_bytes_to_dev: the maximum size, in bytes, that the device + * expects to receive from the host. supplied on REMOTE_NDIS_INITIALIZE_CMPLT. + * @max_xfer_size_bytes_to_host: the maximum size, in bytes, that the host + * expects to receive from the device. supplied on REMOTE_NDIS_INITIALIZE_MSG. + * @mtu: the netdev MTU size, in bytes + * + * USB to IPA pipe: + * - de-aggregation + * - Remove Ethernet header + * - Remove RNDIS header + * - SRC NAT + * - Default routing(0) + * IPA to USB Pipe: + * - aggregation + * - Add Ethernet header + * - Add RNDIS header + */ +static int rndis_ipa_ep_registers_cfg( + u32 usb_to_ipa_hdl, + u32 ipa_to_usb_hdl, + u32 max_xfer_size_bytes_to_dev, + u32 max_xfer_size_bytes_to_host, + u32 mtu, + bool deaggr_enable) +{ + int result; + struct ipa_ep_cfg *usb_to_ipa_ep_cfg; + + if (deaggr_enable) { + usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_en; + RNDIS_IPA_DEBUG("deaggregation enabled\n"); + } else { + usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_dis; + RNDIS_IPA_DEBUG("deaggregation disabled\n"); + } + + usb_to_ipa_ep_cfg->deaggr.max_packet_len = max_xfer_size_bytes_to_dev; + result = ipa_cfg_ep(usb_to_ipa_hdl, usb_to_ipa_ep_cfg); + if (result) { + pr_err("failed to configure USB to IPA point\n"); + return result; + } + RNDIS_IPA_DEBUG("IPA<-USB end-point configured\n"); + + ipa_to_usb_ep_cfg.aggr.aggr_byte_limit = + (max_xfer_size_bytes_to_host - mtu) / 1024; + + if (ipa_to_usb_ep_cfg.aggr.aggr_byte_limit == 0) { + ipa_to_usb_ep_cfg.aggr.aggr_time_limit = 0; + ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit = 1; + } else { + ipa_to_usb_ep_cfg.aggr.aggr_time_limit = + DEFAULT_AGGR_TIME_LIMIT; + ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit = + DEFAULT_AGGR_PKT_LIMIT; + } + + RNDIS_IPA_DEBUG( + "RNDIS aggregation param: en=%d byte_limit=%d time_limit=%d pkt_limit=%d\n" + , ipa_to_usb_ep_cfg.aggr.aggr_en, + ipa_to_usb_ep_cfg.aggr.aggr_byte_limit, + ipa_to_usb_ep_cfg.aggr.aggr_time_limit, + ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit); + + result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg); + if (result) { + pr_err("failed to configure IPA to USB end-point\n"); + return result; + } + RNDIS_IPA_DEBUG("IPA->USB end-point configured\n"); + + return 0; +} + +/** + * rndis_ipa_set_device_ethernet_addr() - set device Ethernet address + * @dev_ethaddr: device Ethernet address + * + * Returns 0 for success, negative 
otherwise + */ +static int rndis_ipa_set_device_ethernet_addr( + u8 *dev_ethaddr, + u8 device_ethaddr[]) +{ + if (!is_valid_ether_addr(device_ethaddr)) + return -EINVAL; + memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN); + + return 0; +} + +/** rndis_ipa_next_state - return the next state of the driver + * @current_state: the current state of the driver + * @operation: an enum which represent the operation being made on the driver + * by its API. + * + * This function implements the driver internal state machine. + * Its decisions are based on the driver current state and the operation + * being made. + * In case the operation is invalid this state machine will return + * the value RNDIS_IPA_INVALID to inform the caller for a forbidden sequence. + */ +static enum rndis_ipa_state rndis_ipa_next_state( + enum rndis_ipa_state current_state, + enum rndis_ipa_operation operation) +{ + int next_state = RNDIS_IPA_INVALID; + + switch (current_state) { + case RNDIS_IPA_UNLOADED: + if (operation == RNDIS_IPA_INITIALIZE) + next_state = RNDIS_IPA_INITIALIZED; + break; + case RNDIS_IPA_INITIALIZED: + if (operation == RNDIS_IPA_CONNECT) + next_state = RNDIS_IPA_CONNECTED; + else if (operation == RNDIS_IPA_OPEN) + next_state = RNDIS_IPA_UP; + else if (operation == RNDIS_IPA_CLEANUP) + next_state = RNDIS_IPA_UNLOADED; + break; + case RNDIS_IPA_CONNECTED: + if (operation == RNDIS_IPA_DISCONNECT) + next_state = RNDIS_IPA_INITIALIZED; + else if (operation == RNDIS_IPA_OPEN) + next_state = RNDIS_IPA_CONNECTED_AND_UP; + break; + case RNDIS_IPA_UP: + if (operation == RNDIS_IPA_STOP) + next_state = RNDIS_IPA_INITIALIZED; + else if (operation == RNDIS_IPA_CONNECT) + next_state = RNDIS_IPA_CONNECTED_AND_UP; + else if (operation == RNDIS_IPA_CLEANUP) + next_state = RNDIS_IPA_UNLOADED; + break; + case RNDIS_IPA_CONNECTED_AND_UP: + if (operation == RNDIS_IPA_STOP) + next_state = RNDIS_IPA_CONNECTED; + else if (operation == RNDIS_IPA_DISCONNECT) + next_state = RNDIS_IPA_UP; + break; + default: + RNDIS_IPA_ERROR("State is not supported\n"); + break; + } + + RNDIS_IPA_DEBUG + ("state transition ( %s -> %s )- %s\n", + rndis_ipa_state_string(current_state), + rndis_ipa_state_string(next_state), + next_state == RNDIS_IPA_INVALID ? 
+ "Forbidden" : "Allowed"); + + return next_state; +} + +/** + * rndis_ipa_state_string - return the state string representation + * @state: enum which describe the state + */ +static const char *rndis_ipa_state_string(enum rndis_ipa_state state) +{ + switch (state) { + case RNDIS_IPA_UNLOADED: + return "RNDIS_IPA_UNLOADED"; + case RNDIS_IPA_INITIALIZED: + return "RNDIS_IPA_INITIALIZED"; + case RNDIS_IPA_CONNECTED: + return "RNDIS_IPA_CONNECTED"; + case RNDIS_IPA_UP: + return "RNDIS_IPA_UP"; + case RNDIS_IPA_CONNECTED_AND_UP: + return "RNDIS_IPA_CONNECTED_AND_UP"; + default: + return "Not supported"; + } +} + +static void rndis_ipa_dump_skb(struct sk_buff *skb) +{ + int i; + u32 *cur = (u32 *)skb->data; + u8 *byte; + + RNDIS_IPA_DEBUG + ("packet dump start for skb->len=%d\n", + skb->len); + + for (i = 0; i < (skb->len / 4); i++) { + byte = (u8 *)(cur + i); + pr_info + ("%2d %08x %02x %02x %02x %02x\n", + i, *(cur + i), + byte[0], byte[1], byte[2], byte[3]); + } + RNDIS_IPA_DEBUG + ("packet dump ended for skb->len=%d\n", skb->len); +} + +#ifdef CONFIG_DEBUG_FS +/** + * Creates the root folder for the driver + */ +static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + const mode_t flags_read_write = 0666; + const mode_t flags_read_only = 0444; + const mode_t flags_write_only = 0222; + struct dentry *file; + struct dentry *aggr_directory; + + RNDIS_IPA_LOG_ENTRY(); + + if (!rndis_ipa_ctx) + return; + + rndis_ipa_ctx->directory = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); + if (!rndis_ipa_ctx->directory) { + RNDIS_IPA_ERROR("could not create debugfs directory entry\n"); + goto fail_directory; + } + + file = debugfs_create_bool + ("tx_filter", flags_read_write, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->tx_filter); + if (!file) { + RNDIS_IPA_ERROR("could not create debugfs tx_filter file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("rx_filter", flags_read_write, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->rx_filter); + if (!file) { + RNDIS_IPA_ERROR("could not create debugfs rx_filter file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("icmp_filter", flags_read_write, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->icmp_filter); + if (!file) { + RNDIS_IPA_ERROR("could not create debugfs icmp_filter file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("rm_enable", flags_read_write, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->rm_enable); + if (!file) { + RNDIS_IPA_ERROR("could not create debugfs rm file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("outstanding_high", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->outstanding_high); + if (!file) { + RNDIS_IPA_ERROR("could not create outstanding_high file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("outstanding_low", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->outstanding_low); + if (!file) { + RNDIS_IPA_ERROR("could not create outstanding_low file\n"); + goto fail_file; + } + + file = debugfs_create_file + ("outstanding", flags_read_only, + rndis_ipa_ctx->directory, + rndis_ipa_ctx, &rndis_ipa_debugfs_atomic_ops); + if (!file) { + RNDIS_IPA_ERROR("could not create outstanding file\n"); + goto fail_file; + } + + file = debugfs_create_u8 + ("state", flags_read_only, + rndis_ipa_ctx->directory, (u8 *)&rndis_ipa_ctx->state); + if (!file) { + RNDIS_IPA_ERROR("could not create state file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("tx_dropped", flags_read_only, + rndis_ipa_ctx->directory, 
&rndis_ipa_ctx->tx_dropped); + if (!file) { + RNDIS_IPA_ERROR("could not create tx_dropped file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("rx_dropped", flags_read_only, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->rx_dropped); + if (!file) { + RNDIS_IPA_ERROR("could not create rx_dropped file\n"); + goto fail_file; + } + + aggr_directory = debugfs_create_dir + (DEBUGFS_AGGR_DIR_NAME, + rndis_ipa_ctx->directory); + if (!aggr_directory) { + RNDIS_IPA_ERROR("could not create debugfs aggr entry\n"); + goto fail_directory; + } + + file = debugfs_create_file + ("aggr_value_set", flags_write_only, + aggr_directory, + rndis_ipa_ctx, &rndis_ipa_aggr_ops); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_value_set file\n"); + goto fail_file; + } + + file = debugfs_create_u8 + ("aggr_enable", flags_read_write, + aggr_directory, (u8 *)&ipa_to_usb_ep_cfg.aggr.aggr_en); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_enable file\n"); + goto fail_file; + } + + file = debugfs_create_u8 + ("aggr_type", flags_read_write, + aggr_directory, (u8 *)&ipa_to_usb_ep_cfg.aggr.aggr); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_type file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("aggr_byte_limit", flags_read_write, + aggr_directory, + &ipa_to_usb_ep_cfg.aggr.aggr_byte_limit); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_byte_limit file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("aggr_time_limit", flags_read_write, + aggr_directory, + &ipa_to_usb_ep_cfg.aggr.aggr_time_limit); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_time_limit file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("aggr_pkt_limit", flags_read_write, + aggr_directory, + &ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit); + if (!file) { + RNDIS_IPA_ERROR("could not create aggr_pkt_limit file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("tx_dump_enable", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->tx_dump_enable); + if (!file) { + RNDIS_IPA_ERROR("fail to create tx_dump_enable file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("rx_dump_enable", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->rx_dump_enable); + if (!file) { + RNDIS_IPA_ERROR("fail to create rx_dump_enable file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("deaggregation_enable", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->deaggregation_enable); + if (!file) { + RNDIS_IPA_ERROR("fail to create deaggregation_enable file\n"); + goto fail_file; + } + + file = debugfs_create_u32 + ("error_msec_sleep_time", flags_read_write, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->error_msec_sleep_time); + if (!file) { + RNDIS_IPA_ERROR("fail to create error_msec_sleep_time file\n"); + goto fail_file; + } + + file = debugfs_create_bool + ("during_xmit_error", flags_read_only, + rndis_ipa_ctx->directory, + &rndis_ipa_ctx->during_xmit_error); + if (!file) { + RNDIS_IPA_ERROR("fail to create during_xmit_error file\n"); + goto fail_file; + } + + RNDIS_IPA_DEBUG("debugfs entries were created\n"); + RNDIS_IPA_LOG_EXIT(); + + return; +fail_file: + debugfs_remove_recursive(rndis_ipa_ctx->directory); +fail_directory: + return; +} + +static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + debugfs_remove_recursive(rndis_ipa_ctx->directory); +} + +#else /* !CONFIG_DEBUG_FS */ + +static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx) {} + +static void 
rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx) {} + +#endif /* CONFIG_DEBUG_FS*/ + +static int rndis_ipa_debugfs_aggr_open + (struct inode *inode, + struct file *file) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private; + + file->private_data = rndis_ipa_ctx; + + return 0; +} + +static ssize_t rndis_ipa_debugfs_aggr_write + (struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = file->private_data; + int result; + + result = ipa_cfg_ep(rndis_ipa_ctx->usb_to_ipa_hdl, &ipa_to_usb_ep_cfg); + if (result) { + pr_err("failed to re-configure USB to IPA point\n"); + return result; + } + pr_info("IPA<-USB end-point re-configured\n"); + + return count; +} + +static int rndis_ipa_debugfs_atomic_open(struct inode *inode, struct file *file) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private; + + RNDIS_IPA_LOG_ENTRY(); + + file->private_data = &rndis_ipa_ctx->outstanding_pkts; + + RNDIS_IPA_LOG_EXIT(); + + return 0; +} + +static ssize_t rndis_ipa_debugfs_atomic_read + (struct file *file, char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes; + u8 atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0}; + atomic_t *atomic_var = file->private_data; + + RNDIS_IPA_LOG_ENTRY(); + + nbytes = scnprintf + (atomic_str, sizeof(atomic_str), "%d\n", + atomic_read(atomic_var)); + + RNDIS_IPA_LOG_EXIT(); + + return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes); +} + +static int rndis_ipa_init_module(void) +{ + pr_info("RNDIS_IPA module is loaded."); + return 0; +} + +static void rndis_ipa_cleanup_module(void) +{ + pr_info("RNDIS_IPA module is unloaded."); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("RNDIS_IPA network interface"); + +late_initcall(rndis_ipa_init_module); +module_exit(rndis_ipa_cleanup_module); diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h new file mode 100644 index 000000000000..a888a1f76052 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h @@ -0,0 +1,81 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rndis_ipa +#define TRACE_INCLUDE_FILE rndis_ipa_trace + +#if !defined(_RNDIS_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _RNDIS_IPA_TRACE_H + +#include + +TRACE_EVENT( + rndis_netif_ni, + + TP_PROTO(unsigned long proto), + + TP_ARGS(proto), + + TP_STRUCT__entry( + __field(unsigned long, proto) + ), + + TP_fast_assign( + __entry->proto = proto; + ), + + TP_printk("proto =%lu\n", __entry->proto) +); + +TRACE_EVENT( + rndis_tx_dp, + + TP_PROTO(unsigned long proto), + + TP_ARGS(proto), + + TP_STRUCT__entry( + __field(unsigned long, proto) + ), + + TP_fast_assign( + __entry->proto = proto; + ), + + TP_printk("proto =%lu\n", __entry->proto) +); + +TRACE_EVENT( + rndis_status_rcvd, + + TP_PROTO(unsigned long proto), + + TP_ARGS(proto), + + TP_STRUCT__entry( + __field(unsigned long, proto) + ), + + TP_fast_assign( + __entry->proto = proto; + ), + + TP_printk("proto =%lu\n", __entry->proto) +); + +#endif /* _RNDIS_IPA_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h new file mode 100644 index 000000000000..32c8b251f5bc --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -0,0 +1,389 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#ifndef _IPA_COMMON_I_H_ +#define _IPA_COMMON_I_H_ +#include +#include +#include + +#define __FILENAME__ \ + (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) + +#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = EP; \ + log_info.id_string = (client < 0 || client >= IPA_CLIENT_MAX) \ + ? 
"Invalid Client" : ipa_clients_strings[client] + +#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = SIMPLE; \ + log_info.id_string = __func__ + +#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = RESOURCE; \ + log_info.id_string = resource_name + +#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \ + log_info.file = __FILENAME__; \ + log_info.line = __LINE__; \ + log_info.type = SPECIAL; \ + log_info.id_string = id_str + +#define IPA_ACTIVE_CLIENTS_INC_EP(client) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \ + ipa_inc_client_enable_clks(&log_info); \ + } while (0) + +#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \ + do { \ + struct ipa_active_client_logging_info log_info; \ + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \ + ipa_dec_client_disable_clks(&log_info); \ + } while (0) + +#define ipa_assert_on(condition)\ +do {\ + if (unlikely(condition))\ + ipa_assert();\ +} while (0) + +#define IPA_CLIENT_IS_PROD(x) \ + (x < IPA_CLIENT_MAX && (x & 0x1) == 0) +#define IPA_CLIENT_IS_CONS(x) \ + (x < IPA_CLIENT_MAX && (x & 0x1) == 1) + +#define IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC (1000) +#define IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC (2000) + +enum ipa_active_client_log_type { + EP, + SIMPLE, + RESOURCE, + SPECIAL, + INVALID +}; + +struct ipa_active_client_logging_info { + const char *id_string; + char *file; + int line; + enum ipa_active_client_log_type type; +}; + +/** + * struct ipa_mem_buffer - IPA memory buffer + * @base: base + * @phys_base: physical base address + * @size: size of memory buffer + */ +struct ipa_mem_buffer { + void *base; + dma_addr_t phys_base; + u32 size; +}; + +#define IPA_MHI_GSI_ER_START 10 +#define IPA_MHI_GSI_ER_END 16 + +/** + * enum ipa3_mhi_burst_mode - MHI channel burst mode state + * + * Values are according to MHI specification + * @IPA_MHI_BURST_MODE_DEFAULT: burst mode enabled for HW channels, + * disabled for SW channels + * @IPA_MHI_BURST_MODE_RESERVED: + * @IPA_MHI_BURST_MODE_DISABLE: 
Burst mode is disabled for this channel + * @IPA_MHI_BURST_MODE_ENABLE: Burst mode is enabled for this channel + * + */ +enum ipa3_mhi_burst_mode { + IPA_MHI_BURST_MODE_DEFAULT, + IPA_MHI_BURST_MODE_RESERVED, + IPA_MHI_BURST_MODE_DISABLE, + IPA_MHI_BURST_MODE_ENABLE, +}; + +/** + * enum ipa_hw_mhi_channel_states - MHI channel state machine + * + * Values are according to MHI specification + * @IPA_HW_MHI_CHANNEL_STATE_DISABLE: Channel is disabled and not processed by + * the host or device. + * @IPA_HW_MHI_CHANNEL_STATE_ENABLE: A channel is enabled after being + * initialized and configured by host, including its channel context and + * associated transfer ring. While this state, the channel is not active + * and the device does not process transfer. + * @IPA_HW_MHI_CHANNEL_STATE_RUN: The device processes transfers and doorbell + * for channels. + * @IPA_HW_MHI_CHANNEL_STATE_SUSPEND: Used to halt operations on the channel. + * The device does not process transfers for the channel in this state. + * This state is typically used to synchronize the transition to low power + * modes. + * @IPA_HW_MHI_CHANNEL_STATE_STOP: Used to halt operations on the channel. + * The device does not process transfers for the channel in this state. + * @IPA_HW_MHI_CHANNEL_STATE_ERROR: The device detected an error in an element + * from the transfer ring associated with the channel. + * @IPA_HW_MHI_CHANNEL_STATE_INVALID: Invalid state. Shall not be in use in + * operational scenario. + */ +enum ipa_hw_mhi_channel_states { + IPA_HW_MHI_CHANNEL_STATE_DISABLE = 0, + IPA_HW_MHI_CHANNEL_STATE_ENABLE = 1, + IPA_HW_MHI_CHANNEL_STATE_RUN = 2, + IPA_HW_MHI_CHANNEL_STATE_SUSPEND = 3, + IPA_HW_MHI_CHANNEL_STATE_STOP = 4, + IPA_HW_MHI_CHANNEL_STATE_ERROR = 5, + IPA_HW_MHI_CHANNEL_STATE_INVALID = 0xFF +}; + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO + * command. Parameters are sent as 32b immediate parameters. 
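+ * The union below therefore fits in a single 32-bit word: it can be filled
+ * field by field through the params bit-field view and then read or written
+ * as one word through raw32b.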
+ * @isDlUlSyncEnabled: Flag to indicate if DL UL Syncronization is enabled + * @UlAccmVal: UL Timer Accumulation value (Period after which device will poll + * for UL data) + * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events + * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events + */ +union IpaHwMhiDlUlSyncCmdData_t { + struct IpaHwMhiDlUlSyncCmdParams_t { + u32 isDlUlSyncEnabled:8; + u32 UlAccmVal:8; + u32 ulMsiEventThreshold:8; + u32 dlMsiEventThreshold:8; + } params; + u32 raw32b; +}; + +struct ipa_mhi_ch_ctx { + u8 chstate;/*0-7*/ + u8 brstmode:2;/*8-9*/ + u8 pollcfg:6;/*10-15*/ + u16 rsvd;/*16-31*/ + u32 chtype; + u32 erindex; + u64 rbase; + u64 rlen; + u64 rp; + u64 wp; +} __packed; + +struct ipa_mhi_ev_ctx { + u32 intmodc:16; + u32 intmodt:16; + u32 ertype; + u32 msivec; + u64 rbase; + u64 rlen; + u64 rp; + u64 wp; +} __packed; + +struct ipa_mhi_init_uc_engine { + struct ipa_mhi_msi_info *msi; + u32 mmio_addr; + u32 host_ctrl_addr; + u32 host_data_addr; + u32 first_ch_idx; + u32 first_er_idx; + union IpaHwMhiDlUlSyncCmdData_t *ipa_cached_dl_ul_sync_info; +}; + +struct ipa_mhi_init_gsi_engine { + u32 first_ch_idx; +}; + +struct ipa_mhi_init_engine { + struct ipa_mhi_init_uc_engine uC; + struct ipa_mhi_init_gsi_engine gsi; +}; + +struct start_gsi_channel { + enum ipa_hw_mhi_channel_states state; + struct ipa_mhi_msi_info *msi; + struct ipa_mhi_ev_ctx *ev_ctx_host; + u64 event_context_addr; + struct ipa_mhi_ch_ctx *ch_ctx_host; + u64 channel_context_addr; + void (*ch_err_cb)(struct gsi_chan_err_notify *notify); + void (*ev_err_cb)(struct gsi_evt_err_notify *notify); + void *channel; + bool assert_bit40; + struct gsi_mhi_channel_scratch *mhi; + unsigned long *cached_gsi_evt_ring_hdl; + uint8_t evchid; +}; + +struct start_uc_channel { + enum ipa_hw_mhi_channel_states state; + u8 index; + u8 id; +}; + +struct start_mhi_channel { + struct start_uc_channel uC; + struct start_gsi_channel gsi; +}; + +struct ipa_mhi_connect_params_internal { + struct ipa_sys_connect_params *sys; + u8 channel_id; + struct start_mhi_channel start; +}; + +/** + * struct ipa_hdr_offset_entry - IPA header offset entry + * @link: entry's link in global header offset entries list + * @offset: the offset + * @bin: bin + */ +struct ipa_hdr_offset_entry { + struct list_head link; + u32 offset; + u32 bin; +}; + +extern const char *ipa_clients_strings[]; + +#define IPA_IPC_LOGGING(buf, fmt, args...) 
\ + do { \ + if (buf) \ + ipc_log_string((buf), fmt, __func__, __LINE__, \ + ## args); \ + } while (0) + +void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id); +void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id); +int ipa_inc_client_enable_clks_no_block( + struct ipa_active_client_logging_info *id); +int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource); +int ipa_resume_resource(enum ipa_rm_resource_name name); +int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource); +int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps); +void *ipa_get_ipc_logbuf(void); +void *ipa_get_ipc_logbuf_low(void); +void ipa_assert(void); + +/* MHI */ +int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params); +int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl); +int ipa_disconnect_mhi_pipe(u32 clnt_hdl); +bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client); +int ipa_qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req); +int ipa_qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req); +int ipa_generate_tag_process(void); +int ipa_disable_sps_pipe(enum ipa_client_type client); +int ipa_mhi_reset_channel_internal(enum ipa_client_type client); +int ipa_mhi_start_channel_internal(enum ipa_client_type client); +bool ipa_mhi_sps_channel_empty(enum ipa_client_type client); +int ipa_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index); +int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req); +int ipa_mhi_query_ch_info(enum ipa_client_type client, + struct gsi_chan_info *ch_info); +int ipa_mhi_destroy_channel(enum ipa_client_type client); +int ipa_mhi_is_using_dma(bool *flag); +const char *ipa_mhi_get_state_str(int state); + +/* MHI uC */ +int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd); +int ipa_uc_mhi_init + (void (*ready_cb)(void), void (*wakeup_request_cb)(void)); +void ipa_uc_mhi_cleanup(void); +int ipa_uc_mhi_reset_channel(int channelHandle); +int ipa_uc_mhi_suspend_channel(int channelHandle); +int ipa_uc_mhi_stop_event_update_channel(int channelHandle); +int ipa_uc_mhi_print_stats(char *dbg_buff, int size); + +/* uC */ +int ipa_uc_state_check(void); + +/* general */ +void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb); +void ipa_set_tag_process_before_gating(bool val); +bool ipa_has_open_aggr_frame(enum ipa_client_type client); +int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp); + +int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl); +u8 *ipa_write_64(u64 w, u8 *dest); +u8 *ipa_write_32(u32 w, u8 *dest); +u8 *ipa_write_16(u16 hw, u8 *dest); +u8 *ipa_write_8(u8 b, u8 *dest); +u8 *ipa_pad_to_64(u8 *dest); +u8 *ipa_pad_to_32(u8 *dest); +int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data), + void *user_data); +void ipa_ntn_uc_dereg_rdyCB(void); +const char *ipa_get_version_string(enum ipa_hw_type ver); +int ipa_start_gsi_channel(u32 clnt_hdl); + +#endif /* _IPA_COMMON_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c new file mode 100644 index 000000000000..24d86636ffee --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm.c @@ -0,0 +1,1191 
@@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include "ipa_rm_dependency_graph.h" +#include "ipa_rm_i.h" +#include "ipa_common_i.h" + +static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = { + __stringify(IPA_RM_RESOURCE_Q6_PROD), + __stringify(IPA_RM_RESOURCE_Q6_CONS), + __stringify(IPA_RM_RESOURCE_USB_PROD), + __stringify(IPA_RM_RESOURCE_USB_CONS), + __stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD), + __stringify(IPA_RM_RESOURCE_USB_DPL_CONS), + __stringify(IPA_RM_RESOURCE_HSIC_PROD), + __stringify(IPA_RM_RESOURCE_HSIC_CONS), + __stringify(IPA_RM_RESOURCE_STD_ECM_PROD), + __stringify(IPA_RM_RESOURCE_APPS_CONS), + __stringify(IPA_RM_RESOURCE_RNDIS_PROD), + __stringify(RESERVED_CONS_11), + __stringify(IPA_RM_RESOURCE_WWAN_0_PROD), + __stringify(RESERVED_CONS_13), + __stringify(IPA_RM_RESOURCE_WLAN_PROD), + __stringify(IPA_RM_RESOURCE_WLAN_CONS), + __stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD), + __stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS), + __stringify(IPA_RM_RESOURCE_MHI_PROD), + __stringify(IPA_RM_RESOURCE_MHI_CONS), + __stringify(IPA_RM_RESOURCE_ETHERNET_PROD), + __stringify(IPA_RM_RESOURCE_ETHERNET_CONS), +}; + +struct ipa_rm_profile_vote_type { + enum ipa_voltage_level volt[IPA_RM_RESOURCE_MAX]; + enum ipa_voltage_level curr_volt; + u32 bw_resources[IPA_RM_RESOURCE_MAX]; + u32 curr_bw; +}; + +struct ipa_rm_context_type { + struct ipa_rm_dep_graph *dep_graph; + struct workqueue_struct *ipa_rm_wq; + spinlock_t ipa_rm_lock; + struct ipa_rm_profile_vote_type prof_vote; +}; +static struct ipa_rm_context_type *ipa_rm_ctx; + +struct ipa_rm_notify_ipa_work_type { + struct work_struct work; + enum ipa_voltage_level volt; + u32 bandwidth_mbps; +}; + +/** + * ipa_rm_create_resource() - create resource + * @create_params: [in] parameters needed + * for resource initialization + * + * Returns: 0 on success, negative on failure + * + * This function is called by IPA RM client to initialize client's resources. + * This API should be called before any other IPA RM API on a given resource + * name. 
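+ *
+ * A minimal usage sketch (illustrative only, not part of the original
+ * snapshot; a real client typically also fills the notification and
+ * request/release callbacks in create_params before calling this API):
+ *
+ *      struct ipa_rm_create_params create_params;
+ *
+ *      memset(&create_params, 0, sizeof(create_params));
+ *      create_params.name = IPA_RM_RESOURCE_USB_PROD;
+ *      create_params.floor_voltage = IPA_VOLTAGE_UNSPECIFIED;
+ *      if (ipa_rm_create_resource(&create_params))
+ *              pr_err("failed to create USB_PROD resource\n");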
+ * + */ +int ipa_rm_create_resource(struct ipa_rm_create_params *create_params) +{ + struct ipa_rm_resource *resource; + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + if (!create_params) { + IPA_RM_ERR("invalid args\n"); + return -EINVAL; + } + IPA_RM_DBG("%s\n", ipa_rm_resource_str(create_params->name)); + + if (create_params->floor_voltage < 0 || + create_params->floor_voltage >= IPA_VOLTAGE_MAX) { + IPA_RM_ERR("invalid voltage %d\n", + create_params->floor_voltage); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + create_params->name, + &resource) == 0) { + IPA_RM_ERR("resource already exists\n"); + result = -EEXIST; + goto bail; + } + result = ipa_rm_resource_create(create_params, + &resource); + if (result) { + IPA_RM_ERR("ipa_rm_resource_create() failed\n"); + goto bail; + } + result = ipa_rm_dep_graph_add(ipa_rm_ctx->dep_graph, resource); + if (result) { + IPA_RM_ERR("ipa_rm_dep_graph_add() failed\n"); + ipa_rm_resource_delete(resource); + goto bail; + } +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_create_resource); + +/** + * ipa_rm_delete_resource() - delete resource + * @resource_name: name of resource to be deleted + * + * Returns: 0 on success, negative on failure + * + * This function is called by IPA RM client to delete client's resources. + * + */ +int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name) +{ + struct ipa_rm_resource *resource; + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exist\n"); + result = -EINVAL; + goto bail; + } + result = ipa_rm_resource_delete(resource); + if (result) { + IPA_RM_ERR("ipa_rm_resource_delete() failed\n"); + goto bail; + } + result = ipa_rm_dep_graph_remove(ipa_rm_ctx->dep_graph, + resource_name); + if (result) { + IPA_RM_ERR("ipa_rm_dep_graph_remove() failed\n"); + goto bail; + } +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_delete_resource); + +static int _ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspace_dep) +{ + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + result = ipa_rm_dep_graph_add_dependency( + ipa_rm_ctx->dep_graph, + resource_name, + depends_on_name, + userspace_dep); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_add_dependency() - create dependency between 2 resources + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * Returns: 0 on success, negative on failure + * + * Side effects: IPA_RM_RESORCE_GRANTED could be generated + * in case 
client registered with IPA RM + */ +int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_add_dependency(resource_name, depends_on_name, false); +} +EXPORT_SYMBOL(ipa_rm_add_dependency); + +/** + * ipa_rm_add_dependency_from_ioctl() - create dependency between 2 resources + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * This function is expected to be called from IOCTL and the dependency will be + * marked as is was added by the userspace. + * + * Returns: 0 on success, negative on failure + * + * Side effects: IPA_RM_RESORCE_GRANTED could be generated + * in case client registered with IPA RM + */ +int ipa_rm_add_dependency_from_ioctl(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_add_dependency(resource_name, depends_on_name, true); +} + +static int _ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspsace_dep) +{ + int result; + struct ipa_rm_resource *consumer; + unsigned long time; + unsigned long flags; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + result = ipa_rm_dep_graph_add_dependency( + ipa_rm_ctx->dep_graph, + resource_name, + depends_on_name, + userspsace_dep); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + if (result == -EINPROGRESS) { + ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + depends_on_name, + &consumer); + IPA_RM_DBG("%s waits for GRANT of %s.\n", + ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name)); + time = wait_for_completion_timeout( + &((struct ipa_rm_resource_cons *)consumer)-> + request_consumer_in_progress, + HZ * 5); + result = 0; + if (!time) { + IPA_RM_ERR("TIMEOUT waiting for %s GRANT event.", + ipa_rm_resource_str(depends_on_name)); + result = -ETIME; + } else { + IPA_RM_DBG("%s waited for %s GRANT %lu time.\n", + ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name), + time); + } + } + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +/** + * ipa_rm_add_dependency_sync() - Create a dependency between 2 resources + * in a synchronized fashion. In case a producer resource is in GRANTED state + * and the newly added consumer resource is in RELEASED state, the consumer + * entity will be requested and the function will block until the consumer + * is granted. + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * This function is expected to be called from IOCTL and the dependency will be + * marked as is was added by the userspace. + * + * Returns: 0 on success, negative on failure + * + * Side effects: May block. See documentation above. + */ +int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_add_dependency_sync(resource_name, depends_on_name, + false); +} +EXPORT_SYMBOL(ipa_rm_add_dependency_sync); + +/** + * ipa_rm_add_dependency_sync_from_ioctl() - Create a dependency between 2 + * resources in a synchronized fashion. 
In case a producer resource is in + * GRANTED state and the newly added consumer resource is in RELEASED state, + * the consumer entity will be requested and the function will block until + * the consumer is granted. + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * Returns: 0 on success, negative on failure + * + * Side effects: May block. See documentation above. + */ +int ipa_rm_add_dependency_sync_from_ioctl( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_add_dependency_sync(resource_name, depends_on_name, + true); +} + +static int _ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspace_dep) +{ + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name), + ipa_rm_resource_str(depends_on_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + result = ipa_rm_dep_graph_delete_dependency( + ipa_rm_ctx->dep_graph, + resource_name, + depends_on_name, + userspace_dep); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_delete_dependency() - delete dependency between 2 resources + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * Returns: 0 on success, negative on failure + * + * Side effects: IPA_RM_RESORCE_GRANTED could be generated + * in case client registered with IPA RM + */ +int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_delete_dependency(resource_name, depends_on_name, false); +} +EXPORT_SYMBOL(ipa_rm_delete_dependency); + +/** + * ipa_rm_delete_dependency_fron_ioctl() - delete dependency between 2 resources + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + * + * This function is expected to be called from IOCTL and the dependency will be + * marked as is was added by the userspace. + * + * Returns: 0 on success, negative on failure + * + * Side effects: IPA_RM_RESORCE_GRANTED could be generated + * in case client registered with IPA RM + */ +int ipa_rm_delete_dependency_from_ioctl(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return _ipa_rm_delete_dependency(resource_name, depends_on_name, true); +} + +/** + * ipa_rm_request_resource() - request resource + * @resource_name: [in] name of the requested resource + * + * Returns: 0 on success, negative on failure + * + * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED + * on successful completion of this operation. 
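+ *
+ * Illustrative sketch (not part of the original snapshot): when the grant
+ * completes asynchronously the call is expected to return -EINPROGRESS and
+ * the client simply waits for its GRANTED notification:
+ *
+ *      int ret = ipa_rm_request_resource(IPA_RM_RESOURCE_USB_PROD);
+ *
+ *      if (ret && ret != -EINPROGRESS)
+ *              pr_err("USB_PROD request failed %d\n", ret);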
+ */ +int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name) +{ + struct ipa_rm_resource *resource; + unsigned long flags; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exist\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_request( + (struct ipa_rm_resource_prod *)resource); + +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} +EXPORT_SYMBOL(ipa_rm_request_resource); + +void delayed_release_work_func(struct work_struct *work) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_delayed_release_work_type *rwork = container_of( + to_delayed_work(work), + struct ipa_rm_delayed_release_work_type, + work); + + if (!IPA_RM_RESORCE_IS_CONS(rwork->resource_name)) { + IPA_RM_ERR("can be called on CONS only\n"); + kfree(rwork); + return; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + rwork->resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + goto bail; + } + + ipa_rm_resource_consumer_release( + (struct ipa_rm_resource_cons *)resource, rwork->needed_bw, + rwork->dec_usage_count); + +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + kfree(rwork); + +} + +/** + * ipa_rm_request_resource_with_timer() - requests the specified consumer + * resource and releases it after 1 second + * @resource_name: name of the requested resource + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_delayed_release_work_type *release_work; + int result; + + if (!IPA_RM_RESORCE_IS_CONS(resource_name)) { + IPA_RM_ERR("can be called on CONS only\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_consumer_request( + (struct ipa_rm_resource_cons *)resource, 0, false, true); + if (result != 0 && result != -EINPROGRESS) { + IPA_RM_ERR("consumer request returned error %d\n", result); + result = -EPERM; + goto bail; + } + + release_work = kzalloc(sizeof(*release_work), GFP_ATOMIC); + if (!release_work) { + result = -ENOMEM; + goto bail; + } + release_work->resource_name = resource->name; + release_work->needed_bw = 0; + release_work->dec_usage_count = false; + INIT_DELAYED_WORK(&release_work->work, delayed_release_work_func); + schedule_delayed_work(&release_work->work, + msecs_to_jiffies(IPA_RM_RELEASE_DELAY_IN_MSEC)); + result = 0; +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} + +/** + * ipa_rm_release_resource() - release resource + * @resource_name: [in] name of the requested resource + * + * Returns: 0 on success, negative on failure + * + * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED + * on successful completion of this operation. 
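+ *
+ * Illustrative sketch (not part of the original snapshot) of the usual
+ * request/release pairing on a producer resource:
+ *
+ *      ipa_rm_request_resource(IPA_RM_RESOURCE_USB_PROD);
+ *      ... perform data transfer ...
+ *      ipa_rm_release_resource(IPA_RM_RESOURCE_USB_PROD);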
+ */ +int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_release( + (struct ipa_rm_resource_prod *)resource); + +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} +EXPORT_SYMBOL(ipa_rm_release_resource); + +/** + * ipa_rm_register() - register for event + * @resource_name: resource name + * @reg_params: [in] registration parameters + * + * Returns: 0 on success, negative on failure + * + * Registration parameters provided here should be the same + * as provided later in ipa_rm_deregister() call. + */ +int ipa_rm_register(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + int result; + unsigned long flags; + struct ipa_rm_resource *resource; + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_register( + (struct ipa_rm_resource_prod *)resource, + reg_params, + true); +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_register); + +/** + * ipa_rm_deregister() - cancel the registration + * @resource_name: resource name + * @reg_params: [in] registration parameters + * + * Returns: 0 on success, negative on failure + * + * Registration parameters provided here should be the same + * as provided in ipa_rm_register() call. + */ +int ipa_rm_deregister(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + int result; + unsigned long flags; + struct ipa_rm_resource *resource; + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + + if (!IPA_RM_RESORCE_IS_PROD(resource_name)) { + IPA_RM_ERR("can be called on PROD only\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_producer_deregister( + (struct ipa_rm_resource_prod *)resource, + reg_params); +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_deregister); + +/** + * ipa_rm_set_perf_profile() - set performance profile + * @resource_name: resource name + * @profile: [in] profile information. + * + * Returns: 0 on success, negative on failure + * + * Set resource performance profile. + * Updates IPA driver if performance level changed. 
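+ *
+ * Illustrative sketch (not part of the original snapshot), voting for an
+ * example bandwidth of 800 Mbps on the USB producer:
+ *
+ *      struct ipa_rm_perf_profile profile;
+ *
+ *      memset(&profile, 0, sizeof(profile));
+ *      profile.max_supported_bandwidth_mbps = 800;
+ *      if (ipa_rm_set_perf_profile(IPA_RM_RESOURCE_USB_PROD, &profile))
+ *              pr_err("perf profile vote failed\n");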
+ */ +int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name, + struct ipa_rm_perf_profile *profile) +{ + int result; + unsigned long flags; + struct ipa_rm_resource *resource; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name)); + if (profile) + IPA_RM_DBG("BW: %d\n", profile->max_supported_bandwidth_mbps); + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + result = -EPERM; + goto bail; + } + result = ipa_rm_resource_set_perf_profile(resource, profile); + if (result) { + IPA_RM_ERR("ipa_rm_resource_set_perf_profile failed %d\n", + result); + goto bail; + } + + result = 0; +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_set_perf_profile); + +/** + * ipa_rm_notify_completion() - + * consumer driver notification for + * request_resource / release_resource operations + * completion + * @event: notified event + * @resource_name: resource name + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_notify_completion(enum ipa_rm_event event, + enum ipa_rm_resource_name resource_name) +{ + int result; + + if (unlikely(!ipa_rm_ctx)) { + IPA_RM_ERR("IPA RM was not initialized\n"); + return -EINVAL; + } + + IPA_RM_DBG("event %d on %s\n", event, + ipa_rm_resource_str(resource_name)); + if (!IPA_RM_RESORCE_IS_CONS(resource_name)) { + IPA_RM_ERR("can be called on CONS only\n"); + result = -EINVAL; + goto bail; + } + ipa_rm_wq_send_cmd(IPA_RM_WQ_RESOURCE_CB, + resource_name, + event, + false); + result = 0; +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} +EXPORT_SYMBOL(ipa_rm_notify_completion); + +static void ipa_rm_wq_handler(struct work_struct *work) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_wq_work_type *ipa_rm_work = + container_of(work, + struct ipa_rm_wq_work_type, + work); + IPA_RM_DBG_LOW("%s cmd=%d event=%d notify_registered_only=%d\n", + ipa_rm_resource_str(ipa_rm_work->resource_name), + ipa_rm_work->wq_cmd, + ipa_rm_work->event, + ipa_rm_work->notify_registered_only); + switch (ipa_rm_work->wq_cmd) { + case IPA_RM_WQ_NOTIFY_PROD: + if (!IPA_RM_RESORCE_IS_PROD(ipa_rm_work->resource_name)) { + IPA_RM_ERR("resource is not PROD\n"); + goto free_work; + } + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + goto free_work; + } + ipa_rm_resource_producer_notify_clients( + (struct ipa_rm_resource_prod *)resource, + ipa_rm_work->event, + ipa_rm_work->notify_registered_only); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + break; + case IPA_RM_WQ_NOTIFY_CONS: + break; + case IPA_RM_WQ_RESOURCE_CB: + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + goto free_work; + } + ipa_rm_resource_consumer_handle_cb( + (struct ipa_rm_resource_cons *)resource, + ipa_rm_work->event); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + 
break; + default: + break; + } + +free_work: + kfree((void *) work); +} + +static void ipa_rm_wq_resume_handler(struct work_struct *work) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work = + container_of(work, + struct ipa_rm_wq_suspend_resume_work_type, + work); + IPA_RM_DBG_LOW("resume work handler: %s", + ipa_rm_resource_str(ipa_rm_work->resource_name)); + + if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) { + IPA_RM_ERR("resource is not CONS\n"); + return; + } + IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa_rm_resource_str( + ipa_rm_work->resource_name)); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str( + ipa_rm_work->resource_name)); + goto bail; + } + ipa_rm_resource_consumer_request_work( + (struct ipa_rm_resource_cons *)resource, + ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true, + ipa_rm_work->inc_usage_count); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); +bail: + kfree(ipa_rm_work); +} + + +static void ipa_rm_wq_suspend_handler(struct work_struct *work) +{ + unsigned long flags; + struct ipa_rm_resource *resource; + struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work = + container_of(work, + struct ipa_rm_wq_suspend_resume_work_type, + work); + IPA_RM_DBG_LOW("suspend work handler: %s", + ipa_rm_resource_str(ipa_rm_work->resource_name)); + + if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) { + IPA_RM_ERR("resource is not CONS\n"); + return; + } + ipa_suspend_resource_sync(ipa_rm_work->resource_name); + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + ipa_rm_work->resource_name, + &resource) != 0){ + IPA_RM_ERR("resource does not exists\n"); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + return; + } + ipa_rm_resource_consumer_release_work( + (struct ipa_rm_resource_cons *)resource, + ipa_rm_work->prev_state, + true); + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + kfree(ipa_rm_work); +} + +/** + * ipa_rm_wq_send_cmd() - send a command for deferred work + * @wq_cmd: command that should be executed + * @resource_name: resource on which command should be executed + * @notify_registered_only: notify only clients registered by + * ipa_rm_register() + * + * Returns: 0 on success, negative otherwise + */ +int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_event event, + bool notify_registered_only) +{ + int result = -ENOMEM; + struct ipa_rm_wq_work_type *work = kzalloc(sizeof(*work), GFP_ATOMIC); + + if (work) { + INIT_WORK((struct work_struct *)work, ipa_rm_wq_handler); + work->wq_cmd = wq_cmd; + work->resource_name = resource_name; + work->event = event; + work->notify_registered_only = notify_registered_only; + result = queue_work(ipa_rm_ctx->ipa_rm_wq, + (struct work_struct *)work); + } + + return result; +} + +int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw) +{ + int result = -ENOMEM; + struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work), + GFP_ATOMIC); + if (work) { + INIT_WORK((struct work_struct *)work, + ipa_rm_wq_suspend_handler); + work->resource_name = resource_name; 
+ work->prev_state = prev_state; + work->needed_bw = needed_bw; + result = queue_work(ipa_rm_ctx->ipa_rm_wq, + (struct work_struct *)work); + } + + return result; +} + +int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw, + bool inc_usage_count) +{ + int result = -ENOMEM; + struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work), + GFP_ATOMIC); + if (work) { + INIT_WORK((struct work_struct *)work, ipa_rm_wq_resume_handler); + work->resource_name = resource_name; + work->prev_state = prev_state; + work->needed_bw = needed_bw; + work->inc_usage_count = inc_usage_count; + result = queue_work(ipa_rm_ctx->ipa_rm_wq, + (struct work_struct *)work); + } else { + IPA_RM_ERR("no mem\n"); + } + + return result; +} +/** + * ipa_rm_initialize() - initialize IPA RM component + * + * Returns: 0 on success, negative otherwise + */ +int ipa_rm_initialize(void) +{ + int result; + + ipa_rm_ctx = kzalloc(sizeof(*ipa_rm_ctx), GFP_KERNEL); + if (!ipa_rm_ctx) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + ipa_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq"); + if (!ipa_rm_ctx->ipa_rm_wq) { + IPA_RM_ERR("create workqueue failed\n"); + result = -ENOMEM; + goto create_wq_fail; + } + result = ipa_rm_dep_graph_create(&(ipa_rm_ctx->dep_graph)); + if (result) { + IPA_RM_ERR("create dependency graph failed\n"); + goto graph_alloc_fail; + } + spin_lock_init(&ipa_rm_ctx->ipa_rm_lock); + IPA_RM_DBG("SUCCESS\n"); + + return 0; +graph_alloc_fail: + destroy_workqueue(ipa_rm_ctx->ipa_rm_wq); +create_wq_fail: + kfree(ipa_rm_ctx); +bail: + return result; +} + +/** + * ipa_rm_stat() - print RM stat + * @buf: [in] The user buff used to print + * @size: [in] The size of buf + * Returns: number of bytes used on success, negative on failure + * + * This function is called by ipa_debugfs in order to receive + * a full picture of the current state of the RM + */ + +int ipa_rm_stat(char *buf, int size) +{ + unsigned long flags; + int i, cnt = 0, result = EINVAL; + struct ipa_rm_resource *resource = NULL; + u32 sum_bw_prod = 0; + u32 sum_bw_cons = 0; + + if (!buf || size < 0) + return result; + + spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags); + for (i = 0; i < IPA_RM_RESOURCE_MAX; ++i) { + if (!IPA_RM_RESORCE_IS_PROD(i)) + continue; + result = ipa_rm_dep_graph_get_resource( + ipa_rm_ctx->dep_graph, + i, + &resource); + if (!result) { + result = ipa_rm_resource_producer_print_stat( + resource, buf + cnt, + size-cnt); + if (result < 0) + goto bail; + cnt += result; + } + } + + for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { + if (IPA_RM_RESORCE_IS_PROD(i)) + sum_bw_prod += ipa_rm_ctx->prof_vote.bw_resources[i]; + else + sum_bw_cons += ipa_rm_ctx->prof_vote.bw_resources[i]; + } + + result = scnprintf(buf + cnt, size - cnt, + "All prod bandwidth: %d, All cons bandwidth: %d\n", + sum_bw_prod, sum_bw_cons); + cnt += result; + + result = scnprintf(buf + cnt, size - cnt, + "Voting: voltage %d, bandwidth %d\n", + ipa_rm_ctx->prof_vote.curr_volt, + ipa_rm_ctx->prof_vote.curr_bw); + cnt += result; + + result = cnt; +bail: + spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags); + + return result; +} + +/** + * ipa_rm_resource_str() - returns string that represent the resource + * @resource_name: [in] resource name + */ +const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name) +{ + if (resource_name < 0 || resource_name >= IPA_RM_RESOURCE_MAX) + return "INVALID RESOURCE"; + + return 
resource_name_to_str[resource_name]; +}; + +static void ipa_rm_perf_profile_notify_to_ipa_work(struct work_struct *work) +{ + struct ipa_rm_notify_ipa_work_type *notify_work = container_of(work, + struct ipa_rm_notify_ipa_work_type, + work); + int res; + + IPA_RM_DBG_LOW("calling to IPA driver. voltage %d bandwidth %d\n", + notify_work->volt, notify_work->bandwidth_mbps); + + res = ipa_set_required_perf_profile(notify_work->volt, + notify_work->bandwidth_mbps); + if (res) { + IPA_RM_ERR("ipa_set_required_perf_profile failed %d\n", res); + goto bail; + } + + IPA_RM_DBG_LOW("IPA driver notified\n"); +bail: + kfree(notify_work); +} + +static void ipa_rm_perf_profile_notify_to_ipa(enum ipa_voltage_level volt, + u32 bandwidth) +{ + struct ipa_rm_notify_ipa_work_type *work; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + INIT_WORK(&work->work, ipa_rm_perf_profile_notify_to_ipa_work); + work->volt = volt; + work->bandwidth_mbps = bandwidth; + queue_work(ipa_rm_ctx->ipa_rm_wq, &work->work); +} + +/** + * ipa_rm_perf_profile_change() - change performance profile vote for resource + * @resource_name: [in] resource name + * + * change bandwidth and voltage vote based on resource state. + */ +void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name) +{ + enum ipa_voltage_level old_volt; + u32 *bw_ptr; + u32 old_bw; + struct ipa_rm_resource *resource; + int i; + u32 sum_bw_prod = 0; + u32 sum_bw_cons = 0; + + IPA_RM_DBG_LOW("%s\n", ipa_rm_resource_str(resource_name)); + + if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph, + resource_name, + &resource) != 0) { + IPA_RM_ERR("resource does not exists\n"); + WARN_ON(1); + return; + } + + old_volt = ipa_rm_ctx->prof_vote.curr_volt; + old_bw = ipa_rm_ctx->prof_vote.curr_bw; + + bw_ptr = &ipa_rm_ctx->prof_vote.bw_resources[resource_name]; + + switch (resource->state) { + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + IPA_RM_DBG_LOW("max_bw = %d, needed_bw = %d\n", + resource->max_bw, resource->needed_bw); + *bw_ptr = min(resource->max_bw, resource->needed_bw); + ipa_rm_ctx->prof_vote.volt[resource_name] = + resource->floor_voltage; + break; + + case IPA_RM_RELEASE_IN_PROGRESS: + case IPA_RM_RELEASED: + *bw_ptr = 0; + ipa_rm_ctx->prof_vote.volt[resource_name] = 0; + break; + + default: + IPA_RM_ERR("unknown state %d\n", resource->state); + WARN_ON(1); + return; + } + IPA_RM_DBG_LOW("resource bandwidth: %d voltage: %d\n", *bw_ptr, + resource->floor_voltage); + + ipa_rm_ctx->prof_vote.curr_volt = IPA_VOLTAGE_UNSPECIFIED; + for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { + if (ipa_rm_ctx->prof_vote.volt[i] > + ipa_rm_ctx->prof_vote.curr_volt) { + ipa_rm_ctx->prof_vote.curr_volt = + ipa_rm_ctx->prof_vote.volt[i]; + } + } + + for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { + if (IPA_RM_RESORCE_IS_PROD(i)) + sum_bw_prod += ipa_rm_ctx->prof_vote.bw_resources[i]; + else + sum_bw_cons += ipa_rm_ctx->prof_vote.bw_resources[i]; + } + + IPA_RM_DBG_LOW("all prod bandwidth: %d all cons bandwidth: %d\n", + sum_bw_prod, sum_bw_cons); + ipa_rm_ctx->prof_vote.curr_bw = min(sum_bw_prod, sum_bw_cons); + + if (ipa_rm_ctx->prof_vote.curr_volt == old_volt && + ipa_rm_ctx->prof_vote.curr_bw == old_bw) { + IPA_RM_DBG_LOW("same voting\n"); + return; + } + + IPA_RM_DBG_LOW("new voting: voltage %d bandwidth %d\n", + ipa_rm_ctx->prof_vote.curr_volt, + ipa_rm_ctx->prof_vote.curr_bw); + + ipa_rm_perf_profile_notify_to_ipa(ipa_rm_ctx->prof_vote.curr_volt, + ipa_rm_ctx->prof_vote.curr_bw); + + return; +}; +/** + * ipa_rm_exit() - free all IPA 
RM resources + */ +void ipa_rm_exit(void) +{ + IPA_RM_DBG("ENTER\n"); + ipa_rm_dep_graph_delete(ipa_rm_ctx->dep_graph); + destroy_workqueue(ipa_rm_ctx->ipa_rm_wq); + kfree(ipa_rm_ctx); + ipa_rm_ctx = NULL; + IPA_RM_DBG("EXIT\n"); +} diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c new file mode 100644 index 000000000000..43c4e91c1ec6 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c @@ -0,0 +1,247 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "ipa_rm_dependency_graph.h" +#include "ipa_rm_i.h" + +static int ipa_rm_dep_get_index(enum ipa_rm_resource_name resource_name) +{ + int resource_index = IPA_RM_INDEX_INVALID; + + if (IPA_RM_RESORCE_IS_PROD(resource_name)) + resource_index = ipa_rm_prod_index(resource_name); + else if (IPA_RM_RESORCE_IS_CONS(resource_name)) + resource_index = ipa_rm_cons_index(resource_name); + + return resource_index; +} + +/** + * ipa_rm_dep_graph_create() - creates graph + * @dep_graph: [out] created dependency graph + * + * Returns: dependency graph on success, NULL on failure + */ +int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph) +{ + int result = 0; + + *dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL); + if (!*dep_graph) + result = -ENOMEM; + return result; +} + +/** + * ipa_rm_dep_graph_delete() - destroyes the graph + * @graph: [in] dependency graph + * + * Frees all resources. 
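+ * Note: only the entries of the resource table are freed and the table is
+ * cleared; the graph structure itself is not freed by this call.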
+ */ +void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph) +{ + int resource_index; + + if (!graph) { + IPA_RM_ERR("invalid params\n"); + return; + } + for (resource_index = 0; + resource_index < IPA_RM_RESOURCE_MAX; + resource_index++) + kfree(graph->resource_table[resource_index]); + memset(graph->resource_table, 0, sizeof(graph->resource_table)); +} + +/** + * ipa_rm_dep_graph_get_resource() - provides a resource by name + * @graph: [in] dependency graph + * @name: [in] name of the resource + * @resource: [out] resource in case of success + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_get_resource( + struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + struct ipa_rm_resource **resource) +{ + int result; + int resource_index; + + if (!graph) { + result = -EINVAL; + goto bail; + } + resource_index = ipa_rm_dep_get_index(resource_name); + if (resource_index == IPA_RM_INDEX_INVALID) { + result = -EINVAL; + goto bail; + } + *resource = graph->resource_table[resource_index]; + if (!*resource) { + result = -EINVAL; + goto bail; + } + result = 0; +bail: + return result; +} + +/** + * ipa_rm_dep_graph_add() - adds resource to graph + * @graph: [in] dependency graph + * @resource: [in] resource to add + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph, + struct ipa_rm_resource *resource) +{ + int result = 0; + int resource_index; + + if (!graph || !resource) { + result = -EINVAL; + goto bail; + } + resource_index = ipa_rm_dep_get_index(resource->name); + if (resource_index == IPA_RM_INDEX_INVALID) { + result = -EINVAL; + goto bail; + } + graph->resource_table[resource_index] = resource; +bail: + return result; +} + +/** + * ipa_rm_dep_graph_remove() - removes resource from graph + * @graph: [in] dependency graph + * @resource: [in] resource to add + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name) +{ + if (!graph) + return -EINVAL; + graph->resource_table[resource_name] = NULL; + + return 0; +} + +/** + * ipa_rm_dep_graph_add_dependency() - adds dependency between + * two nodes in graph + * @graph: [in] dependency graph + * @resource_name: [in] resource to add + * @depends_on_name: [in] resource to add + * @userspace_dep: [in] operation requested by userspace ? 
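+ *
+ * Illustrative call (not part of the original snapshot):
+ *
+ *      ipa_rm_dep_graph_add_dependency(graph, IPA_RM_RESOURCE_USB_PROD,
+ *              IPA_RM_RESOURCE_Q6_CONS, false);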
+ * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspace_dep) +{ + struct ipa_rm_resource *dependent = NULL; + struct ipa_rm_resource *dependency = NULL; + int result; + + if (!graph || + !IPA_RM_RESORCE_IS_PROD(resource_name) || + !IPA_RM_RESORCE_IS_CONS(depends_on_name)) { + IPA_RM_ERR("invalid params\n"); + result = -EINVAL; + goto bail; + } + if (ipa_rm_dep_graph_get_resource(graph, + resource_name, + &dependent)) { + IPA_RM_ERR("%s does not exist\n", + ipa_rm_resource_str(resource_name)); + result = -EINVAL; + goto bail; + } + if (ipa_rm_dep_graph_get_resource(graph, + depends_on_name, + &dependency)) { + IPA_RM_ERR("%s does not exist\n", + ipa_rm_resource_str(depends_on_name)); + result = -EINVAL; + goto bail; + } + result = ipa_rm_resource_add_dependency(dependent, dependency, + userspace_dep); +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_dep_graph_delete_dependency() - deleted dependency between + * two nodes in graph + * @graph: [in] dependency graph + * @resource_name: [in] resource to delete + * @depends_on_name: [in] resource to delete + * @userspace_dep: [in] operation requested by userspace ? + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspace_dep) +{ + struct ipa_rm_resource *dependent = NULL; + struct ipa_rm_resource *dependency = NULL; + int result; + + if (!graph || + !IPA_RM_RESORCE_IS_PROD(resource_name) || + !IPA_RM_RESORCE_IS_CONS(depends_on_name)) { + IPA_RM_ERR("invalid params\n"); + result = -EINVAL; + goto bail; + } + + if (ipa_rm_dep_graph_get_resource(graph, + resource_name, + &dependent)) { + IPA_RM_DBG("%s does not exist\n", + ipa_rm_resource_str(resource_name)); + result = -EINVAL; + goto bail; + } + + if (ipa_rm_dep_graph_get_resource(graph, + depends_on_name, + &dependency)) { + IPA_RM_DBG("%s does not exist\n", + ipa_rm_resource_str(depends_on_name)); + result = -EINVAL; + goto bail; + } + + result = ipa_rm_resource_delete_dependency(dependent, dependency, + userspace_dep); +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h new file mode 100644 index 000000000000..8f67b2d2d1f4 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h @@ -0,0 +1,49 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_ +#define _IPA_RM_DEPENDENCY_GRAPH_H_ + +#include +#include +#include "ipa_rm_resource.h" + +struct ipa_rm_dep_graph { + struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX]; +}; + +int ipa_rm_dep_graph_get_resource( + struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name name, + struct ipa_rm_resource **resource); + +int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph); + +void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph); + +int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph, + struct ipa_rm_resource *resource); + +int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name); + +int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspsace_dep); + +int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name, + bool userspsace_dep); + +#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_rm_i.h new file mode 100644 index 000000000000..c0e3ce204848 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_i.h @@ -0,0 +1,157 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_RM_I_H_ +#define _IPA_RM_I_H_ + +#include +#include +#include "ipa_rm_resource.h" +#include "ipa_common_i.h" + +#define IPA_RM_DRV_NAME "ipa_rm" + +#define IPA_RM_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) +#define IPA_RM_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_RM_ERR(fmt, args...) 
\ + do { \ + pr_err(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_RM_RESORCE_IS_PROD(x) \ + (x < IPA_RM_RESOURCE_MAX && (x & 0x1) == 0) +#define IPA_RM_RESORCE_IS_CONS(x) \ + (x < IPA_RM_RESOURCE_MAX && (x & 0x1) == 1) +#define IPA_RM_INDEX_INVALID (-1) +#define IPA_RM_RELEASE_DELAY_IN_MSEC 1000 + +int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name); +int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name); + +/** + * struct ipa_rm_delayed_release_work_type - IPA RM delayed resource release + * work type + * @delayed_work: work struct + * @ipa_rm_resource_name: name of the resource on which this work should be done + * @needed_bw: bandwidth required for resource in Mbps + * @dec_usage_count: decrease usage count on release ? + */ +struct ipa_rm_delayed_release_work_type { + struct delayed_work work; + enum ipa_rm_resource_name resource_name; + u32 needed_bw; + bool dec_usage_count; + +}; + +/** + * enum ipa_rm_wq_cmd - workqueue commands + */ +enum ipa_rm_wq_cmd { + IPA_RM_WQ_NOTIFY_PROD, + IPA_RM_WQ_NOTIFY_CONS, + IPA_RM_WQ_RESOURCE_CB +}; + +/** + * struct ipa_rm_wq_work_type - IPA RM worqueue specific + * work type + * @work: work struct + * @wq_cmd: command that should be processed in workqueue context + * @resource_name: name of the resource on which this work + * should be done + * @dep_graph: data structure to search for resource if exists + * @event: event to notify + * @notify_registered_only: notify only clients registered by + * ipa_rm_register() + */ +struct ipa_rm_wq_work_type { + struct work_struct work; + enum ipa_rm_wq_cmd wq_cmd; + enum ipa_rm_resource_name resource_name; + enum ipa_rm_event event; + bool notify_registered_only; +}; + +/** + * struct ipa_rm_wq_suspend_resume_work_type - IPA RM worqueue resume or + * suspend work type + * @work: work struct + * @resource_name: name of the resource on which this work + * should be done + * @prev_state: + * @needed_bw: + */ +struct ipa_rm_wq_suspend_resume_work_type { + struct work_struct work; + enum ipa_rm_resource_name resource_name; + enum ipa_rm_resource_state prev_state; + u32 needed_bw; + bool inc_usage_count; + +}; + +int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd, + enum ipa_rm_resource_name resource_name, + enum ipa_rm_event event, + bool notify_registered_only); + +int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw, + bool inc_usage_count); + +int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_state prev_state, + u32 needed_bw); + +int ipa_rm_initialize(void); + +int ipa_rm_stat(char *buf, int size); + +const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name); + +void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name); + +int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name); + +void delayed_release_work_func(struct work_struct *work); + +int ipa_rm_add_dependency_from_ioctl(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +int ipa_rm_delete_dependency_from_ioctl(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +void ipa_rm_exit(void); + +#endif /* _IPA_RM_I_H_ */ diff --git 
a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c new file mode 100644 index 000000000000..9c59bcc002d4 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c @@ -0,0 +1,268 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_rm_i.h" + +/** + * struct ipa_rm_it_private - IPA RM Inactivity Timer private + * data + * @initied: indicates if instance was initialized + * @lock: spinlock for mutual exclusion + * @resource_name: resource name + * @work: delayed work object for running the delayed release + * function + * @resource_requested: boolean flag that indicates if the resource was requested + * @reschedule_work: boolean flag that indicates not to release but to + * reschedule the release work. + * @work_in_progress: boolean flag that indicates if release work was scheduled. + * @jiffies: number of jiffies for timeout + */ +struct ipa_rm_it_private { + bool initied; + enum ipa_rm_resource_name resource_name; + spinlock_t lock; + struct delayed_work work; + bool resource_requested; + bool reschedule_work; + bool work_in_progress; + unsigned long jiffies; +}; + +static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX]; + +/** + * ipa_rm_inactivity_timer_func() - called when the timer expires, in + * the context of the shared workqueue. Checks internally if the + * reschedule_work flag is set. In case it is not set, this function calls + * ipa_rm_release_resource(). In case reschedule_work is set, this function + * clears it and reschedules the work.
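 + * The reschedule_work flag is set by ipa_rm_inactivity_timer_release_resource() when a release is requested while the delayed work is already pending; in that case this function re-queues itself for a full timeout instead of releasing the resource.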
+ * + * @work: work object provided by the work queue + * + * Return codes: + * None + */ +static void ipa_rm_inactivity_timer_func(struct work_struct *work) +{ + + struct ipa_rm_it_private *me = container_of(to_delayed_work(work), + struct ipa_rm_it_private, + work); + unsigned long flags; + + IPA_RM_DBG_LOW("timer expired for resource %d\n", me->resource_name); + + spin_lock_irqsave( + &ipa_rm_it_handles[me->resource_name].lock, flags); + if (ipa_rm_it_handles[me->resource_name].reschedule_work) { + IPA_RM_DBG_LOW("setting delayed work\n"); + ipa_rm_it_handles[me->resource_name].reschedule_work = false; + queue_delayed_work(system_unbound_wq, + &ipa_rm_it_handles[me->resource_name].work, + ipa_rm_it_handles[me->resource_name].jiffies); + } else if (ipa_rm_it_handles[me->resource_name].resource_requested) { + IPA_RM_DBG_LOW("not calling release\n"); + ipa_rm_it_handles[me->resource_name].work_in_progress = false; + } else { + IPA_RM_DBG_LOW("calling release_resource on resource %d\n", + me->resource_name); + ipa_rm_release_resource(me->resource_name); + ipa_rm_it_handles[me->resource_name].work_in_progress = false; + } + spin_unlock_irqrestore( + &ipa_rm_it_handles[me->resource_name].lock, flags); +} + +/** + * ipa_rm_inactivity_timer_init() - Init function for IPA RM + * inactivity timer. This function shall be called prior to calling + * any other API of the IPA RM inactivity timer. + * + * @resource_name: Resource name. @see ipa_rm.h + * @msecs: time in milliseconds that the IPA RM inactivity timer + * shall wait before calling ipa_rm_release_resource(). + * + * Return codes: + * 0: success + * -EINVAL: invalid parameters + */ +int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name, + unsigned long msecs) +{ + IPA_RM_DBG_LOW("resource %d\n", resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("Invalid parameter\n"); + return -EINVAL; + } + + if (ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("resource %d already inited\n", resource_name); + return -EINVAL; + } + + spin_lock_init(&ipa_rm_it_handles[resource_name].lock); + ipa_rm_it_handles[resource_name].resource_name = resource_name; + ipa_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs); + ipa_rm_it_handles[resource_name].resource_requested = false; + ipa_rm_it_handles[resource_name].reschedule_work = false; + ipa_rm_it_handles[resource_name].work_in_progress = false; + + INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work, + ipa_rm_inactivity_timer_func); + ipa_rm_it_handles[resource_name].initied = 1; + + return 0; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_init); + +/** + * ipa_rm_inactivity_timer_destroy() - De-Init function for IPA + * RM inactivity timer. + * @resource_name: Resource name.
@see ipa_rm.h + * Return codes: + * 0: success + * -EINVAL: invalid parameters + */ +int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name) +{ + IPA_RM_DBG_LOW("resource %d\n", resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("Invalid parameter\n"); + return -EINVAL; + } + + if (!ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("resource %d not initialized\n", + resource_name); + return -EINVAL; + } + + cancel_delayed_work_sync(&ipa_rm_it_handles[resource_name].work); + + memset(&ipa_rm_it_handles[resource_name], 0, + sizeof(struct ipa_rm_it_private)); + + return 0; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_destroy); + +/** + * ipa_rm_inactivity_timer_request_resource() - Same as + * ipa_rm_request_resource(), with the difference that calling + * this function will also cancel the inactivity timer, if + * ipa_rm_inactivity_timer_release_resource() was called earlier. + * + * @resource_name: Resource name. @see ipa_rm.h + * + * Return codes: + * 0: success + * -EINVAL: invalid parameters + */ +int ipa_rm_inactivity_timer_request_resource( + enum ipa_rm_resource_name resource_name) +{ + int ret; + unsigned long flags; + + IPA_RM_DBG_LOW("resource %d\n", resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("Invalid parameter\n"); + return -EINVAL; + } + + if (!ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("Not initialized\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags); + ipa_rm_it_handles[resource_name].resource_requested = true; + spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags); + ret = ipa_rm_request_resource(resource_name); + IPA_RM_DBG_LOW("resource %d: returning %d\n", resource_name, ret); + + return ret; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_request_resource); + +/** + * ipa_rm_inactivity_timer_release_resource() - Sets the + * inactivity timer to the timeout set by + * ipa_rm_inactivity_timer_init(). When the timeout expires, the IPA + * RM inactivity timer will call ipa_rm_release_resource(). + * If a call to ipa_rm_inactivity_timer_request_resource() was + * made BEFORE the timeout has expired, the timer will be + * cancelled. + * + * @resource_name: Resource name.
@see ipa_rm.h + * + * Return codes: + * 0: success + * -EINVAL: invalid parameters + */ +int ipa_rm_inactivity_timer_release_resource( + enum ipa_rm_resource_name resource_name) +{ + unsigned long flags; + + IPA_RM_DBG_LOW("resource %d\n", resource_name); + + if (resource_name < 0 || + resource_name >= IPA_RM_RESOURCE_MAX) { + IPA_RM_ERR("Invalid parameter\n"); + return -EINVAL; + } + + if (!ipa_rm_it_handles[resource_name].initied) { + IPA_RM_ERR("Not initialized\n"); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags); + ipa_rm_it_handles[resource_name].resource_requested = false; + if (ipa_rm_it_handles[resource_name].work_in_progress) { + IPA_RM_DBG_LOW("Timer already set, no sched again %d\n", + resource_name); + ipa_rm_it_handles[resource_name].reschedule_work = true; + spin_unlock_irqrestore( + &ipa_rm_it_handles[resource_name].lock, flags); + return 0; + } + ipa_rm_it_handles[resource_name].work_in_progress = true; + ipa_rm_it_handles[resource_name].reschedule_work = false; + IPA_RM_DBG_LOW("setting delayed work\n"); + queue_delayed_work(system_unbound_wq, + &ipa_rm_it_handles[resource_name].work, + ipa_rm_it_handles[resource_name].jiffies); + spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags); + + return 0; +} +EXPORT_SYMBOL(ipa_rm_inactivity_timer_release_resource); + diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_rm_peers_list.c new file mode 100644 index 000000000000..19032278acb4 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.c @@ -0,0 +1,277 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "ipa_rm_i.h" + +/** + * ipa_rm_peers_list_get_resource_index() - resource name to index + * of this resource in corresponding peers list + * @resource_name: [in] resource name + * + * Returns: resource index mapping, IPA_RM_INDEX_INVALID + * in case provided resource name isn't contained in enum + * ipa_rm_resource_name. 
+ * + */ +static int ipa_rm_peers_list_get_resource_index( + enum ipa_rm_resource_name resource_name) +{ + int resource_index = IPA_RM_INDEX_INVALID; + + if (IPA_RM_RESORCE_IS_PROD(resource_name)) + resource_index = ipa_rm_prod_index(resource_name); + else if (IPA_RM_RESORCE_IS_CONS(resource_name)) + resource_index = ipa_rm_cons_index(resource_name); + + return resource_index; +} + +static bool ipa_rm_peers_list_check_index(int index, + struct ipa_rm_peers_list *peers_list) +{ + return !(index >= peers_list->max_peers || index < 0); +} + +/** + * ipa_rm_peers_list_create() - creates the peers list + * + * @max_peers: maximum number of peers in new list + * @peers_list: [out] newly created peers list + * + * Returns: 0 in case of SUCCESS, negative otherwise + */ +int ipa_rm_peers_list_create(int max_peers, + struct ipa_rm_peers_list **peers_list) +{ + int result; + + *peers_list = kzalloc(sizeof(**peers_list), GFP_ATOMIC); + if (!*peers_list) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + + (*peers_list)->max_peers = max_peers; + (*peers_list)->peers = kzalloc((*peers_list)->max_peers * + sizeof(*((*peers_list)->peers)), GFP_ATOMIC); + if (!((*peers_list)->peers)) { + IPA_RM_ERR("no mem\n"); + result = -ENOMEM; + goto list_alloc_fail; + } + + return 0; + +list_alloc_fail: + kfree(*peers_list); +bail: + return result; +} + +/** + * ipa_rm_peers_list_delete() - deletes the peers list + * + * @peers_list: peers list + * + */ +void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list) +{ + if (peers_list) { + kfree(peers_list->peers); + kfree(peers_list); + } +} + +/** + * ipa_rm_peers_list_remove_peer() - removes peer from the list + * + * @peers_list: peers list + * @resource_name: name of the resource to remove + * + */ +void ipa_rm_peers_list_remove_peer( + struct ipa_rm_peers_list *peers_list, + enum ipa_rm_resource_name resource_name) +{ + if (!peers_list) + return; + + peers_list->peers[ipa_rm_peers_list_get_resource_index( + resource_name)].resource = NULL; + peers_list->peers[ipa_rm_peers_list_get_resource_index( + resource_name)].userspace_dep = false; + peers_list->peers_count--; +} + +/** + * ipa_rm_peers_list_add_peer() - adds peer to the list + * + * @peers_list: peers list + * @resource: resource to add + * + */ +void ipa_rm_peers_list_add_peer( + struct ipa_rm_peers_list *peers_list, + struct ipa_rm_resource *resource, + bool userspace_dep) +{ + if (!peers_list || !resource) + return; + + peers_list->peers[ipa_rm_peers_list_get_resource_index( + resource->name)].resource = resource; + peers_list->peers[ipa_rm_peers_list_get_resource_index( + resource->name)].userspace_dep = userspace_dep; + peers_list->peers_count++; +} + +/** + * ipa_rm_peers_list_is_empty() - checks + * if resource peers list is empty + * + * @peers_list: peers list + * + * Returns: true if the list is empty, false otherwise + */ +bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list) +{ + bool result = true; + + if (!peers_list) + goto bail; + + if (peers_list->peers_count > 0) + result = false; +bail: + return result; +} + +/** + * ipa_rm_peers_list_has_last_peer() - checks + * if resource peers list has exactly one peer + * + * @peers_list: peers list + * + * Returns: true if the list has exactly one peer, false otherwise + */ +bool ipa_rm_peers_list_has_last_peer( + struct ipa_rm_peers_list *peers_list) +{ + bool result = false; + + if (!peers_list) + goto bail; + + if (peers_list->peers_count == 1) + result = true; +bail: + return result; +} + +/** + *
ipa_rm_peers_list_check_dependency() - check dependency + * between 2 peer lists + * @resource_peers: first peers list + * @resource_name: first peers list resource name + * @depends_on_peers: second peers list + * @depends_on_name: second peers list resource name + * @userspace_dep: [out] dependency was created by userspace + * + * Returns: true if there is dependency, false otherwise + * + */ +bool ipa_rm_peers_list_check_dependency( + struct ipa_rm_peers_list *resource_peers, + enum ipa_rm_resource_name resource_name, + struct ipa_rm_peers_list *depends_on_peers, + enum ipa_rm_resource_name depends_on_name, + bool *userspace_dep) +{ + bool result = false; + int resource_index; + struct ipa_rm_resource_peer *peer_ptr; + + if (!resource_peers || !depends_on_peers || !userspace_dep) + return result; + + resource_index = ipa_rm_peers_list_get_resource_index(depends_on_name); + peer_ptr = &resource_peers->peers[resource_index]; + if (peer_ptr->resource != NULL) { + result = true; + *userspace_dep = peer_ptr->userspace_dep; + } + + resource_index = ipa_rm_peers_list_get_resource_index(resource_name); + peer_ptr = &depends_on_peers->peers[resource_index]; + if (peer_ptr->resource != NULL) { + result = true; + *userspace_dep = peer_ptr->userspace_dep; + } + + return result; +} + +/** + * ipa_rm_peers_list_get_resource() - get resource by + * resource index + * @resource_index: resource index + * @resource_peers: peers list + * + * Returns: the resource if found, NULL otherwise + */ +struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index, + struct ipa_rm_peers_list *resource_peers) +{ + struct ipa_rm_resource *result = NULL; + + if (!ipa_rm_peers_list_check_index(resource_index, resource_peers)) + goto bail; + + result = resource_peers->peers[resource_index].resource; +bail: + return result; +} + +/** + * ipa_rm_peers_list_get_userspace_dep() - returns whether resource dependency + * was added by userspace + * @resource_index: resource index + * @resource_peers: peers list + * + * Returns: true if the dependency was added by userspace, false if it was + * added by the kernel + */ +bool ipa_rm_peers_list_get_userspace_dep(int resource_index, + struct ipa_rm_peers_list *resource_peers) +{ + bool result = false; + + if (!ipa_rm_peers_list_check_index(resource_index, resource_peers)) + goto bail; + + result = resource_peers->peers[resource_index].userspace_dep; +bail: + return result; +} + +/** + * ipa_rm_peers_list_get_size() - get peers list size + * + * @peers_list: peers list + * + * Returns: the size of the peers list + */ +int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list) +{ + return peers_list->max_peers; +} diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.h b/drivers/platform/msm/ipa/ipa_rm_peers_list.h new file mode 100644 index 000000000000..4d63549e694b --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.h @@ -0,0 +1,62 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#ifndef _IPA_RM_PEERS_LIST_H_ +#define _IPA_RM_PEERS_LIST_H_ + +#include "ipa_rm_resource.h" + +struct ipa_rm_resource_peer { + struct ipa_rm_resource *resource; + bool userspace_dep; +}; + +/** + * struct ipa_rm_peers_list - IPA RM resource peers list + * @peers: the list of references to resources dependent on this resource + * in case of producer or list of dependencies in case of consumer + * @max_peers: maximum number of peers for this resource + * @peers_count: actual number of peers for this resource + */ +struct ipa_rm_peers_list { + struct ipa_rm_resource_peer *peers; + int max_peers; + int peers_count; +}; + +int ipa_rm_peers_list_create(int max_peers, + struct ipa_rm_peers_list **peers_list); +void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list); +void ipa_rm_peers_list_remove_peer( + struct ipa_rm_peers_list *peers_list, + enum ipa_rm_resource_name resource_name); +void ipa_rm_peers_list_add_peer( + struct ipa_rm_peers_list *peers_list, + struct ipa_rm_resource *resource, + bool userspace_dep); +bool ipa_rm_peers_list_check_dependency( + struct ipa_rm_peers_list *resource_peers, + enum ipa_rm_resource_name resource_name, + struct ipa_rm_peers_list *depends_on_peers, + enum ipa_rm_resource_name depends_on_name, + bool *userspace_dep); +struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index, + struct ipa_rm_peers_list *peers_list); +bool ipa_rm_peers_list_get_userspace_dep(int resource_index, + struct ipa_rm_peers_list *resource_peers); +int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list); +bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list); +bool ipa_rm_peers_list_has_last_peer( + struct ipa_rm_peers_list *peers_list); + + +#endif /* _IPA_RM_PEERS_LIST_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c new file mode 100644 index 000000000000..232caa3f8a73 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_resource.c @@ -0,0 +1,1211 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "ipa_rm_resource.h" +#include "ipa_rm_i.h" +#include "ipa_common_i.h" +/** + * ipa_rm_prod_index() - producer name to producer index mapping + * @resource_name: [in] resource name (should be of producer) + * + * Returns: resource index mapping, IPA_RM_INDEX_INVALID + * in case provided resource name isn't contained + * in enum ipa_rm_resource_name or is not a producer.
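 + * As the IPA_RM_RESORCE_IS_PROD()/IPA_RM_RESORCE_IS_CONS() macros in ipa_rm_i.h assume, producer names occupy the even values of enum ipa_rm_resource_name and consumer names the odd values; the index returned here is simply the resource name itself.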
+ * + */ +int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name) +{ + int result = resource_name; + + switch (resource_name) { + case IPA_RM_RESOURCE_Q6_PROD: + case IPA_RM_RESOURCE_USB_PROD: + case IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD: + case IPA_RM_RESOURCE_HSIC_PROD: + case IPA_RM_RESOURCE_STD_ECM_PROD: + case IPA_RM_RESOURCE_RNDIS_PROD: + case IPA_RM_RESOURCE_WWAN_0_PROD: + case IPA_RM_RESOURCE_WLAN_PROD: + case IPA_RM_RESOURCE_ODU_ADAPT_PROD: + case IPA_RM_RESOURCE_MHI_PROD: + case IPA_RM_RESOURCE_ETHERNET_PROD: + break; + default: + result = IPA_RM_INDEX_INVALID; + break; + } + + return result; +} + +/** + * ipa_rm_cons_index() - consumer name to consumer index mapping + * @resource_name: [in] resource name (should be of consumer) + * + * Returns: resource index mapping, IPA_RM_INDEX_INVALID + * in case provided resource name isn't contained + * in enum ipa_rm_resource_name or is not of consumers. + * + */ +int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name) +{ + int result = resource_name; + + switch (resource_name) { + case IPA_RM_RESOURCE_Q6_CONS: + case IPA_RM_RESOURCE_USB_CONS: + case IPA_RM_RESOURCE_HSIC_CONS: + case IPA_RM_RESOURCE_WLAN_CONS: + case IPA_RM_RESOURCE_APPS_CONS: + case IPA_RM_RESOURCE_ODU_ADAPT_CONS: + case IPA_RM_RESOURCE_MHI_CONS: + case IPA_RM_RESOURCE_USB_DPL_CONS: + case IPA_RM_RESOURCE_ETHERNET_CONS: + break; + default: + result = IPA_RM_INDEX_INVALID; + break; + } + + return result; +} + +int ipa_rm_resource_consumer_release_work( + struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + bool notify_completion) +{ + int driver_result; + + IPA_RM_DBG_LOW("calling driver CB\n"); + driver_result = consumer->release_resource(); + IPA_RM_DBG_LOW("driver CB returned with %d\n", driver_result); + /* + * Treat IPA_RM_RELEASE_IN_PROGRESS as IPA_RM_RELEASED + * for CONS which remains in RELEASE_IN_PROGRESS. 
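 + * A release callback that returns -EINPROGRESS is therefore treated below as a release that has already completed.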
+ */ + if (driver_result == -EINPROGRESS) + driver_result = 0; + if (driver_result != 0 && driver_result != -EINPROGRESS) { + IPA_RM_ERR("driver CB returned error %d\n", driver_result); + consumer->resource.state = prev_state; + goto bail; + } + if (driver_result == 0) { + if (notify_completion) + ipa_rm_resource_consumer_handle_cb(consumer, + IPA_RM_RESOURCE_RELEASED); + else + consumer->resource.state = IPA_RM_RELEASED; + } + complete_all(&consumer->request_consumer_in_progress); + + ipa_rm_perf_profile_change(consumer->resource.name); +bail: + return driver_result; +} + +int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + u32 prod_needed_bw, + bool notify_completion, + bool dec_client_on_err) +{ + int driver_result; + + IPA_RM_DBG_LOW("calling driver CB\n"); + driver_result = consumer->request_resource(); + IPA_RM_DBG_LOW("driver CB returned with %d\n", driver_result); + if (driver_result == 0) { + if (notify_completion) { + ipa_rm_resource_consumer_handle_cb(consumer, + IPA_RM_RESOURCE_GRANTED); + } else { + consumer->resource.state = IPA_RM_GRANTED; + ipa_rm_perf_profile_change(consumer->resource.name); + ipa_resume_resource(consumer->resource.name); + } + } else if (driver_result != -EINPROGRESS) { + consumer->resource.state = prev_state; + consumer->resource.needed_bw -= prod_needed_bw; + if (dec_client_on_err) + consumer->usage_count--; + } + + return driver_result; +} + +int ipa_rm_resource_consumer_request( + struct ipa_rm_resource_cons *consumer, + u32 prod_needed_bw, + bool inc_usage_count, + bool wake_client) +{ + int result = 0; + enum ipa_rm_resource_state prev_state; + struct ipa_active_client_logging_info log_info; + + IPA_RM_DBG_LOW("%s state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + + prev_state = consumer->resource.state; + consumer->resource.needed_bw += prod_needed_bw; + switch (consumer->resource.state) { + case IPA_RM_RELEASED: + case IPA_RM_RELEASE_IN_PROGRESS: + reinit_completion(&consumer->request_consumer_in_progress); + consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS; + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, + ipa_rm_resource_str(consumer->resource.name)); + if (prev_state == IPA_RM_RELEASE_IN_PROGRESS || + ipa_inc_client_enable_clks_no_block(&log_info) != 0) { + IPA_RM_DBG_LOW("async resume work for %s\n", + ipa_rm_resource_str(consumer->resource.name)); + ipa_rm_wq_send_resume_cmd(consumer->resource.name, + prev_state, + prod_needed_bw, + inc_usage_count); + result = -EINPROGRESS; + break; + } + result = ipa_rm_resource_consumer_request_work(consumer, + prev_state, + prod_needed_bw, + false, + inc_usage_count); + break; + case IPA_RM_GRANTED: + if (wake_client) { + result = ipa_rm_resource_consumer_request_work( + consumer, prev_state, prod_needed_bw, false, + inc_usage_count); + break; + } + ipa_rm_perf_profile_change(consumer->resource.name); + break; + case IPA_RM_REQUEST_IN_PROGRESS: + result = -EINPROGRESS; + break; + default: + consumer->resource.needed_bw -= prod_needed_bw; + result = -EPERM; + goto bail; + } + if (inc_usage_count) + consumer->usage_count++; +bail: + IPA_RM_DBG_LOW("%s new state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + IPA_RM_DBG_LOW("EXIT with %d\n", result); + + return result; +} + +int ipa_rm_resource_consumer_release( + struct ipa_rm_resource_cons *consumer, + u32 prod_needed_bw, + bool dec_usage_count) +{ + int result = 0; + enum ipa_rm_resource_state 
save_state; + + IPA_RM_DBG_LOW("%s state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + save_state = consumer->resource.state; + consumer->resource.needed_bw -= prod_needed_bw; + switch (consumer->resource.state) { + case IPA_RM_RELEASED: + break; + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + if (dec_usage_count && consumer->usage_count > 0) + consumer->usage_count--; + if (consumer->usage_count == 0) { + consumer->resource.state = IPA_RM_RELEASE_IN_PROGRESS; + if (save_state == IPA_RM_REQUEST_IN_PROGRESS || + ipa_suspend_resource_no_block( + consumer->resource.name) != 0) { + ipa_rm_wq_send_suspend_cmd( + consumer->resource.name, + save_state, + prod_needed_bw); + result = -EINPROGRESS; + goto bail; + } + result = ipa_rm_resource_consumer_release_work(consumer, + save_state, false); + goto bail; + } else if (consumer->resource.state == IPA_RM_GRANTED) { + ipa_rm_perf_profile_change(consumer->resource.name); + } + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (dec_usage_count && consumer->usage_count > 0) + consumer->usage_count--; + result = -EINPROGRESS; + break; + default: + result = -EPERM; + goto bail; + } +bail: + IPA_RM_DBG_LOW("%s new state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); + IPA_RM_DBG_LOW("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_resource_producer_notify_clients() - notify + * all registered clients of given producer + * @producer: producer + * @event: event to notify + * @notify_registered_only: notify only clients registered by + * ipa_rm_register() + */ +void ipa_rm_resource_producer_notify_clients( + struct ipa_rm_resource_prod *producer, + enum ipa_rm_event event, + bool notify_registered_only) +{ + struct ipa_rm_notification_info *reg_info; + + IPA_RM_DBG_LOW("%s event: %d notify_registered_only: %d\n", + ipa_rm_resource_str(producer->resource.name), + event, + notify_registered_only); + + list_for_each_entry(reg_info, &(producer->event_listeners), link) { + if (notify_registered_only && !reg_info->explicit) + continue; + + IPA_RM_DBG_LOW("Notifying %s event: %d\n", + ipa_rm_resource_str(producer->resource.name), event); + reg_info->reg_params.notify_cb(reg_info->reg_params.user_data, + event, + 0); + IPA_RM_DBG_LOW("back from client CB\n"); + } +} + +static int ipa_rm_resource_producer_create(struct ipa_rm_resource **resource, + struct ipa_rm_resource_prod **producer, + struct ipa_rm_create_params *create_params, + int *max_peers) +{ + int result = 0; + + *producer = kzalloc(sizeof(**producer), GFP_ATOMIC); + if (*producer == NULL) { + result = -ENOMEM; + goto bail; + } + + INIT_LIST_HEAD(&((*producer)->event_listeners)); + result = ipa_rm_resource_producer_register(*producer, + &(create_params->reg_params), + false); + if (result) { + IPA_RM_ERR("ipa_rm_resource_producer_register() failed\n"); + goto register_fail; + } + + (*resource) = (struct ipa_rm_resource *) (*producer); + (*resource)->type = IPA_RM_PRODUCER; + *max_peers = IPA_RM_RESOURCE_MAX; + goto bail; +register_fail: + kfree(*producer); +bail: + return result; +} + +static void ipa_rm_resource_producer_delete( + struct ipa_rm_resource_prod *producer) +{ + struct ipa_rm_notification_info *reg_info; + struct list_head *pos, *q; + + ipa_rm_resource_producer_release(producer); + list_for_each_safe(pos, q, &(producer->event_listeners)) { + reg_info = list_entry(pos, + struct ipa_rm_notification_info, + link); + list_del(pos); + kfree(reg_info); + } +} + +static int 
ipa_rm_resource_consumer_create(struct ipa_rm_resource **resource, + struct ipa_rm_resource_cons **consumer, + struct ipa_rm_create_params *create_params, + int *max_peers) +{ + int result = 0; + + *consumer = kzalloc(sizeof(**consumer), GFP_ATOMIC); + if (*consumer == NULL) { + result = -ENOMEM; + goto bail; + } + + (*consumer)->request_resource = create_params->request_resource; + (*consumer)->release_resource = create_params->release_resource; + (*resource) = (struct ipa_rm_resource *) (*consumer); + (*resource)->type = IPA_RM_CONSUMER; + init_completion(&((*consumer)->request_consumer_in_progress)); + *max_peers = IPA_RM_RESOURCE_MAX; +bail: + return result; +} + +/** + * ipa_rm_resource_create() - creates resource + * @create_params: [in] parameters needed + * for resource initialization with IPA RM + * @resource: [out] created resource + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_create( + struct ipa_rm_create_params *create_params, + struct ipa_rm_resource **resource) +{ + struct ipa_rm_resource_cons *consumer; + struct ipa_rm_resource_prod *producer; + int max_peers; + int result = 0; + + if (!create_params) { + result = -EINVAL; + goto bail; + } + + if (IPA_RM_RESORCE_IS_PROD(create_params->name)) { + result = ipa_rm_resource_producer_create(resource, + &producer, + create_params, + &max_peers); + if (result) { + IPA_RM_ERR("ipa_rm_resource_producer_create failed\n"); + goto bail; + } + } else if (IPA_RM_RESORCE_IS_CONS(create_params->name)) { + result = ipa_rm_resource_consumer_create(resource, + &consumer, + create_params, + &max_peers); + if (result) { + IPA_RM_ERR("ipa_rm_resource_consumer_create failed\n"); + goto bail; + } + } else { + IPA_RM_ERR("invalid resource\n"); + result = -EPERM; + goto bail; + } + + result = ipa_rm_peers_list_create(max_peers, + &((*resource)->peers_list)); + if (result) { + IPA_RM_ERR("ipa_rm_peers_list_create failed\n"); + goto peers_alloc_fail; + } + (*resource)->name = create_params->name; + (*resource)->floor_voltage = create_params->floor_voltage; + (*resource)->state = IPA_RM_RELEASED; + goto bail; + +peers_alloc_fail: + ipa_rm_resource_delete(*resource); +bail: + return result; +} + +/** + * ipa_rm_resource_delete() - deletes resource + * @resource: [in] resource to be deleted + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_delete(struct ipa_rm_resource *resource) +{ + struct ipa_rm_resource *consumer; + struct ipa_rm_resource *producer; + int peers_index; + int result = 0; + int list_size; + bool userspace_dep; + + if (!resource) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + IPA_RM_DBG("ENTER with resource %d\n", resource->name); + if (resource->type == IPA_RM_PRODUCER) { + if (resource->peers_list) { + list_size = ipa_rm_peers_list_get_size( + resource->peers_list); + for (peers_index = 0; + peers_index < list_size; + peers_index++) { + consumer = ipa_rm_peers_list_get_resource( + peers_index, + resource->peers_list); + if (consumer) { + userspace_dep = + ipa_rm_peers_list_get_userspace_dep( + peers_index, + resource->peers_list); + ipa_rm_resource_delete_dependency( + resource, + consumer, + userspace_dep); + } + } + } + + ipa_rm_resource_producer_delete( + (struct ipa_rm_resource_prod *) resource); + } else if (resource->type == IPA_RM_CONSUMER) { + if (resource->peers_list) { + list_size = ipa_rm_peers_list_get_size( + resource->peers_list); + for (peers_index = 0; + peers_index < list_size; + peers_index++){ + producer =
ipa_rm_peers_list_get_resource( + peers_index, + resource->peers_list); + if (producer) { + userspace_dep = + ipa_rm_peers_list_get_userspace_dep( + peers_index, + resource->peers_list); + ipa_rm_resource_delete_dependency( + producer, + resource, + userspace_dep); + } + } + } + } + ipa_rm_peers_list_delete(resource->peers_list); + kfree(resource); + return result; +} + +/** + * ipa_rm_resource_producer_register() - register resource + * @producer: [in] producer resource + * @reg_params: [in] registration parameters + * @explicit: [in] registered explicitly by ipa_rm_register() + * + * Returns: 0 on success, negative on failure + * + * Producer resource is expected for this call. + * + */ +int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params, + bool explicit) +{ + int result = 0; + struct ipa_rm_notification_info *reg_info; + struct list_head *pos; + + if (!producer || !reg_params) { + IPA_RM_ERR("invalid params\n"); + result = -EPERM; + goto bail; + } + + list_for_each(pos, &(producer->event_listeners)) { + reg_info = list_entry(pos, + struct ipa_rm_notification_info, + link); + if (reg_info->reg_params.notify_cb == + reg_params->notify_cb) { + IPA_RM_ERR("already registered\n"); + result = -EPERM; + goto bail; + } + + } + + reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC); + if (reg_info == NULL) { + result = -ENOMEM; + goto bail; + } + + reg_info->reg_params.user_data = reg_params->user_data; + reg_info->reg_params.notify_cb = reg_params->notify_cb; + reg_info->explicit = explicit; + INIT_LIST_HEAD(&reg_info->link); + list_add(&reg_info->link, &producer->event_listeners); +bail: + return result; +} + +/** + * ipa_rm_resource_producer_deregister() - deregister resource + * @producer: [in] producer resource + * @reg_params: [in] registration parameters + * + * Returns: 0 on success, negative on failure + * + * Producer resource is expected for this call. + * This function deletes only a single instance of + * the registration info. + * + */ +int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params) +{ + int result = -EINVAL; + struct ipa_rm_notification_info *reg_info; + struct list_head *pos, *q; + + if (!producer || !reg_params) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + list_for_each_safe(pos, q, &(producer->event_listeners)) { + reg_info = list_entry(pos, + struct ipa_rm_notification_info, + link); + if (reg_info->reg_params.notify_cb == + reg_params->notify_cb) { + list_del(pos); + kfree(reg_info); + result = 0; + goto bail; + } + } +bail: + return result; +} + +/** + * ipa_rm_resource_add_dependency() - add dependency between two + * given resources + * @resource: [in] resource + * @depends_on: [in] depends_on resource + * @userspace_dep: [in] true if the dependency is added by userspace + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on, + bool userspace_dep) +{ + int result = 0; + int consumer_result; + bool add_dep_by_userspace; + + if (!resource || !depends_on) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + if (ipa_rm_peers_list_check_dependency(resource->peers_list, + resource->name, + depends_on->peers_list, + depends_on->name, + &add_dep_by_userspace)) { + IPA_RM_ERR("dependency already exists, added by %s\n", + add_dep_by_userspace ?
"userspace" : "kernel"); + return -EEXIST; + } + + ipa_rm_peers_list_add_peer(resource->peers_list, depends_on, + userspace_dep); + ipa_rm_peers_list_add_peer(depends_on->peers_list, resource, + userspace_dep); + IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + + resource->needed_bw += depends_on->max_bw; + switch (resource->state) { + case IPA_RM_RELEASED: + case IPA_RM_RELEASE_IN_PROGRESS: + break; + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + { + enum ipa_rm_resource_state prev_state = resource->state; + + resource->state = IPA_RM_REQUEST_IN_PROGRESS; + ((struct ipa_rm_resource_prod *) + resource)->pending_request++; + consumer_result = ipa_rm_resource_consumer_request( + (struct ipa_rm_resource_cons *)depends_on, + resource->max_bw, + true, false); + if (consumer_result != -EINPROGRESS) { + resource->state = prev_state; + ((struct ipa_rm_resource_prod *) + resource)->pending_request--; + ipa_rm_perf_profile_change(resource->name); + } + result = consumer_result; + break; + } + default: + IPA_RM_ERR("invalid state\n"); + result = -EPERM; + goto bail; + } +bail: + IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_resource_delete_dependency() - add dependency between two + * given resources + * @resource: [in] resource resource + * @depends_on: [in] depends_on resource + * + * Returns: 0 on success, negative on failure + * In case the resource state was changed, a notification + * will be sent to the RM client + */ +int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on, + bool userspace_dep) +{ + int result = 0; + bool state_changed = false; + bool release_consumer = false; + enum ipa_rm_event evt; + bool add_dep_by_userspace; + + if (!resource || !depends_on) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + if (!ipa_rm_peers_list_check_dependency(resource->peers_list, + resource->name, + depends_on->peers_list, + depends_on->name, + &add_dep_by_userspace)) { + IPA_RM_ERR("dependency does not exist\n"); + return -EINVAL; + } + + /* + * to avoid race conditions between kernel and userspace + * need to check that the dependency was added by same entity + */ + if (add_dep_by_userspace != userspace_dep) { + IPA_RM_DBG("dependency was added by %s\n", + add_dep_by_userspace ? "userspace" : "kernel"); + IPA_RM_DBG("ignore request to delete dependency by %s\n", + userspace_dep ? 
"userspace" : "kernel"); + return 0; + } + + IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + + resource->needed_bw -= depends_on->max_bw; + switch (resource->state) { + case IPA_RM_RELEASED: + break; + case IPA_RM_GRANTED: + ipa_rm_perf_profile_change(resource->name); + release_consumer = true; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (((struct ipa_rm_resource_prod *) + resource)->pending_release > 0) + ((struct ipa_rm_resource_prod *) + resource)->pending_release--; + if (depends_on->state == IPA_RM_RELEASE_IN_PROGRESS && + ((struct ipa_rm_resource_prod *) + resource)->pending_release == 0) { + resource->state = IPA_RM_RELEASED; + state_changed = true; + evt = IPA_RM_RESOURCE_RELEASED; + ipa_rm_perf_profile_change(resource->name); + } + break; + case IPA_RM_REQUEST_IN_PROGRESS: + release_consumer = true; + if (((struct ipa_rm_resource_prod *) + resource)->pending_request > 0) + ((struct ipa_rm_resource_prod *) + resource)->pending_request--; + if (depends_on->state == IPA_RM_REQUEST_IN_PROGRESS && + ((struct ipa_rm_resource_prod *) + resource)->pending_request == 0) { + resource->state = IPA_RM_GRANTED; + state_changed = true; + evt = IPA_RM_RESOURCE_GRANTED; + ipa_rm_perf_profile_change(resource->name); + } + break; + default: + result = -EINVAL; + goto bail; + } + if (state_changed) { + (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, + resource->name, + evt, + false); + } + IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name), + resource->state); + ipa_rm_peers_list_remove_peer(resource->peers_list, + depends_on->name); + ipa_rm_peers_list_remove_peer(depends_on->peers_list, + resource->name); + if (release_consumer) + (void) ipa_rm_resource_consumer_release( + (struct ipa_rm_resource_cons *)depends_on, + resource->max_bw, + true); +bail: + IPA_RM_DBG("EXIT with %d\n", result); + + return result; +} + +/** + * ipa_rm_resource_producer_request() - producer resource request + * @producer: [in] producer + * + * Returns: 0 on success, negative on failure + */ +int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer) +{ + int peers_index; + int result = 0; + struct ipa_rm_resource *consumer; + int consumer_result; + enum ipa_rm_resource_state state; + + state = producer->resource.state; + switch (producer->resource.state) { + case IPA_RM_RELEASED: + case IPA_RM_RELEASE_IN_PROGRESS: + producer->resource.state = IPA_RM_REQUEST_IN_PROGRESS; + break; + case IPA_RM_GRANTED: + goto unlock_and_bail; + case IPA_RM_REQUEST_IN_PROGRESS: + result = -EINPROGRESS; + goto unlock_and_bail; + default: + result = -EINVAL; + goto unlock_and_bail; + } + + producer->pending_request = 0; + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + producer->resource.peers_list); + peers_index++) { + consumer = ipa_rm_peers_list_get_resource(peers_index, + producer->resource.peers_list); + if (consumer) { + producer->pending_request++; + consumer_result = ipa_rm_resource_consumer_request( + (struct ipa_rm_resource_cons *)consumer, + producer->resource.max_bw, + true, false); + if (consumer_result == -EINPROGRESS) { + result = -EINPROGRESS; + } else { + producer->pending_request--; + if (consumer_result != 0) { + result = consumer_result; + goto bail; + } + } + } + } + + if (producer->pending_request == 0) { + producer->resource.state = IPA_RM_GRANTED; + ipa_rm_perf_profile_change(producer->resource.name); + (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, + producer->resource.name, + IPA_RM_RESOURCE_GRANTED, + true); + 
result = 0; + } +unlock_and_bail: + if (state != producer->resource.state) + IPA_RM_DBG_LOW("%s state changed %d->%d\n", + ipa_rm_resource_str(producer->resource.name), + state, + producer->resource.state); +bail: + return result; +} + +/** + * ipa_rm_resource_producer_release() - producer resource release + * producer: [in] producer resource + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer) +{ + int peers_index; + int result = 0; + struct ipa_rm_resource *consumer; + int consumer_result; + enum ipa_rm_resource_state state; + + state = producer->resource.state; + switch (producer->resource.state) { + case IPA_RM_RELEASED: + goto bail; + case IPA_RM_GRANTED: + case IPA_RM_REQUEST_IN_PROGRESS: + producer->resource.state = IPA_RM_RELEASE_IN_PROGRESS; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + result = -EINPROGRESS; + goto bail; + default: + result = -EPERM; + goto bail; + } + + producer->pending_release = 0; + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + producer->resource.peers_list); + peers_index++) { + consumer = ipa_rm_peers_list_get_resource(peers_index, + producer->resource.peers_list); + if (consumer) { + producer->pending_release++; + consumer_result = ipa_rm_resource_consumer_release( + (struct ipa_rm_resource_cons *)consumer, + producer->resource.max_bw, + true); + producer->pending_release--; + } + } + + if (producer->pending_release == 0) { + producer->resource.state = IPA_RM_RELEASED; + ipa_rm_perf_profile_change(producer->resource.name); + (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD, + producer->resource.name, + IPA_RM_RESOURCE_RELEASED, + true); + } +bail: + if (state != producer->resource.state) + IPA_RM_DBG_LOW("%s state changed %d->%d\n", + ipa_rm_resource_str(producer->resource.name), + state, + producer->resource.state); + + return result; +} + +static void ipa_rm_resource_producer_handle_cb( + struct ipa_rm_resource_prod *producer, + enum ipa_rm_event event) +{ + IPA_RM_DBG_LOW("%s state: %d event: %d pending_request: %d\n", + ipa_rm_resource_str(producer->resource.name), + producer->resource.state, + event, + producer->pending_request); + + switch (producer->resource.state) { + case IPA_RM_REQUEST_IN_PROGRESS: + if (event != IPA_RM_RESOURCE_GRANTED) + goto unlock_and_bail; + if (producer->pending_request > 0) { + producer->pending_request--; + if (producer->pending_request == 0) { + producer->resource.state = + IPA_RM_GRANTED; + ipa_rm_perf_profile_change( + producer->resource.name); + ipa_rm_resource_producer_notify_clients( + producer, + IPA_RM_RESOURCE_GRANTED, + false); + goto bail; + } + } + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (event != IPA_RM_RESOURCE_RELEASED) + goto unlock_and_bail; + if (producer->pending_release > 0) { + producer->pending_release--; + if (producer->pending_release == 0) { + producer->resource.state = + IPA_RM_RELEASED; + ipa_rm_perf_profile_change( + producer->resource.name); + ipa_rm_resource_producer_notify_clients( + producer, + IPA_RM_RESOURCE_RELEASED, + false); + goto bail; + } + } + break; + case IPA_RM_GRANTED: + case IPA_RM_RELEASED: + default: + goto unlock_and_bail; + } +unlock_and_bail: + IPA_RM_DBG_LOW("%s new state: %d\n", + ipa_rm_resource_str(producer->resource.name), + producer->resource.state); +bail: + return; +} + +/** + * ipa_rm_resource_consumer_handle_cb() - propagates resource + * notification to all dependent producers + * @consumer: [in] notifying resource + * + */ +void 
ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_event event) +{ + int peers_index; + struct ipa_rm_resource *producer; + + if (!consumer) { + IPA_RM_ERR("invalid params\n"); + return; + } + IPA_RM_DBG_LOW("%s state: %d event: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state, + event); + + switch (consumer->resource.state) { + case IPA_RM_REQUEST_IN_PROGRESS: + if (event == IPA_RM_RESOURCE_RELEASED) + goto bail; + consumer->resource.state = IPA_RM_GRANTED; + ipa_rm_perf_profile_change(consumer->resource.name); + ipa_resume_resource(consumer->resource.name); + complete_all(&consumer->request_consumer_in_progress); + break; + case IPA_RM_RELEASE_IN_PROGRESS: + if (event == IPA_RM_RESOURCE_GRANTED) + goto bail; + consumer->resource.state = IPA_RM_RELEASED; + break; + case IPA_RM_GRANTED: + case IPA_RM_RELEASED: + default: + goto bail; + } + + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + consumer->resource.peers_list); + peers_index++) { + producer = ipa_rm_peers_list_get_resource(peers_index, + consumer->resource.peers_list); + if (producer) + ipa_rm_resource_producer_handle_cb( + (struct ipa_rm_resource_prod *) + producer, + event); + } + + return; +bail: + IPA_RM_DBG_LOW("%s new state: %d\n", + ipa_rm_resource_str(consumer->resource.name), + consumer->resource.state); +} + +/* + * ipa_rm_resource_set_perf_profile() - sets the performance profile for a + * resource. + * + * @resource: [in] resource + * @profile: [in] profile to be set + * + * Sets the profile for the given resource. In case the resource is + * granted, updates the bandwidth vote of the resource + */ +int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource, + struct ipa_rm_perf_profile *profile) +{ + int peers_index; + struct ipa_rm_resource *peer; + + if (!resource || !profile) { + IPA_RM_ERR("invalid params\n"); + return -EINVAL; + } + + if (profile->max_supported_bandwidth_mbps == resource->max_bw) { + IPA_RM_DBG_LOW("same profile\n"); + return 0; + } + + if ((resource->type == IPA_RM_PRODUCER && + (resource->state == IPA_RM_GRANTED || + resource->state == IPA_RM_REQUEST_IN_PROGRESS)) || + resource->type == IPA_RM_CONSUMER) { + for (peers_index = 0; + peers_index < ipa_rm_peers_list_get_size( + resource->peers_list); + peers_index++) { + peer = ipa_rm_peers_list_get_resource(peers_index, + resource->peers_list); + if (!peer) + continue; + peer->needed_bw -= resource->max_bw; + peer->needed_bw += + profile->max_supported_bandwidth_mbps; + if (peer->state == IPA_RM_GRANTED) + ipa_rm_perf_profile_change(peer->name); + } + } + + resource->max_bw = profile->max_supported_bandwidth_mbps; + if (resource->state == IPA_RM_GRANTED) + ipa_rm_perf_profile_change(resource->name); + + return 0; +} + + +/* + * ipa_rm_resource_producer_print_stat() - print the + * resource status and all its dependencies + * + * @resource: [in] producer resource + * @buf: [in] The buffer used to print + * @size: [in] Buffer size + * + * Returns: number of bytes used on success, negative on failure + */ +int ipa_rm_resource_producer_print_stat( + struct ipa_rm_resource *resource, + char *buf, + int size) +{ + + int i; + int nbytes; + int cnt = 0; + struct ipa_rm_resource *consumer; + + if (!buf || size < 0) + return -EINVAL; + + nbytes = scnprintf(buf + cnt, size - cnt, + ipa_rm_resource_str(resource->name)); + cnt += nbytes; + nbytes = scnprintf(buf + cnt, size - cnt, "[%d, ", resource->max_bw); + cnt += nbytes; + + switch (resource->state) { + case
IPA_RM_RELEASED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Released] -> "); + cnt += nbytes; + break; + case IPA_RM_REQUEST_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Request In Progress] -> "); + cnt += nbytes; + break; + case IPA_RM_GRANTED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Granted] -> "); + cnt += nbytes; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Release In Progress] -> "); + cnt += nbytes; + break; + default: + return -EPERM; + } + + for (i = 0; i < resource->peers_list->max_peers; ++i) { + consumer = + ipa_rm_peers_list_get_resource( + i, + resource->peers_list); + if (consumer) { + nbytes = scnprintf(buf + cnt, size - cnt, + ipa_rm_resource_str(consumer->name)); + cnt += nbytes; + nbytes = scnprintf(buf + cnt, size - cnt, "[%d, ", + consumer->max_bw); + cnt += nbytes; + + switch (consumer->state) { + case IPA_RM_RELEASED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Released], "); + cnt += nbytes; + break; + case IPA_RM_REQUEST_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Request In Progress], "); + cnt += nbytes; + break; + case IPA_RM_GRANTED: + nbytes = scnprintf(buf + cnt, size - cnt, + "Granted], "); + cnt += nbytes; + break; + case IPA_RM_RELEASE_IN_PROGRESS: + nbytes = scnprintf(buf + cnt, size - cnt, + "Release In Progress], "); + cnt += nbytes; + break; + default: + return -EPERM; + } + } + } + nbytes = scnprintf(buf + cnt, size - cnt, "\n"); + cnt += nbytes; + + return cnt; +} diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_rm_resource.h new file mode 100644 index 000000000000..98af5ac9872c --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_rm_resource.h @@ -0,0 +1,166 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _IPA_RM_RESOURCE_H_ +#define _IPA_RM_RESOURCE_H_ + +#include +#include +#include "ipa_rm_peers_list.h" + +/** + * enum ipa_rm_resource_state - resource state + */ +enum ipa_rm_resource_state { + IPA_RM_RELEASED, + IPA_RM_REQUEST_IN_PROGRESS, + IPA_RM_GRANTED, + IPA_RM_RELEASE_IN_PROGRESS +}; + +/** + * enum ipa_rm_resource_type - IPA resource manager resource type + */ +enum ipa_rm_resource_type { + IPA_RM_PRODUCER, + IPA_RM_CONSUMER +}; + +/** + * struct ipa_rm_notification_info - notification information + * of IPA RM client + * @reg_params: registration parameters + * @explicit: registered explicitly by ipa_rm_register() + * @link: link to the list of all registered clients information + */ +struct ipa_rm_notification_info { + struct ipa_rm_register_params reg_params; + bool explicit; + struct list_head link; +}; + +/** + * struct ipa_rm_resource - IPA RM resource + * @name: name identifying resource + * @type: type of resource (PRODUCER or CONSUMER) + * @floor_voltage: minimum voltage level for operation + * @max_bw: maximum bandwidth required for resource in Mbps + * @needed_bw: bandwidth currently needed by the resource in Mbps + * @state: state of the resource + * @peers_list: list of the peers of the resource + */ +struct ipa_rm_resource { + enum ipa_rm_resource_name name; + enum ipa_rm_resource_type type; + enum ipa_voltage_level floor_voltage; + u32 max_bw; + u32 needed_bw; + enum ipa_rm_resource_state state; + struct ipa_rm_peers_list *peers_list; +}; + +/** + * struct ipa_rm_resource_cons - IPA RM consumer + * @resource: resource + * @usage_count: number of producers in GRANTED / REQUESTED state + * using this consumer + * @request_consumer_in_progress: when set, the consumer is in its request + * phase + * @request_resource: function which should be called to request resource + * from resource manager + * @release_resource: function which should be called to release resource + * from resource manager + * Add new fields after @resource only. + */ +struct ipa_rm_resource_cons { + struct ipa_rm_resource resource; + int usage_count; + struct completion request_consumer_in_progress; + int (*request_resource)(void); + int (*release_resource)(void); +}; + +/** + * struct ipa_rm_resource_prod - IPA RM producer + * @resource: resource + * @event_listeners: list of clients registered with this producer + * for notifications of resource state changes. + * Add new fields after @resource only.
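 + * @pending_request: number of consumer requests this producer is still waiting for + * @pending_release: number of consumer releases this producer is still waiting for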
+ */ +struct ipa_rm_resource_prod { + struct ipa_rm_resource resource; + struct list_head event_listeners; + int pending_request; + int pending_release; +}; + +int ipa_rm_resource_create( + struct ipa_rm_create_params *create_params, + struct ipa_rm_resource **resource); + +int ipa_rm_resource_delete(struct ipa_rm_resource *resource); + +int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params, + bool explicit); + +int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer, + struct ipa_rm_register_params *reg_params); + +int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on, + bool userspace_dep); + +int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource, + struct ipa_rm_resource *depends_on, + bool userspace_dep); + +int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer); + +int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer); + +int ipa_rm_resource_consumer_request(struct ipa_rm_resource_cons *consumer, + u32 needed_bw, + bool inc_usage_count, + bool wake_client); + +int ipa_rm_resource_consumer_release(struct ipa_rm_resource_cons *consumer, + u32 needed_bw, + bool dec_usage_count); + +int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource, + struct ipa_rm_perf_profile *profile); + +void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_event event); + +void ipa_rm_resource_producer_notify_clients( + struct ipa_rm_resource_prod *producer, + enum ipa_rm_event event, + bool notify_registered_only); + +int ipa_rm_resource_producer_print_stat( + struct ipa_rm_resource *resource, + char *buf, + int size); + +int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + u32 needed_bw, + bool notify_completion, + bool dec_client_on_err); + +int ipa_rm_resource_consumer_release_work( + struct ipa_rm_resource_cons *consumer, + enum ipa_rm_resource_state prev_state, + bool notify_completion); + +#endif /* _IPA_RM_RESOURCE_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h new file mode 100644 index 000000000000..0bc4b768e847 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h @@ -0,0 +1,27 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +#ifndef _IPA_UC_OFFLOAD_COMMON_I_H_ +#define _IPA_UC_OFFLOAD_COMMON_I_H_ + +int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp); +int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl); + +int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data), + void *user_data); +void ipa_ntn_uc_dereg_rdyCB(void); +#endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile new file mode 100644 index 000000000000..e3f8d452e052 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/Makefile @@ -0,0 +1,9 @@ +obj-$(CONFIG_IPA3) += ipahal/ + +obj-$(CONFIG_IPA3) += ipat.o +ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \ + ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \ + ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \ + ipa_hw_stats.o + +obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c new file mode 100644 index 000000000000..6d994e56b50b --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -0,0 +1,5947 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_ARM64 + +/* Outer caches unsupported on ARM64 platforms */ +#define outer_flush_range(x, y) +#define __cpuc_flush_dcache_area __flush_dcache_area + +#endif + +#define IPA_SUBSYSTEM_NAME "ipa_fws" + +#include "ipa_i.h" +#include "../ipa_rm_i.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" + +#define CREATE_TRACE_POINTS +#include "ipa_trace.h" + +#define IPA_GPIO_IN_QUERY_CLK_IDX 0 +#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0 +#define IPA_GPIO_OUT_CLK_VOTE_IDX 1 + +#define IPA_SUMMING_THRESHOLD (0x10) +#define IPA_PIPE_MEM_START_OFST (0x0) +#define IPA_PIPE_MEM_SIZE (0x0) +#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \ + x == IPA_MODE_MOBILE_AP_WAN || \ + x == IPA_MODE_MOBILE_AP_WLAN) +#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL) +#define IPA_A5_MUX_HEADER_LENGTH (8) + +#define IPA_AGGR_MAX_STR_LENGTH (10) + +#define CLEANUP_TAG_PROCESS_TIMEOUT 500 + +#define IPA_AGGR_STR_IN_BYTES(str) \ + (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1) + +#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100 + +#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048 + +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0 +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1 +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2 +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3 + +#define IPA_SMEM_SIZE (8 * 1024) + +/* round addresses for closes page per SMMU requirements */ +#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \ + do { \ + (iova_p) = rounddown((iova), PAGE_SIZE); \ + (pa_p) = rounddown((pa), PAGE_SIZE); \ + (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \ + } while (0) + + +/* The relative location in /lib/firmware where the FWs will reside */ +#define IPA_FWS_PATH "ipa/ipa_fws.elf" + +#ifdef CONFIG_COMPAT +#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR, \ + compat_uptr_t) +#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR, \ + compat_uptr_t) +#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_RT_RULE, \ + compat_uptr_t) +#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_RT_RULE, \ + compat_uptr_t) +#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_RT_TBL, \ + compat_uptr_t) +#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_COPY_HDR, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_TX_PROPS, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_RX_PROPS, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_EXT_PROPS, \ + compat_uptr_t) +#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_HDR, \ + compat_uptr_t) +#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ALLOC_NAT_MEM, \ + compat_uptr_t) +#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_INIT_NAT, \ + compat_uptr_t) 
+#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NAT_DMA, \ + compat_uptr_t) +#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_DEL_NAT, \ + compat_uptr_t) +#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_NAT_OFFSET, \ + compat_uptr_t) +#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_PULL_MSG, \ + compat_uptr_t) +#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_RM_ADD_DEPENDENCY, \ + compat_uptr_t) +#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_RM_DEL_DEPENDENCY, \ + compat_uptr_t) +#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GENERATE_FLT_EQ, \ + compat_uptr_t) +#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_RT_TBL_INDEX, \ + compat_uptr_t) +#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_WRITE_QMAPID, \ + compat_uptr_t) +#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \ + compat_uptr_t) +#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR_PROC_CTX, \ + compat_uptr_t) +#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR_PROC_CTX, \ + compat_uptr_t) +#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_RT_RULE, \ + compat_uptr_t) + +/** + * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation + * properties + * @dev_name: input parameter, the name of table + * @size: input parameter, size of table in bytes + * @offset: output parameter, offset into page in case of system memory + */ +struct ipa3_ioc_nat_alloc_mem32 { + char dev_name[IPA_RESOURCE_NAME_MAX]; + compat_size_t size; + compat_off_t offset; +}; +#endif + +#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311 +#define TZ_MEM_PROTECT_REGION_ID 0x10 + +struct tz_smmu_ipa_protect_region_iovec_s { + u64 input_addr; + u64 output_addr; + u64 size; + u32 attr; +} __packed; + +struct tz_smmu_ipa_protect_region_s { + phys_addr_t iovec_buf; + u32 size_bytes; +} __packed; + +static void ipa3_start_tag_process(struct work_struct *work); +static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process); + +static void ipa3_transport_release_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work, + ipa3_transport_release_resource); +static void ipa_gsi_notify_cb(struct gsi_per_notify *notify); + +static void ipa3_post_init_wq(struct work_struct *work); +static DECLARE_WORK(ipa3_post_init_work, ipa3_post_init_wq); + +static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work); +static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work, + ipa_dec_clients_disable_clks_on_wq); + +static struct ipa3_plat_drv_res ipa3_res = {0, }; +struct msm_bus_scale_pdata *ipa3_bus_scale_table; + +static struct clk *ipa3_clk; + +struct ipa3_context *ipa3_ctx; +static struct device *master_dev; +struct platform_device *ipa3_pdev; +static struct { + bool present; + bool arm_smmu; + bool fast_map; + bool s1_bypass; + bool use_64_bit_dma_mask; + u32 ipa_base; + u32 ipa_size; +} smmu_info; + +static char *active_clients_table_buf; + +int 
ipa3_active_clients_log_print_buffer(char *buf, int size) +{ + int i; + int nbytes; + int cnt = 0; + int start_idx; + int end_idx; + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags); + start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) % + IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head; + for (i = start_idx; i != end_idx; + i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) { + nbytes = scnprintf(buf + cnt, size - cnt, "%s\n", + ipa3_ctx->ipa3_active_clients_logging + .log_buffer[i]); + cnt += nbytes; + } + spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock, + flags); + + return cnt; +} + +int ipa3_active_clients_log_print_table(char *buf, int size) +{ + int i; + struct ipa3_active_client_htable_entry *iterator; + int cnt = 0; + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags); + cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n"); + hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i, + iterator, list) { + switch (iterator->type) { + case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d ENDPOINT\n", + iterator->id_string, iterator->count); + break; + case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d SIMPLE\n", + iterator->id_string, iterator->count); + break; + case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d RESOURCE\n", + iterator->id_string, iterator->count); + break; + case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d SPECIAL\n", + iterator->id_string, iterator->count); + break; + default: + IPAERR("Trying to print illegal active_clients type"); + break; + } + } + cnt += scnprintf(buf + cnt, size - cnt, + "\nTotal active clients count: %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock, + flags); + + return cnt; +} + +static int ipa3_active_clients_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + ipa3_active_clients_log_print_table(active_clients_table_buf, + IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE); + IPAERR("%s", active_clients_table_buf); + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); + + return NOTIFY_DONE; +} + +static struct notifier_block ipa3_active_clients_panic_blk = { + .notifier_call = ipa3_active_clients_panic_notifier, +}; + +static int ipa3_active_clients_log_insert(const char *string) +{ + int head; + int tail; + + if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy) + return -EPERM; + + head = ipa3_ctx->ipa3_active_clients_logging.log_head; + tail = ipa3_ctx->ipa3_active_clients_logging.log_tail; + + memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_', + IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN); + strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string, + (size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN); + head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + if (tail == head) + tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + + ipa3_ctx->ipa3_active_clients_logging.log_tail = tail; + ipa3_ctx->ipa3_active_clients_logging.log_head = head; + + return 0; +} + +static int ipa3_active_clients_log_init(void) +{ + int i; + + spin_lock_init(&ipa3_ctx->ipa3_active_clients_logging.lock); + 
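+ /*
+  * The activity log is one contiguous allocation: log_buffer[0] holds
+  * IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES lines of
+  * IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN bytes each, and the remaining
+  * log_buffer[] entries are set below to offsets into that single buffer.
+  */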
ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kcalloc( + IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES, + sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]), + GFP_KERNEL); + active_clients_table_buf = kzalloc(sizeof( + char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL); + if (ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] == NULL) { + pr_err("Active Clients Logging memory allocation failed\n"); + goto bail; + } + for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) { + ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] = + ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] + + (IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i); + } + ipa3_ctx->ipa3_active_clients_logging.log_head = 0; + ipa3_ctx->ipa3_active_clients_logging.log_tail = + IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; + hash_init(ipa3_ctx->ipa3_active_clients_logging.htable); + atomic_notifier_chain_register(&panic_notifier_list, + &ipa3_active_clients_panic_blk); + ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1; + + return 0; + +bail: + return -ENOMEM; +} +
+void ipa3_active_clients_log_clear(void) +{ + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags); + ipa3_ctx->ipa3_active_clients_logging.log_head = 0; + ipa3_ctx->ipa3_active_clients_logging.log_tail = + IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; + spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock, + flags); +} +
+static void ipa3_active_clients_log_destroy(void) +{ + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags); + ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0; + kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]); + ipa3_ctx->ipa3_active_clients_logging.log_head = 0; + ipa3_ctx->ipa3_active_clients_logging.log_tail = + IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; + spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock, + flags); +} +
+enum ipa_smmu_cb_type { + IPA_SMMU_CB_AP, + IPA_SMMU_CB_WLAN, + IPA_SMMU_CB_UC, + IPA_SMMU_CB_MAX + +}; + +static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX]; +
+struct iommu_domain *ipa3_get_smmu_domain(void) +{ + if (smmu_cb[IPA_SMMU_CB_AP].valid) + return smmu_cb[IPA_SMMU_CB_AP].mapping->domain; + + IPAERR("CB not valid\n"); + + return NULL; +} +
+struct iommu_domain *ipa3_get_uc_smmu_domain(void) +{ + if (smmu_cb[IPA_SMMU_CB_UC].valid) + return smmu_cb[IPA_SMMU_CB_UC].mapping->domain; + + IPAERR("CB not valid\n"); + + return NULL; +} +
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void) +{ + if (smmu_cb[IPA_SMMU_CB_WLAN].valid) + return smmu_cb[IPA_SMMU_CB_WLAN].iommu; + + IPAERR("CB not valid\n"); + + return NULL; +} + + +struct device *ipa3_get_dma_dev(void) +{ + return ipa3_ctx->pdev; +} +
+/** + * ipa3_get_smmu_ctx()- Return the AP smmu context + * + * Return value: pointer to smmu context address + */ +struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void) +{ + return &smmu_cb[IPA_SMMU_CB_AP]; +} +
+/** + * ipa3_get_wlan_smmu_ctx()- Return the wlan smmu context + * + * Return value: pointer to smmu context address + */ +struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void) +{ + return &smmu_cb[IPA_SMMU_CB_WLAN]; +} +
+/** + * ipa3_get_uc_smmu_ctx()- Return the uc smmu context + * + * Return value: pointer to smmu context address + */ +struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void) +{ + return &smmu_cb[IPA_SMMU_CB_UC]; +} +
+static int ipa3_open(struct inode *inode, struct file *filp) +{ + struct ipa3_context *ctx = NULL; + + IPADBG_LOW("ENTER\n"); + ctx =
container_of(inode->i_cdev, struct ipa3_context, cdev); + filp->private_data = ctx; + + return 0; +} + +static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAERR("Null buffer\n"); + return; + } + + if (type != WAN_UPSTREAM_ROUTE_ADD && + type != WAN_UPSTREAM_ROUTE_DEL && + type != WAN_EMBMS_CONNECT) { + IPAERR("Wrong type given. buff %pK type %d\n", buff, type); + return; + } + + kfree(buff); +} + +static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type) +{ + int retval; + struct ipa_wan_msg *wan_msg; + struct ipa_msg_meta msg_meta; + + wan_msg = kzalloc(sizeof(*wan_msg), GFP_KERNEL); + if (!wan_msg) + return -ENOMEM; + + if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param, + sizeof(struct ipa_wan_msg))) { + kfree(wan_msg); + return -EFAULT; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = msg_type; + msg_meta.msg_len = sizeof(struct ipa_wan_msg); + retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb); + if (retval) { + IPAERR_RL("ipa3_send_msg failed: %d\n", retval); + kfree(wan_msg); + return retval; + } + + return 0; +} + +static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAERR("Null buffer\n"); + return; + } + + if (type != ADD_VLAN_IFACE && + type != DEL_VLAN_IFACE && + type != ADD_L2TP_VLAN_MAPPING && + type != DEL_L2TP_VLAN_MAPPING) { + IPAERR("Wrong type given. buff %pK type %d\n", buff, type); + return; + } + + kfree(buff); +} + +static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type) +{ + int retval; + struct ipa_ioc_vlan_iface_info *vlan_info; + struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info; + struct ipa_msg_meta msg_meta; + + if (msg_type == ADD_VLAN_IFACE || + msg_type == DEL_VLAN_IFACE) { + vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info), + GFP_KERNEL); + if (!vlan_info) + return -ENOMEM; + + if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param, + sizeof(struct ipa_ioc_vlan_iface_info))) { + kfree(vlan_info); + return -EFAULT; + } + + memset(&msg_meta, 0, sizeof(msg_meta)); + msg_meta.msg_type = msg_type; + msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info); + retval = ipa3_send_msg(&msg_meta, vlan_info, + ipa3_vlan_l2tp_msg_free_cb); + if (retval) { + IPAERR("ipa3_send_msg failed: %d\n", retval); + kfree(vlan_info); + return retval; + } + } else if (msg_type == ADD_L2TP_VLAN_MAPPING || + msg_type == DEL_L2TP_VLAN_MAPPING) { + mapping_info = kzalloc(sizeof(struct + ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL); + if (!mapping_info) + return -ENOMEM; + + if (copy_from_user((u8 *)mapping_info, + (void __user *)usr_param, + sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) { + kfree(mapping_info); + return -EFAULT; + } + + memset(&msg_meta, 0, sizeof(msg_meta)); + msg_meta.msg_type = msg_type; + msg_meta.msg_len = sizeof(struct + ipa_ioc_l2tp_vlan_mapping_info); + retval = ipa3_send_msg(&msg_meta, mapping_info, + ipa3_vlan_l2tp_msg_free_cb); + if (retval) { + IPAERR("ipa3_send_msg failed: %d\n", retval); + kfree(mapping_info); + return retval; + } + } else { + IPAERR("Unexpected event\n"); + return -EFAULT; + } + + return 0; +} + +static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int retval = 0; + u32 pyld_sz; + u8 header[128] = { 0 }; + u8 *param = NULL; + struct ipa_ioc_nat_alloc_mem nat_mem; + struct ipa_ioc_v4_nat_init nat_init; + struct ipa_ioc_v4_nat_del nat_del; + struct ipa_ioc_nat_pdn_entry mdfy_pdn; + struct ipa_ioc_rm_dependency rm_depend; + size_t sz; + 
int pre_entry; + + IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd)); + + if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC) + return -ENOTTY; + if (_IOC_NR(cmd) >= IPA_IOCTL_MAX) + return -ENOTTY; + + if (!ipa3_is_ready()) { + IPAERR("IPA not ready, waiting for init completion\n"); + wait_for_completion(&ipa3_ctx->init_completion_obj); + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + switch (cmd) { + case IPA_IOC_ALLOC_NAT_MEM: + if (copy_from_user((u8 *)&nat_mem, (u8 *)arg, + sizeof(struct ipa_ioc_nat_alloc_mem))) { + retval = -EFAULT; + break; + } + /* null terminate the string */ + nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0'; + + if (ipa3_allocate_nat_device(&nat_mem)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, (u8 *)&nat_mem, + sizeof(struct ipa_ioc_nat_alloc_mem))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_V4_INIT_NAT: + if (copy_from_user((u8 *)&nat_init, (u8 *)arg, + sizeof(struct ipa_ioc_v4_nat_init))) { + retval = -EFAULT; + break; + } + if (ipa3_nat_init_cmd(&nat_init)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_NAT_DMA: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_nat_dma_cmd))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_nat_dma_cmd *)header)->entries; + pyld_sz = + sizeof(struct ipa_ioc_nat_dma_cmd) + + pre_entry * sizeof(struct ipa_ioc_nat_dma_one); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_nat_dma_cmd *)param)->entries, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_V4_DEL_NAT: + if (copy_from_user((u8 *)&nat_del, (u8 *)arg, + sizeof(struct ipa_ioc_v4_nat_del))) { + retval = -EFAULT; + break; + } + if (ipa3_nat_del_cmd(&nat_del)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_NAT_MODIFY_PDN: + if (copy_from_user((u8 *)&mdfy_pdn, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_pdn_entry))) { + retval = -EFAULT; + break; + } + if (ipa3_nat_mdfy_pdn(&mdfy_pdn)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_HDR: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_add_hdr))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_hdr *)header)->num_hdrs; + pyld_sz = + sizeof(struct ipa_ioc_add_hdr) + + pre_entry * sizeof(struct ipa_hdr_add); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_hdr *)param)->num_hdrs, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_HDR: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_del_hdr))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_hdr *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_hdr) + + 
pre_entry * sizeof(struct ipa_hdr_del); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_hdr *)param)->num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_del_hdr_by_user((struct ipa_ioc_del_hdr *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_RT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_add_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_rt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_add); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_rt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_ADD_RT_RULE_AFTER: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_add_rt_rule_after))) { + + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_rt_rule_after) + + pre_entry * sizeof(struct ipa_rt_rule_add); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)-> + num_rules != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_rt_rule_after *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_add_rt_rule_after( + (struct ipa_ioc_add_rt_rule_after *)param)) { + + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_MDFY_RT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_mdfy_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_mdfy_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_mdfy); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_mdfy_rt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + 
} + break; + + case IPA_IOC_DEL_RT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_del_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_rt_rule *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_del); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_rt_rule *)param)->num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_FLT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_add_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_flt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_add); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_flt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_FLT_RULE_AFTER: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_add_flt_rule_after))) { + + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_flt_rule_after *)header)-> + num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_flt_rule_after) + + pre_entry * sizeof(struct ipa_flt_rule_add); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)-> + num_rules != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_flt_rule_after *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_add_flt_rule_after( + (struct ipa_ioc_add_flt_rule_after *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_FLT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_del_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_flt_rule *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_del); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls + != pre_entry)) { + 
IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_flt_rule *)param)-> + num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_MDFY_FLT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_mdfy_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_mdfy_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_mdfy); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_mdfy_flt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_COMMIT_HDR: + retval = ipa3_commit_hdr(); + break; + case IPA_IOC_RESET_HDR: + retval = ipa3_reset_hdr(); + break; + case IPA_IOC_COMMIT_RT: + retval = ipa3_commit_rt(arg); + break; + case IPA_IOC_RESET_RT: + retval = ipa3_reset_rt(arg); + break; + case IPA_IOC_COMMIT_FLT: + retval = ipa3_commit_flt(arg); + break; + case IPA_IOC_RESET_FLT: + retval = ipa3_reset_flt(arg); + break; + case IPA_IOC_GET_RT_TBL: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_get_rt_tbl))) { + retval = -EFAULT; + break; + } + if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_get_rt_tbl))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PUT_RT_TBL: + retval = ipa3_put_rt_tbl(arg); + break; + case IPA_IOC_GET_HDR: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_get_hdr))) { + retval = -EFAULT; + break; + } + if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_get_hdr))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PUT_HDR: + retval = ipa3_put_hdr(arg); + break; + case IPA_IOC_SET_FLT: + retval = ipa3_cfg_filter(arg); + break; + case IPA_IOC_COPY_HDR: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_copy_hdr))) { + retval = -EFAULT; + break; + } + if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_copy_hdr))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_query_intf))) { + retval = -EFAULT; + break; + } + if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) { + retval = -1; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_query_intf))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_TX_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_tx_props); + if (copy_from_user(header, (u8 *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props + > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; 
+ break; + } + pre_entry = + ((struct ipa_ioc_query_intf_tx_props *) + header)->num_tx_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_tx_intf_prop); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_tx_props *) + param)->num_tx_props + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_tx_props *) + param)->num_tx_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_query_intf_tx_props( + (struct ipa_ioc_query_intf_tx_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_RX_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_rx_props); + if (copy_from_user(header, (u8 *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props + > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_query_intf_rx_props *) + header)->num_rx_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_rx_intf_prop); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_rx_props *) + param)->num_rx_props != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_rx_props *) + param)->num_rx_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_query_intf_rx_props( + (struct ipa_ioc_query_intf_rx_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_EXT_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_ext_props); + if (copy_from_user(header, (u8 *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_ext_props *) + header)->num_ext_props > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_query_intf_ext_props *) + header)->num_ext_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_ext_intf_prop); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_ext_props *) + param)->num_ext_props != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_ext_props *) + param)->num_ext_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_query_intf_ext_props( + (struct ipa_ioc_query_intf_ext_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PULL_MSG: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_msg_meta))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_msg_meta *)header)->msg_len; + pyld_sz = sizeof(struct ipa_msg_meta) + + pre_entry; + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + 
if (unlikely(((struct ipa_msg_meta *)param)->msg_len + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_msg_meta *)param)->msg_len, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_pull_msg((struct ipa_msg_meta *)param, + (char *)param + sizeof(struct ipa_msg_meta), + ((struct ipa_msg_meta *)param)->msg_len) != + ((struct ipa_msg_meta *)param)->msg_len) { + retval = -1; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_RM_ADD_DEPENDENCY: + if (copy_from_user((u8 *)&rm_depend, (u8 *)arg, + sizeof(struct ipa_ioc_rm_dependency))) { + retval = -EFAULT; + break; + } + retval = ipa_rm_add_dependency_from_ioctl( + rm_depend.resource_name, rm_depend.depends_on_name); + break; + case IPA_IOC_RM_DEL_DEPENDENCY: + if (copy_from_user((u8 *)&rm_depend, (u8 *)arg, + sizeof(struct ipa_ioc_rm_dependency))) { + retval = -EFAULT; + break; + } + retval = ipa_rm_delete_dependency_from_ioctl( + rm_depend.resource_name, rm_depend.depends_on_name); + break; + case IPA_IOC_GENERATE_FLT_EQ: + { + struct ipa_ioc_generate_flt_eq flt_eq; + + if (copy_from_user(&flt_eq, (u8 *)arg, + sizeof(struct ipa_ioc_generate_flt_eq))) { + retval = -EFAULT; + break; + } + if (ipahal_flt_generate_equation(flt_eq.ip, + &flt_eq.attrib, &flt_eq.eq_attrib)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, &flt_eq, + sizeof(struct ipa_ioc_generate_flt_eq))) { + retval = -EFAULT; + break; + } + break; + } + case IPA_IOC_QUERY_EP_MAPPING: + { + retval = ipa3_get_ep_mapping(arg); + break; + } + case IPA_IOC_QUERY_RT_TBL_INDEX: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_get_rt_tbl_indx))) { + retval = -EFAULT; + break; + } + if (ipa3_query_rt_index( + (struct ipa_ioc_get_rt_tbl_indx *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_get_rt_tbl_indx))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_WRITE_QMAPID: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_write_qmapid))) { + retval = -EFAULT; + break; + } + if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_write_qmapid))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD: + retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD); + if (retval) { + IPAERR("ipa3_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL: + retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL); + if (retval) { + IPAERR("ipa3_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED: + retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT); + if (retval) { + IPAERR("ipa3_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_ADD_HDR_PROC_CTX: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_add_hdr_proc_ctx))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_hdr_proc_ctx *) + header)->num_proc_ctxs; + pyld_sz = + sizeof(struct ipa_ioc_add_hdr_proc_ctx) + + pre_entry * sizeof(struct ipa_hdr_proc_ctx_add); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *) + param)->num_proc_ctxs != 
pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_hdr_proc_ctx *) + param)->num_proc_ctxs, pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_add_hdr_proc_ctx( + (struct ipa_ioc_add_hdr_proc_ctx *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_DEL_HDR_PROC_CTX: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_del_hdr_proc_ctx))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_hdr_proc_ctx) + + pre_entry * sizeof(struct ipa_hdr_proc_ctx_del); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *) + param)->num_hdls != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_hdr_proc_ctx *)param)-> + num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa3_del_hdr_proc_ctx_by_user( + (struct ipa_ioc_del_hdr_proc_ctx *)param, true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_GET_HW_VERSION: + pyld_sz = sizeof(enum ipa_hw_type); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz); + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_VLAN_IFACE: + if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_VLAN_IFACE: + if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_L2TP_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_L2TP_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; + + default: /* redundant, as cmd was checked against MAXNR */ + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -ENOTTY; + } + kfree(param); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return retval; +} + +/** + * ipa3_setup_dflt_rt_tables() - Setup default routing tables + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +int ipa3_setup_dflt_rt_tables(void) +{ + struct ipa_ioc_add_rt_rule *rt_rule; + struct ipa_rt_rule_add *rt_rule_entry; + + rt_rule = + kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_add), GFP_KERNEL); + if (!rt_rule) + return -ENOMEM; + + /* setup a default v4 route to point to Apps */ + rt_rule->num_rules = 1; + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME, + IPA_RESOURCE_NAME_MAX); + + rt_rule_entry = &rt_rule->rules[0]; + rt_rule_entry->at_rear = 1; + rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS; + rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl; + rt_rule_entry->rule.retain_hdr = 1; + + if (ipa3_add_rt_rule(rt_rule)) { + IPAERR("fail to add dflt v4 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl; + + /* setup a default v6 route 
to point to A5 */ + rt_rule->ip = IPA_IP_v6; + if (ipa3_add_rt_rule(rt_rule)) { + IPAERR("fail to add dflt v6 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl; + + /* + * because these tables are the very first to be added, they will both + * have the same index (0) which is essential for programming the + * "route" end-point config + */ + + kfree(rt_rule); + + return 0; +} + +static int ipa3_setup_exception_path(void) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + struct ipahal_reg_route route = { 0 }; + int ret; + + /* install the basic exception header */ + hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add), GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX); + hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH; + + if (ipa3_add_hdr(hdr)) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl; + + /* set the route register to pass exception packets to Apps */ + route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + route.route_frag_def_pipe = ipa3_get_ep_mapping( + IPA_CLIENT_APPS_LAN_CONS); + route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl; + route.route_def_retain_hdr = 1; + + if (ipa3_cfg_route(&route)) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + kfree(hdr); + return ret; +} + +static int ipa3_init_smem_region(int memory_region_size, + int memory_region_offset) +{ + struct ipahal_imm_cmd_dma_shared_mem cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipa3_desc desc; + struct ipa_mem_buffer mem; + int rc; + + if (memory_region_size == 0) + return 0; + + memset(&desc, 0, sizeof(desc)); + memset(&cmd, 0, sizeof(cmd)); + memset(&mem, 0, sizeof(mem)); + + mem.size = memory_region_size; + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, + &mem.phys_base, GFP_KERNEL); + if (!mem.base) { + IPAERR("failed to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + memset(mem.base, 0, mem.size); + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + memory_region_offset; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + return -ENOMEM; + } + desc.opcode = cmd_pyld->opcode; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + + rc = ipa3_send_cmd(1, &desc); + if (rc) { + IPAERR("failed to send immediate command (error %d)\n", rc); + rc = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, + mem.phys_base); + + return rc; +} + +/** + * ipa3_init_q6_smem() - Initialize Q6 general memory and + * header memory regions in IPA. 
+ * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate dma memory + * -EFAULT: failed to send IPA command to initialize the memory + */ +int ipa3_init_q6_smem(void) +{ + int rc; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size), + IPA_MEM_PART(modem_ofst)); + if (rc) { + IPAERR("failed to initialize Modem RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size), + IPA_MEM_PART(modem_hdr_ofst)); + if (rc) { + IPAERR("failed to initialize Modem HDRs RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size), + IPA_MEM_PART(modem_hdr_proc_ctx_ofst)); + if (rc) { + IPAERR("failed to initialize Modem proc ctx RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size), + IPA_MEM_PART(modem_comp_decomp_ofst)); + if (rc) { + IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return rc; +} + +static void ipa3_destroy_imm(void *user1, int user2) +{ + ipahal_destroy_imm_cmd(user1); +} + +static void ipa3_q6_pipe_delay(bool delay) +{ + int client_idx; + int ep_idx; + struct ipa_ep_cfg_ctrl ep_ctrl; + + memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_ctrl.ipa_ep_delay = delay; + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + if (IPA_CLIENT_IS_Q6_PROD(client_idx)) { + ep_idx = ipa3_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, + ep_idx, &ep_ctrl); + } + } +} + +static void ipa3_q6_avoid_holb(void) +{ + int ep_idx; + int client_idx; + struct ipa_ep_cfg_ctrl ep_suspend; + struct ipa_ep_cfg_holb ep_holb; + + memset(&ep_suspend, 0, sizeof(ep_suspend)); + memset(&ep_holb, 0, sizeof(ep_holb)); + + ep_suspend.ipa_ep_suspend = true; + ep_holb.tmr_val = 0; + ep_holb.en = 1; + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + if (IPA_CLIENT_IS_Q6_CONS(client_idx)) { + ep_idx = ipa3_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + /* + * ipa3_cfg_ep_holb is not used here because we are + * setting HOLB on Q6 pipes, and from APPS perspective + * they are not valid, therefore, the above function + * will fail. 
+ */ + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, + ep_idx, &ep_holb); + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_HOL_BLOCK_EN_n, + ep_idx, &ep_holb); + + /* from IPA 4.0 pipe suspend is not supported */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_CTRL_n, + ep_idx, &ep_suspend); + } + } +} + +static void ipa3_halt_q6_cons_gsi_channels(void) +{ + int ep_idx; + int client_idx; + const struct ipa_gsi_ep_config *gsi_ep_cfg; + int ret; + int code = 0; + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + if (IPA_CLIENT_IS_Q6_CONS(client_idx)) { + ep_idx = ipa3_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx); + if (!gsi_ep_cfg) { + IPAERR("failed to get GSI config\n"); + ipa_assert(); + return; + } + + ret = gsi_halt_channel_ee( + gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee, + &code); + if (ret == GSI_STATUS_SUCCESS) + IPADBG("halted gsi ch %d ee %d with code %d\n", + gsi_ep_cfg->ipa_gsi_chan_num, + gsi_ep_cfg->ee, + code); + else + IPAERR("failed to halt ch %d ee %d code %d\n", + gsi_ep_cfg->ipa_gsi_chan_num, + gsi_ep_cfg->ee, + code); + } + } +} + + +static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip, + enum ipa_rule_type rlt) +{ + struct ipa3_desc *desc; + struct ipahal_imm_cmd_dma_shared_mem cmd = {0}; + struct ipahal_imm_cmd_pyld **cmd_pyld; + int retval = 0; + int pipe_idx; + int flt_idx = 0; + int num_cmds = 0; + int index; + u32 lcl_addr_mem_part; + u32 lcl_hdr_sz; + struct ipa_mem_buffer mem; + + IPADBG("Entry\n"); + + if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) { + IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt); + return -EINVAL; + } + + /* Up to filtering pipes we have filtering tables */ + desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc), + GFP_KERNEL); + if (!desc) + return -ENOMEM; + + cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num, + sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL); + if (!cmd_pyld) { + retval = -ENOMEM; + goto free_desc; + } + + if (ip == IPA_IP_v4) { + if (rlt == IPA_RULE_HASHABLE) { + lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size); + } else { + lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size); + } + } else { + if (rlt == IPA_RULE_HASHABLE) { + lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size); + } else { + lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size); + } + } + + retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz, + 0, &mem, true); + if (retval) { + IPAERR("failed to generate flt single tbl empty img\n"); + goto free_cmd_pyld; + } + + for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) { + if (!ipa_is_ep_support_flt(pipe_idx)) + continue; + + /* + * Iterating over all the filtering pipes which are either + * invalid but connected or connected but not configured by AP. 
+ */ + if (!ipa3_ctx->ep[pipe_idx].valid || + ipa3_ctx->ep[pipe_idx].skip_ep_cfg) { + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = + ipa3_ctx->smem_restricted_bytes + + lcl_addr_mem_part + + ipahal_get_hw_tbl_hdr_width() + + flt_idx * ipahal_get_hw_tbl_hdr_width(); + cmd_pyld[num_cmds] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld[num_cmds]) { + IPAERR("fail construct dma_shared_mem cmd\n"); + retval = -ENOMEM; + goto free_empty_img; + } + desc[num_cmds].opcode = cmd_pyld[num_cmds]->opcode; + desc[num_cmds].pyld = cmd_pyld[num_cmds]->data; + desc[num_cmds].len = cmd_pyld[num_cmds]->len; + desc[num_cmds].type = IPA_IMM_CMD_DESC; + num_cmds++; + } + + flt_idx++; + } + + IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds); + retval = ipa3_send_cmd(num_cmds, desc); + if (retval) { + IPAERR("failed to send immediate command (err %d)\n", retval); + retval = -EFAULT; + } + +free_empty_img: + ipahal_free_dma_mem(&mem); +free_cmd_pyld: + for (index = 0; index < num_cmds; index++) + ipahal_destroy_imm_cmd(cmd_pyld[index]); + kfree(cmd_pyld); +free_desc: + kfree(desc); + return retval; +} + +static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip, + enum ipa_rule_type rlt) +{ + struct ipa3_desc *desc; + struct ipahal_imm_cmd_dma_shared_mem cmd = {0}; + struct ipahal_imm_cmd_pyld *cmd_pyld = NULL; + int retval = 0; + u32 modem_rt_index_lo; + u32 modem_rt_index_hi; + u32 lcl_addr_mem_part; + u32 lcl_hdr_sz; + struct ipa_mem_buffer mem; + + IPADBG("Entry\n"); + + if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) { + IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt); + return -EINVAL; + } + + if (ip == IPA_IP_v4) { + modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo); + modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi); + if (rlt == IPA_RULE_HASHABLE) { + lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size); + } else { + lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size); + } + } else { + modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo); + modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi); + if (rlt == IPA_RULE_HASHABLE) { + lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size); + } else { + lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst); + lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size); + } + } + + retval = ipahal_rt_generate_empty_img( + modem_rt_index_hi - modem_rt_index_lo + 1, + lcl_hdr_sz, lcl_hdr_sz, &mem, true); + if (retval) { + IPAERR("fail generate empty rt img\n"); + return -ENOMEM; + } + + desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL); + if (!desc) { + IPAERR("failed to allocate memory\n"); + goto free_empty_img; + } + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + lcl_addr_mem_part + + modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width(); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + retval = -ENOMEM; + goto free_desc; + } + desc->opcode = cmd_pyld->opcode; + desc->pyld = cmd_pyld->data; + desc->len = cmd_pyld->len; + desc->type = IPA_IMM_CMD_DESC; + + 
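+ /*
+  * A single DMA_SHARED_MEM immediate command overwrites the modem-owned
+  * routing table headers (modem_rt_index_lo..modem_rt_index_hi) in IPA
+  * SRAM with the empty table image generated above.
+  */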
IPADBG("Sending 1 descriptor for rt tbl clearing\n"); + retval = ipa3_send_cmd(1, desc); + if (retval) { + IPAERR("failed to send immediate command (err %d)\n", retval); + retval = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); +free_desc: + kfree(desc); +free_empty_img: + ipahal_free_dma_mem(&mem); + return retval; +} + +static int ipa3_q6_clean_q6_tables(void) +{ + struct ipa3_desc *desc; + struct ipahal_imm_cmd_pyld *cmd_pyld = NULL; + struct ipahal_imm_cmd_register_write reg_write_cmd = {0}; + int retval; + struct ipahal_reg_fltrt_hash_flush flush; + struct ipahal_reg_valmask valmask; + + IPADBG("Entry\n"); + + + if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) { + IPAERR("failed to clean q6 flt tbls (v4/hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) { + IPAERR("failed to clean q6 flt tbls (v6/hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) { + IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) { + IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n"); + return -EFAULT; + } + + if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) { + IPAERR("failed to clean q6 rt tbls (v4/hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) { + IPAERR("failed to clean q6 rt tbls (v6/hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) { + IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n"); + return -EFAULT; + } + if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) { + IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n"); + return -EFAULT; + } + + /* Flush rules cache */ + desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + flush.v4_flt = true; + flush.v4_rt = true; + flush.v6_flt = true; + flush.v6_rt = true; + ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask); + reg_write_cmd.skip_pipeline_clear = false; + reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH); + reg_write_cmd.value = valmask.val; + reg_write_cmd.value_mask = valmask.mask; + cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + ®_write_cmd, false); + if (!cmd_pyld) { + IPAERR("fail construct register_write imm cmd\n"); + retval = -EFAULT; + goto bail_desc; + } + desc->opcode = cmd_pyld->opcode; + desc->pyld = cmd_pyld->data; + desc->len = cmd_pyld->len; + desc->type = IPA_IMM_CMD_DESC; + + IPADBG("Sending 1 descriptor for tbls flush\n"); + retval = ipa3_send_cmd(1, desc); + if (retval) { + IPAERR("failed to send immediate command (err %d)\n", retval); + retval = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + +bail_desc: + kfree(desc); + IPADBG("Done - retval = %d\n", retval); + return retval; +} + +static int ipa3_q6_set_ex_path_to_apps(void) +{ + int ep_idx; + int client_idx; + struct ipa3_desc *desc; + int num_descs = 0; + int index; + struct ipahal_imm_cmd_register_write reg_write; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int retval; + struct ipahal_reg_valmask valmask; + + desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc), + GFP_KERNEL); + if (!desc) + return -ENOMEM; + + /* Set the exception path to AP */ + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + ep_idx = ipa3_get_ep_mapping(client_idx); + if (ep_idx == -1) + 
+            continue;
+
+        if (ipa3_ctx->ep[ep_idx].valid &&
+            ipa3_ctx->ep[ep_idx].skip_ep_cfg) {
+            if (num_descs >= ipa3_ctx->ipa_num_pipes) {
+                WARN_ON(1);
+                return -EFAULT;
+            }
+            reg_write.skip_pipeline_clear = false;
+            reg_write.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+            reg_write.offset =
+                ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n, ep_idx);
+            ipahal_get_status_ep_valmask(
+                ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
+                &valmask);
+            reg_write.value = valmask.val;
+            reg_write.value_mask = valmask.mask;
+            cmd_pyld = ipahal_construct_imm_cmd(
+                IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
+            if (WARN(!cmd_pyld, "fail to create IMM"))
+                return -ENOMEM;
+
+            desc[num_descs].opcode = cmd_pyld->opcode;
+            desc[num_descs].type = IPA_IMM_CMD_DESC;
+            desc[num_descs].callback = ipa3_destroy_imm;
+            desc[num_descs].user1 = cmd_pyld;
+            desc[num_descs].pyld = cmd_pyld->data;
+            desc[num_descs].len = cmd_pyld->len;
+            num_descs++;
+        }
+
+        /* disable statuses for modem producers */
+        if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+            ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
+
+            reg_write.skip_pipeline_clear = false;
+            reg_write.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+            reg_write.offset =
+                ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n, ep_idx);
+            reg_write.value = 0;
+            reg_write.value_mask = ~0;
+            cmd_pyld = ipahal_construct_imm_cmd(
+                IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
+            if (!cmd_pyld) {
+                IPAERR("fail construct register_write cmd\n");
+                ipa_assert();
+                return -ENOMEM;
+            }
+
+            desc[num_descs].opcode = cmd_pyld->opcode;
+            desc[num_descs].type = IPA_IMM_CMD_DESC;
+            desc[num_descs].callback = ipa3_destroy_imm;
+            desc[num_descs].user1 = cmd_pyld;
+            desc[num_descs].pyld = cmd_pyld->data;
+            desc[num_descs].len = cmd_pyld->len;
+            num_descs++;
+        }
+    }
+
+    /* Will wait 500msecs for IPA tag process completion */
+    retval = ipa3_tag_process(desc, num_descs,
+        msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
+    if (retval) {
+        IPAERR("TAG process failed! (error %d)\n", retval);
+        /* For timeout error ipa3_destroy_imm cb will destroy user1 */
+        if (retval != -ETIME) {
+            for (index = 0; index < num_descs; index++)
+                if (desc[index].callback)
+                    desc[index].callback(desc[index].user1,
+                        desc[index].user2);
+            retval = -EINVAL;
+        }
+    }
+
+    kfree(desc);
+
+    return retval;
+}
+
+/**
+ * ipa3_q6_pre_shutdown_cleanup() - Clean up all Q6-related configuration
+ * in IPA HW. This is performed in case of SSR (subsystem restart).
+ *
+ * This is a mandatory procedure; if one of the steps fails, the
+ * AP needs to restart.
+ */
+void ipa3_q6_pre_shutdown_cleanup(void)
+{
+    IPADBG_LOW("ENTER\n");
+
+    IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+    ipa3_q6_pipe_delay(true);
+    ipa3_q6_avoid_holb();
+    if (ipa3_q6_clean_q6_tables()) {
+        IPAERR("Failed to clean Q6 tables\n");
+        /*
+         * Indicates IPA hardware is stalled, unexpected
+         * hardware state.
+         */
+        BUG();
+    }
+    if (ipa3_q6_set_ex_path_to_apps()) {
+        IPAERR("Failed to redirect exceptions to APPS\n");
+        /*
+         * Indicates IPA hardware is stalled, unexpected
+         * hardware state.
+         */
+        BUG();
+    }
+    /* Remove delay from Q6 PRODs to avoid pending descriptors
+     * on pipe reset procedure
+     */
+    ipa3_q6_pipe_delay(false);
+
+    IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+    IPADBG_LOW("Exit with success\n");
+}
+
+/*
+ * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup, check whether
+ * the GSI channels related to Q6 producer clients are empty.
+ *
+ * Q6 GSI channel emptiness is needed to guarantee no descriptors with invalid
+ * info are injected into IPA RX from IPA_IF while the modem is restarting.
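+ * A channel that cannot be validated as empty at this point indicates a
+ * stalled GSI, which is treated as an unrecoverable hardware state below.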
+ */ +void ipa3_q6_post_shutdown_cleanup(void) +{ + int client_idx; + int ep_idx; + + IPADBG_LOW("ENTER\n"); + + if (!ipa3_ctx->uc_ctx.uc_loaded) { + IPAERR("uC is not loaded. Skipping\n"); + return; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + /* Handle the issue where SUSPEND was removed for some reason */ + ipa3_q6_avoid_holb(); + ipa3_halt_q6_cons_gsi_channels(); + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) + if (IPA_CLIENT_IS_Q6_PROD(client_idx)) { + ep_idx = ipa3_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + if (ipa3_uc_is_gsi_channel_empty(client_idx)) { + IPAERR("fail to validate Q6 ch emptiness %d\n", + client_idx); + /* + * Indicates GSI hardware is stalled, unexpected + * hardware state. + */ + BUG(); + } + } + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG_LOW("Exit with success\n"); +} + +static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset) +{ + /* Set 4 bytes of CANARY before the offset */ + sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL; +} + +/** + * _ipa_init_sram_v3() - Initialize IPA local SRAM. + * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_sram_v3(void) +{ + u32 *ipa_sram_mmio; + unsigned long phys_addr; + + phys_addr = ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, + ipa3_ctx->smem_restricted_bytes / 4); + + ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + + /* Consult with ipa_i.h on the location of the CANARY values */ + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(v4_flt_nhash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(v6_flt_nhash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(modem_hdr_proc_ctx_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, + (ipa_get_hw_type() >= IPA_HW_v3_5) ? + IPA_MEM_PART(uc_event_ring_ofst) : + IPA_MEM_PART(end_ofst)); + + iounmap(ipa_sram_mmio); + + return 0; +} + +/** + * _ipa_init_hdr_v3_0() - Initialize IPA header block. 
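+ *
+ * Zeroes the modem/apps header region via an HDR_INIT_LOCAL immediate
+ * command and the header proc-ctx region via DMA_SHARED_MEM, then programs
+ * IPA_LOCAL_PKT_PROC_CNTXT_BASE with the proc-ctx SRAM offset.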
+ * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_hdr_v3_0(void) +{ + struct ipa3_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_hdr_init_local cmd = {0}; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 }; + + mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size); + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + memset(mem.base, 0, mem.size); + + cmd.hdr_table_addr = mem.phys_base; + cmd.size_hdr_table = mem.size; + cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(modem_hdr_ofst); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false); + if (!cmd_pyld) { + IPAERR("fail to construct hdr_init_local imm cmd\n"); + dma_free_coherent(ipa3_ctx->pdev, + mem.size, mem.base, + mem.phys_base); + return -EFAULT; + } + desc.opcode = cmd_pyld->opcode; + desc.type = IPA_IMM_CMD_DESC; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + ipahal_destroy_imm_cmd(cmd_pyld); + dma_free_coherent(ipa3_ctx->pdev, + mem.size, mem.base, + mem.phys_base); + return -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + + mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) + + IPA_MEM_PART(apps_hdr_proc_ctx_size); + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + memset(mem.base, 0, mem.size); + memset(&desc, 0, sizeof(desc)); + + dma_cmd.is_read = false; + dma_cmd.skip_pipeline_clear = false; + dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + dma_cmd.system_addr = mem.phys_base; + dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(modem_hdr_proc_ctx_ofst); + dma_cmd.size = mem.size; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false); + if (!cmd_pyld) { + IPAERR("fail to construct dma_shared_mem imm\n"); + dma_free_coherent(ipa3_ctx->pdev, + mem.size, mem.base, + mem.phys_base); + return -ENOMEM; + } + desc.opcode = cmd_pyld->opcode; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + ipahal_destroy_imm_cmd(cmd_pyld); + dma_free_coherent(ipa3_ctx->pdev, + mem.size, + mem.base, + mem.phys_base); + return -EBUSY; + } + ipahal_destroy_imm_cmd(cmd_pyld); + + ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr); + + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + + return 0; +} + +/** + * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4. 
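+ *
+ * Marks the modem-owned routing indices as reserved in the v4 bitmap and
+ * commits an empty hashable/non-hashable rule image to SRAM via the
+ * IP_V4_ROUTING_INIT immediate command.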
+ * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_rt4_v3(void) +{ + struct ipa3_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int i; + int rc = 0; + + for (i = IPA_MEM_PART(v4_modem_rt_index_lo); + i <= IPA_MEM_PART(v4_modem_rt_index_hi); + i++) + ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i); + IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]); + + rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index), + IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size), + &mem, false); + if (rc) { + IPAERR("fail generate empty v4 rt img\n"); + return rc; + } + + v4_cmd.hash_rules_addr = mem.phys_base; + v4_cmd.hash_rules_size = mem.size; + v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_hash_ofst); + v4_cmd.nhash_rules_addr = mem.phys_base; + v4_cmd.nhash_rules_size = mem.size; + v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_nhash_ofst); + IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n", + v4_cmd.hash_local_addr); + IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n", + v4_cmd.nhash_local_addr); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false); + if (!cmd_pyld) { + IPAERR("fail construct ip_v4_rt_init imm cmd\n"); + rc = -EPERM; + goto free_mem; + } + + desc.opcode = cmd_pyld->opcode; + desc.type = IPA_IMM_CMD_DESC; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + +free_mem: + ipahal_free_dma_mem(&mem); + return rc; +} + +/** + * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6. 
+ * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_rt6_v3(void) +{ + struct ipa3_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int i; + int rc = 0; + + for (i = IPA_MEM_PART(v6_modem_rt_index_lo); + i <= IPA_MEM_PART(v6_modem_rt_index_hi); + i++) + ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i); + IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]); + + rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index), + IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size), + &mem, false); + if (rc) { + IPAERR("fail generate empty v6 rt img\n"); + return rc; + } + + v6_cmd.hash_rules_addr = mem.phys_base; + v6_cmd.hash_rules_size = mem.size; + v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_hash_ofst); + v6_cmd.nhash_rules_addr = mem.phys_base; + v6_cmd.nhash_rules_size = mem.size; + v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_nhash_ofst); + IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n", + v6_cmd.hash_local_addr); + IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n", + v6_cmd.nhash_local_addr); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false); + if (!cmd_pyld) { + IPAERR("fail construct ip_v6_rt_init imm cmd\n"); + rc = -EPERM; + goto free_mem; + } + + desc.opcode = cmd_pyld->opcode; + desc.type = IPA_IMM_CMD_DESC; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + +free_mem: + ipahal_free_dma_mem(&mem); + return rc; +} + +/** + * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4. 
+ * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_flt4_v3(void) +{ + struct ipa3_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int rc; + + rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num, + IPA_MEM_PART(v4_flt_hash_size), + IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap, + &mem, false); + if (rc) { + IPAERR("fail generate empty v4 flt img\n"); + return rc; + } + + v4_cmd.hash_rules_addr = mem.phys_base; + v4_cmd.hash_rules_size = mem.size; + v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_hash_ofst); + v4_cmd.nhash_rules_addr = mem.phys_base; + v4_cmd.nhash_rules_size = mem.size; + v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_nhash_ofst); + IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n", + v4_cmd.hash_local_addr); + IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n", + v4_cmd.nhash_local_addr); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false); + if (!cmd_pyld) { + IPAERR("fail construct ip_v4_flt_init imm cmd\n"); + rc = -EPERM; + goto free_mem; + } + + desc.opcode = cmd_pyld->opcode; + desc.type = IPA_IMM_CMD_DESC; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + +free_mem: + ipahal_free_dma_mem(&mem); + return rc; +} + +/** + * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6. + * + * Return codes: 0 for success, negative value for failure + */ +int _ipa_init_flt6_v3(void) +{ + struct ipa3_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int rc; + + rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num, + IPA_MEM_PART(v6_flt_hash_size), + IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap, + &mem, false); + if (rc) { + IPAERR("fail generate empty v6 flt img\n"); + return rc; + } + + v6_cmd.hash_rules_addr = mem.phys_base; + v6_cmd.hash_rules_size = mem.size; + v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_hash_ofst); + v6_cmd.nhash_rules_addr = mem.phys_base; + v6_cmd.nhash_rules_size = mem.size; + v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_nhash_ofst); + IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n", + v6_cmd.hash_local_addr); + IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n", + v6_cmd.nhash_local_addr); + + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false); + if (!cmd_pyld) { + IPAERR("fail construct ip_v6_flt_init imm cmd\n"); + rc = -EPERM; + goto free_mem; + } + + desc.opcode = cmd_pyld->opcode; + desc.type = IPA_IMM_CMD_DESC; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa3_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + ipahal_destroy_imm_cmd(cmd_pyld); + +free_mem: + ipahal_free_dma_mem(&mem); + return rc; +} + +static int ipa3_setup_flt_hash_tuple(void) +{ + int pipe_idx; + struct ipahal_reg_hash_tuple tuple; + + memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple)); + + for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; 
pipe_idx++) { + if (!ipa_is_ep_support_flt(pipe_idx)) + continue; + + if (ipa_is_modem_pipe(pipe_idx)) + continue; + + if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) { + IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx); + return -EFAULT; + } + } + + return 0; +} + +static int ipa3_setup_rt_hash_tuple(void) +{ + int tbl_idx; + struct ipahal_reg_hash_tuple tuple; + + memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple)); + + for (tbl_idx = 0; + tbl_idx < max(IPA_MEM_PART(v6_rt_num_index), + IPA_MEM_PART(v4_rt_num_index)); + tbl_idx++) { + + if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) && + tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi)) + continue; + + if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) && + tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi)) + continue; + + if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) { + IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx); + return -EFAULT; + } + } + + return 0; +} + +static int ipa3_setup_apps_pipes(void) +{ + struct ipa_sys_connect_params sys_in; + int result = 0; + + if (ipa3_ctx->gsi_ch20_wa) { + IPADBG("Allocating GSI physical channel 20\n"); + result = ipa_gsi_ch20_wa(); + if (result) { + IPAERR("ipa_gsi_ch20_wa failed %d\n", result); + goto fail_ch20_wa; + } + } + + /* allocate the common PROD event ring */ + if (ipa3_alloc_common_event_ring()) { + IPAERR("ipa3_alloc_common_event_ring failed.\n"); + result = -EPERM; + goto fail_ch20_wa; + } + + /* CMD OUT (AP->IPA) */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_CMD_PROD; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS; + if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) { + IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n"); + result = -EPERM; + goto fail_ch20_wa; + } + IPADBG("Apps to IPA cmd pipe is connected\n"); + + ipa3_ctx->ctrl->ipa_init_sram(); + IPADBG("SRAM initialized\n"); + + ipa3_ctx->ctrl->ipa_init_hdr(); + IPADBG("HDR initialized\n"); + + ipa3_ctx->ctrl->ipa_init_rt4(); + IPADBG("V4 RT initialized\n"); + + ipa3_ctx->ctrl->ipa_init_rt6(); + IPADBG("V6 RT initialized\n"); + + ipa3_ctx->ctrl->ipa_init_flt4(); + IPADBG("V4 FLT initialized\n"); + + ipa3_ctx->ctrl->ipa_init_flt6(); + IPADBG("V6 FLT initialized\n"); + + if (ipa3_setup_flt_hash_tuple()) { + IPAERR(":fail to configure flt hash tuple\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + IPADBG("flt hash tuple is configured\n"); + + if (ipa3_setup_rt_hash_tuple()) { + IPAERR(":fail to configure rt hash tuple\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + IPADBG("rt hash tuple is configured\n"); + + if (ipa3_setup_exception_path()) { + IPAERR(":fail to setup excp path\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + IPADBG("Exception path was successfully set"); + + if (ipa3_setup_dflt_rt_tables()) { + IPAERR(":fail to setup dflt routes\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + IPADBG("default routing was set\n"); + + /* LAN IN (IPA->AP) */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_LAN_CONS; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.notify = ipa3_lan_rx_cb; + sys_in.priv = NULL; + sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH; + sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD; + 
sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0; + sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2; + sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL; + + /** + * ipa_lan_rx_cb() intended to notify the source EP about packet + * being received on the LAN_CONS via calling the source EP call-back. + * There could be a race condition with calling this call-back. Other + * thread may nullify it - e.g. on EP disconnect. + * This lock intended to protect the access to the source EP call-back + */ + spin_lock_init(&ipa3_ctx->disconnect_lock); + if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) { + IPAERR(":setup sys pipe (LAN_CONS) failed.\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + + /* LAN OUT (AP->IPA) */ + if (!ipa3_ctx->ipa_config_is_mhi) { + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_LAN_PROD; + sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; + if (ipa3_setup_sys_pipe(&sys_in, + &ipa3_ctx->clnt_hdl_data_out)) { + IPAERR(":setup sys pipe (LAN_PROD) failed.\n"); + result = -EPERM; + goto fail_lan_data_out; + } + } + + return 0; + +fail_lan_data_out: + ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in); +fail_flt_hash_tuple: + if (ipa3_ctx->dflt_v6_rt_rule_hdl) + __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl); + if (ipa3_ctx->dflt_v4_rt_rule_hdl) + __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl); + if (ipa3_ctx->excp_hdr_hdl) + __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false); + ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd); +fail_ch20_wa: + return result; +} + +static void ipa3_teardown_apps_pipes(void) +{ + if (!ipa3_ctx->ipa_config_is_mhi) + ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out); + ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in); + __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl); + __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl); + __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false); + ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd); +} + +#ifdef CONFIG_COMPAT +long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int retval = 0; + struct ipa3_ioc_nat_alloc_mem32 nat_mem32; + struct ipa_ioc_nat_alloc_mem nat_mem; + + switch (cmd) { + case IPA_IOC_ADD_HDR32: + cmd = IPA_IOC_ADD_HDR; + break; + case IPA_IOC_DEL_HDR32: + cmd = IPA_IOC_DEL_HDR; + break; + case IPA_IOC_ADD_RT_RULE32: + cmd = IPA_IOC_ADD_RT_RULE; + break; + case IPA_IOC_DEL_RT_RULE32: + cmd = IPA_IOC_DEL_RT_RULE; + break; + case IPA_IOC_ADD_FLT_RULE32: + cmd = IPA_IOC_ADD_FLT_RULE; + break; + case IPA_IOC_DEL_FLT_RULE32: + cmd = IPA_IOC_DEL_FLT_RULE; + break; + case IPA_IOC_GET_RT_TBL32: + cmd = IPA_IOC_GET_RT_TBL; + break; + case IPA_IOC_COPY_HDR32: + cmd = IPA_IOC_COPY_HDR; + break; + case IPA_IOC_QUERY_INTF32: + cmd = IPA_IOC_QUERY_INTF; + break; + case IPA_IOC_QUERY_INTF_TX_PROPS32: + cmd = IPA_IOC_QUERY_INTF_TX_PROPS; + break; + case IPA_IOC_QUERY_INTF_RX_PROPS32: + cmd = IPA_IOC_QUERY_INTF_RX_PROPS; + break; + case IPA_IOC_QUERY_INTF_EXT_PROPS32: + cmd = IPA_IOC_QUERY_INTF_EXT_PROPS; + break; + case IPA_IOC_GET_HDR32: + cmd = IPA_IOC_GET_HDR; + break; + case IPA_IOC_ALLOC_NAT_MEM32: + if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg, + sizeof(struct ipa3_ioc_nat_alloc_mem32))) { + retval = -EFAULT; + goto ret; + } + memcpy(nat_mem.dev_name, nat_mem32.dev_name, + IPA_RESOURCE_NAME_MAX); + nat_mem.size = (size_t)nat_mem32.size; + nat_mem.offset = 
(off_t)nat_mem32.offset; + + /* null terminate the string */ + nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0'; + + if (ipa3_allocate_nat_device(&nat_mem)) { + retval = -EFAULT; + goto ret; + } + nat_mem32.offset = (compat_off_t)nat_mem.offset; + if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32, + sizeof(struct ipa3_ioc_nat_alloc_mem32))) { + retval = -EFAULT; + } +ret: + return retval; + case IPA_IOC_V4_INIT_NAT32: + cmd = IPA_IOC_V4_INIT_NAT; + break; + case IPA_IOC_NAT_DMA32: + cmd = IPA_IOC_NAT_DMA; + break; + case IPA_IOC_V4_DEL_NAT32: + cmd = IPA_IOC_V4_DEL_NAT; + break; + case IPA_IOC_GET_NAT_OFFSET32: + cmd = IPA_IOC_GET_NAT_OFFSET; + break; + case IPA_IOC_PULL_MSG32: + cmd = IPA_IOC_PULL_MSG; + break; + case IPA_IOC_RM_ADD_DEPENDENCY32: + cmd = IPA_IOC_RM_ADD_DEPENDENCY; + break; + case IPA_IOC_RM_DEL_DEPENDENCY32: + cmd = IPA_IOC_RM_DEL_DEPENDENCY; + break; + case IPA_IOC_GENERATE_FLT_EQ32: + cmd = IPA_IOC_GENERATE_FLT_EQ; + break; + case IPA_IOC_QUERY_RT_TBL_INDEX32: + cmd = IPA_IOC_QUERY_RT_TBL_INDEX; + break; + case IPA_IOC_WRITE_QMAPID32: + cmd = IPA_IOC_WRITE_QMAPID; + break; + case IPA_IOC_MDFY_FLT_RULE32: + cmd = IPA_IOC_MDFY_FLT_RULE; + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32: + cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD; + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32: + cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL; + break; + case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32: + cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED; + break; + case IPA_IOC_MDFY_RT_RULE32: + cmd = IPA_IOC_MDFY_RT_RULE; + break; + case IPA_IOC_COMMIT_HDR: + case IPA_IOC_RESET_HDR: + case IPA_IOC_COMMIT_RT: + case IPA_IOC_RESET_RT: + case IPA_IOC_COMMIT_FLT: + case IPA_IOC_RESET_FLT: + case IPA_IOC_DUMP: + case IPA_IOC_PUT_RT_TBL: + case IPA_IOC_PUT_HDR: + case IPA_IOC_SET_FLT: + case IPA_IOC_QUERY_EP_MAPPING: + break; + default: + return -ENOIOCTLCMD; + } + return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); +} +#endif + +static ssize_t ipa3_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos); + +static const struct file_operations ipa3_drv_fops = { + .owner = THIS_MODULE, + .open = ipa3_open, + .read = ipa3_read, + .write = ipa3_write, + .unlocked_ioctl = ipa3_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_ipa3_ioctl, +#endif +}; + +static int ipa3_get_clks(struct device *dev) +{ + if (ipa3_res.use_bw_vote) { + IPADBG("Vote IPA clock by bw voting via bus scaling driver\n"); + ipa3_clk = NULL; + return 0; + } + + ipa3_clk = clk_get(dev, "core_clk"); + if (IS_ERR(ipa3_clk)) { + if (ipa3_clk != ERR_PTR(-EPROBE_DEFER)) + IPAERR("fail to get ipa clk\n"); + return PTR_ERR(ipa3_clk); + } + return 0; +} + +/** + * _ipa_enable_clks_v3_0() - Enable IPA clocks. 
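+ *
+ * When a core clock handle exists (i.e. bus bandwidth voting is not used),
+ * the clock is prepared, enabled and set to curr_ipa_clk_rate; in all cases
+ * the uC is then notified that clocks are on.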
+ */ +void _ipa_enable_clks_v3_0(void) +{ + IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate); + if (ipa3_clk) { + IPADBG_LOW("enabling gcc_ipa_clk\n"); + clk_prepare(ipa3_clk); + clk_enable(ipa3_clk); + clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate); + } + + ipa3_uc_notify_clk_state(true); +} + +static unsigned int ipa3_get_bus_vote(void) +{ + unsigned int idx = 1; + + if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs) { + idx = 1; + } else if (ipa3_ctx->curr_ipa_clk_rate == + ipa3_ctx->ctrl->ipa_clk_rate_nominal) { + if (ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2) + idx = 1; + else + idx = 2; + } else if (ipa3_ctx->curr_ipa_clk_rate == + ipa3_ctx->ctrl->ipa_clk_rate_turbo) { + idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1; + } else { + WARN(1, "unexpected clock rate"); + } + + IPADBG("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx); + + return idx; +} + +/** + * ipa3_enable_clks() - Turn on IPA clocks + * + * Return codes: + * None + */ +void ipa3_enable_clks(void) +{ + IPADBG("enabling IPA clocks and bus voting\n"); + + if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, + ipa3_get_bus_vote())) + WARN(1, "bus scaling failed"); + + ipa3_ctx->ctrl->ipa3_enable_clks(); +} + + +/** + * _ipa_disable_clks_v3_0() - Disable IPA clocks. + */ +void _ipa_disable_clks_v3_0(void) +{ + ipa3_suspend_apps_pipes(true); + ipa3_uc_notify_clk_state(false); + if (ipa3_clk) { + IPADBG_LOW("disabling gcc_ipa_clk\n"); + clk_disable_unprepare(ipa3_clk); + } +} + +/** + * ipa3_disable_clks() - Turn off IPA clocks + * + * Return codes: + * None + */ +void ipa3_disable_clks(void) +{ + IPADBG("disabling IPA clocks and bus voting\n"); + + ipa3_ctx->ctrl->ipa3_disable_clks(); + + if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0)) + WARN(1, "bus scaling failed"); +} + +/** + * ipa3_start_tag_process() - Send TAG packet and wait for it to come back + * + * This function is called prior to clock gating when active client counter + * is 1. TAG process ensures that there are no packets inside IPA HW that + * were not submitted to the IPA client via the transport. During TAG process + * all aggregation frames are (force) closed. + * + * Return codes: + * None + */ +static void ipa3_start_tag_process(struct work_struct *work) +{ + int res; + + IPADBG("starting TAG process\n"); + /* close aggregation frames on all pipes */ + res = ipa3_tag_aggr_force_close(-1); + if (res) + IPAERR("ipa3_tag_aggr_force_close failed %d\n", res); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS"); + + IPADBG("TAG process done\n"); +} + +/** + * ipa3_active_clients_log_mod() - Log a modification in the active clients + * reference count + * + * This method logs any modification in the active clients reference count: + * It logs the modification in the circular history buffer + * It logs the modification in the hash table - looking for an entry, + * creating one if needed and deleting one if needed. 
+ * + * @id: ipa3_active client logging info struct to hold the log information + * @inc: a boolean variable to indicate whether the modification is an increase + * or decrease + * @int_ctx: a boolean variable to indicate whether this call is being made from + * an interrupt context and therefore should allocate GFP_ATOMIC memory + * + * Method process: + * - Hash the unique identifier string + * - Find the hash in the table + * 1)If found, increase or decrease the reference count + * 2)If not found, allocate a new hash table entry struct and initialize it + * - Remove and deallocate unneeded data structure + * - Log the call in the circular history buffer (unless it is a simple call) + */ +void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id, + bool inc, bool int_ctx) +{ + char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]; + unsigned long long t; + unsigned long nanosec_rem; + struct ipa3_active_client_htable_entry *hentry; + struct ipa3_active_client_htable_entry *hfound; + u32 hkey; + char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN]; + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags); + int_ctx = true; + hfound = NULL; + memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN); + strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN); + hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN, + 0); + hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable, + hentry, list, hkey) { + if (!strcmp(hentry->id_string, id->id_string)) { + hentry->count = hentry->count + (inc ? 1 : -1); + hfound = hentry; + } + } + if (hfound == NULL) { + hentry = NULL; + hentry = kzalloc(sizeof( + struct ipa3_active_client_htable_entry), + int_ctx ? GFP_ATOMIC : GFP_KERNEL); + if (hentry == NULL) { + spin_unlock_irqrestore( + &ipa3_ctx->ipa3_active_clients_logging.lock, + flags); + return; + } + hentry->type = id->type; + strlcpy(hentry->id_string, id->id_string, + IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN); + INIT_HLIST_NODE(&hentry->list); + hentry->count = inc ? 1 : -1; + hash_add(ipa3_ctx->ipa3_active_clients_logging.htable, + &hentry->list, hkey); + } else if (hfound->count == 0) { + hash_del(&hfound->list); + kfree(hfound); + } + + if (id->type != SIMPLE) { + t = local_clock(); + nanosec_rem = do_div(t, 1000000000) / 1000; + snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN, + inc ? 
"[%5lu.%06lu] ^ %s, %s: %d" : + "[%5lu.%06lu] v %s, %s: %d", + (unsigned long)t, nanosec_rem, + id->id_string, id->file, id->line); + ipa3_active_clients_log_insert(temp_str); + } + spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock, + flags); +} + +void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id, + bool int_ctx) +{ + ipa3_active_clients_log_mod(id, false, int_ctx); +} + +void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id, + bool int_ctx) +{ + ipa3_active_clients_log_mod(id, true, int_ctx); +} + +/** + * ipa3_inc_client_enable_clks() - Increase active clients counter, and + * enable ipa clocks if necessary + * + * Return codes: + * None + */ +void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id) +{ + int ret; + + ipa3_active_clients_log_inc(id, false); + ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt); + if (ret) { + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + return; + } + + mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + + /* somebody might voted to clocks meanwhile */ + ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt); + if (ret) { + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + return; + } + + ipa3_enable_clks(); + atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt); + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + ipa3_suspend_apps_pipes(false); + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); +} + +/** + * ipa3_inc_client_enable_clks_no_block() - Only increment the number of active + * clients if no asynchronous actions should be done. Asynchronous actions are + * locking a mutex and waking up IPA HW. 
+ * + * Return codes: 0 for success + * -EPERM if an asynchronous action should have been done + */ +int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info + *id) +{ + int ret; + + ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt); + if (ret) { + ipa3_active_clients_log_inc(id, true); + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + return 0; + } + + return -EPERM; +} + +static void __ipa3_dec_client_disable_clks(void) +{ + int ret; + + if (!atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) { + IPAERR("trying to disable clocks with refcnt is 0\n"); + ipa_assert(); + return; + } + + ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1); + if (ret) + goto bail; + + /* seems like this is the only client holding the clocks */ + mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 && + ipa3_ctx->tag_process_before_gating) { + ipa3_ctx->tag_process_before_gating = false; + /* + * When TAG process ends, active clients will be + * decreased + */ + queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work); + goto unlock_mutex; + } + + /* a different context might increase the clock reference meanwhile */ + ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt); + if (ret > 0) + goto unlock_mutex; + ipa3_disable_clks(); + +unlock_mutex: + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); +bail: + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); +} + +/** + * ipa3_dec_client_disable_clks() - Decrease active clients counter + * + * In case that there are no active clients this function also starts + * TAG process. When TAG progress ends ipa clocks will be gated. + * start_tag_process_again flag is set during this function to signal TAG + * process to start again as there was another client that may send data to ipa + * + * Return codes: + * None + */ +void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id) +{ + ipa3_active_clients_log_dec(id, false); + __ipa3_dec_client_disable_clks(); +} + +static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work) +{ + __ipa3_dec_client_disable_clks(); +} + +/** + * ipa3_dec_client_disable_clks_no_block() - Decrease active clients counter + * if possible without blocking. If this is the last client then the desrease + * will happen from work queue context. 
+ * + * Return codes: + * None + */ +void ipa3_dec_client_disable_clks_no_block( + struct ipa_active_client_logging_info *id) +{ + int ret; + + ipa3_active_clients_log_dec(id, true); + ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1); + if (ret) { + IPADBG_LOW("active clients = %d\n", + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)); + return; + } + + /* seems like this is the only client holding the clocks */ + queue_work(ipa3_ctx->power_mgmt_wq, + &ipa_dec_clients_disable_clks_on_wq_work); +} + +/** + * ipa3_inc_acquire_wakelock() - Increase active clients counter, and + * acquire wakelock if necessary + * + * Return codes: + * None + */ +void ipa3_inc_acquire_wakelock(void) +{ + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags); + ipa3_ctx->wakelock_ref_cnt.cnt++; + if (ipa3_ctx->wakelock_ref_cnt.cnt == 1) + __pm_stay_awake(&ipa3_ctx->w_lock); + IPADBG_LOW("active wakelock ref cnt = %d\n", + ipa3_ctx->wakelock_ref_cnt.cnt); + spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags); +} + +/** + * ipa3_dec_release_wakelock() - Decrease active clients counter + * + * In case if the ref count is 0, release the wakelock. + * + * Return codes: + * None + */ +void ipa3_dec_release_wakelock(void) +{ + unsigned long flags; + + spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags); + ipa3_ctx->wakelock_ref_cnt.cnt--; + IPADBG_LOW("active wakelock ref cnt = %d\n", + ipa3_ctx->wakelock_ref_cnt.cnt); + if (ipa3_ctx->wakelock_ref_cnt.cnt == 0) + __pm_relax(&ipa3_ctx->w_lock); + spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags); +} + +int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps) +{ + enum ipa_voltage_level needed_voltage; + u32 clk_rate; + + IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u", + floor_voltage, bandwidth_mbps); + + if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED || + floor_voltage >= IPA_VOLTAGE_MAX) { + IPAERR("bad voltage\n"); + return -EINVAL; + } + + if (ipa3_ctx->enable_clock_scaling) { + IPADBG_LOW("Clock scaling is enabled\n"); + if (bandwidth_mbps >= + ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo) + needed_voltage = IPA_VOLTAGE_TURBO; + else if (bandwidth_mbps >= + ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal) + needed_voltage = IPA_VOLTAGE_NOMINAL; + else + needed_voltage = IPA_VOLTAGE_SVS; + } else { + IPADBG_LOW("Clock scaling is disabled\n"); + needed_voltage = IPA_VOLTAGE_NOMINAL; + } + + needed_voltage = max(needed_voltage, floor_voltage); + switch (needed_voltage) { + case IPA_VOLTAGE_SVS: + clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs; + break; + case IPA_VOLTAGE_NOMINAL: + clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal; + break; + case IPA_VOLTAGE_TURBO: + clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo; + break; + default: + IPAERR("bad voltage\n"); + WARN_ON(1); + return -EFAULT; + } + + if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) { + IPADBG_LOW("Same voltage\n"); + return 0; + } + + /* Hold the mutex to avoid race conditions with ipa3_enable_clocks() */ + mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + ipa3_ctx->curr_ipa_clk_rate = clk_rate; + IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate); + if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) { + if (ipa3_clk) + clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate); + if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, + ipa3_get_bus_vote())) + WARN_ON(1); + } else { + IPADBG_LOW("clocks are gated, not setting rate\n"); + } + 
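+    /* If clocks are currently gated, the new rate has only been recorded in
+     * curr_ipa_clk_rate; it will be applied on the next ipa3_enable_clks()
+     * vote.
+     */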
mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); + IPADBG_LOW("Done\n"); + + return 0; +} + +static void ipa3_process_irq_schedule_rel(void) +{ + queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq, + &ipa3_transport_release_resource_work, + msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC)); +} + +/** + * ipa3_suspend_handler() - Handles the suspend interrupt: + * wakes up the suspended peripheral by requesting its consumer + * @interrupt: Interrupt type + * @private_data: The client's private data + * @interrupt_data: Interrupt specific information data + */ +void ipa3_suspend_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + enum ipa_rm_resource_name resource; + u32 suspend_data = + ((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints; + u32 bmsk = 1; + u32 i = 0; + int res; + struct ipa_ep_cfg_holb holb_cfg; + struct mutex *pm_mutex_ptr = &ipa3_ctx->transport_pm.transport_pm_mutex; + + IPADBG("interrupt=%d, interrupt_data=%u\n", + interrupt, suspend_data); + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.tmr_val = 0; + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) { + if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) { + /* + * pipe will be unsuspended as part of + * enabling IPA clocks + */ + mutex_lock(pm_mutex_ptr); + if (!atomic_read( + &ipa3_ctx->transport_pm.dec_clients) + ) { + IPA_ACTIVE_CLIENTS_INC_EP( + ipa3_ctx->ep[i].client); + IPADBG_LOW("Pipes un-suspended.\n"); + IPADBG_LOW("Enter poll mode.\n"); + atomic_set( + &ipa3_ctx->transport_pm.dec_clients, + 1); + ipa3_process_irq_schedule_rel(); + } + mutex_unlock(pm_mutex_ptr); + } else { + resource = ipa3_get_rm_resource_from_ep(i); + res = + ipa_rm_request_resource_with_timer(resource); + if (res == -EPERM && + IPA_CLIENT_IS_CONS( + ipa3_ctx->ep[i].client)) { + holb_cfg.en = 1; + res = ipa3_cfg_ep_holb_by_client( + ipa3_ctx->ep[i].client, &holb_cfg); + WARN(res, "holb en failed\n"); + } + } + } + bmsk = bmsk << 1; + } +} + +/** + * ipa3_restore_suspend_handler() - restores the original suspend IRQ handler + * as it was registered in the IPA init sequence. 
+ * Return codes: + * 0: success + * -EPERM: failed to remove current handler or failed to add original handler + */ +int ipa3_restore_suspend_handler(void) +{ + int result = 0; + + result = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ); + if (result) { + IPAERR("remove handler for suspend interrupt failed\n"); + return -EPERM; + } + + result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, + ipa3_suspend_handler, false, NULL); + if (result) { + IPAERR("register handler for suspend interrupt failed\n"); + result = -EPERM; + } + + IPADBG("suspend handler successfully restored\n"); + + return result; +} + +static int ipa3_apps_cons_release_resource(void) +{ + return 0; +} + +static int ipa3_apps_cons_request_resource(void) +{ + return 0; +} + +static void ipa3_transport_release_resource(struct work_struct *work) +{ + mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex); + /* check whether still need to decrease client usage */ + if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) { + if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) { + IPADBG("EOT pending Re-scheduling\n"); + ipa3_process_irq_schedule_rel(); + } else { + atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE"); + } + } + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0); + mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex); +} + +int ipa3_create_apps_resource(void) +{ + struct ipa_rm_create_params apps_cons_create_params; + struct ipa_rm_perf_profile profile; + int result = 0; + + memset(&apps_cons_create_params, 0, + sizeof(apps_cons_create_params)); + apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS; + apps_cons_create_params.request_resource = + ipa3_apps_cons_request_resource; + apps_cons_create_params.release_resource = + ipa3_apps_cons_release_resource; + result = ipa_rm_create_resource(&apps_cons_create_params); + if (result) { + IPAERR("ipa_rm_create_resource failed\n"); + return result; + } + + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile); + + return result; +} + +/** + * ipa3_init_interrupts() - Register to IPA IRQs + * + * Return codes: 0 in success, negative in failure + * + */ +int ipa3_init_interrupts(void) +{ + int result; + + /*register IPA IRQ handler*/ + result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0, + master_dev); + if (result) { + IPAERR("ipa interrupts initialization failed\n"); + return -ENODEV; + } + + /*add handler for suspend interrupt*/ + result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, + ipa3_suspend_handler, false, NULL); + if (result) { + IPAERR("register handler for suspend interrupt failed\n"); + result = -ENODEV; + goto fail_add_interrupt_handler; + } + + return 0; + +fail_add_interrupt_handler: + free_irq(ipa3_res.ipa_irq, master_dev); + return result; +} + +/** + * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables + * The idr strcuture per filtering table is intended for rule id generation + * per filtering rule. 
+ */ +static void ipa3_destroy_flt_tbl_idrs(void) +{ + int i; + struct ipa3_flt_tbl *flt_tbl; + + idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v4]); + idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v6]); + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + + flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4]; + flt_tbl->rule_ids = NULL; + flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6]; + flt_tbl->rule_ids = NULL; + } +} + +static void ipa3_freeze_clock_vote_and_notify_modem(void) +{ + int res; + struct ipa_active_client_logging_info log_info; + + if (ipa3_ctx->smp2p_info.res_sent) + return; + + if (ipa3_ctx->smp2p_info.out_base_id == 0) { + IPAERR("smp2p out gpio not assigned\n"); + return; + } + + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE"); + res = ipa3_inc_client_enable_clks_no_block(&log_info); + if (res) + ipa3_ctx->smp2p_info.ipa_clk_on = false; + else + ipa3_ctx->smp2p_info.ipa_clk_on = true; + + gpio_set_value(ipa3_ctx->smp2p_info.out_base_id + + IPA_GPIO_OUT_CLK_VOTE_IDX, + ipa3_ctx->smp2p_info.ipa_clk_on); + gpio_set_value(ipa3_ctx->smp2p_info.out_base_id + + IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1); + + ipa3_ctx->smp2p_info.res_sent = true; + IPADBG("IPA clocks are %s\n", + ipa3_ctx->smp2p_info.ipa_clk_on ? "ON" : "OFF"); +} + +void ipa3_reset_freeze_vote(void) +{ + if (ipa3_ctx->smp2p_info.res_sent == false) + return; + + if (ipa3_ctx->smp2p_info.ipa_clk_on) + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE"); + + gpio_set_value(ipa3_ctx->smp2p_info.out_base_id + + IPA_GPIO_OUT_CLK_VOTE_IDX, 0); + gpio_set_value(ipa3_ctx->smp2p_info.out_base_id + + IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 0); + + ipa3_ctx->smp2p_info.res_sent = false; + ipa3_ctx->smp2p_info.ipa_clk_on = false; +} + +static int ipa3_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + int res; + + ipa3_freeze_clock_vote_and_notify_modem(); + + IPADBG("Calling uC panic handler\n"); + res = ipa3_uc_panic_notifier(this, event, ptr); + if (res) + IPAERR("uC panic handler failed %d\n", res); + + return NOTIFY_DONE; +} + +static struct notifier_block ipa3_panic_blk = { + .notifier_call = ipa3_panic_notifier, + /* IPA panic handler needs to run before modem shuts down */ + .priority = INT_MAX, +}; + +static void ipa3_register_panic_hdlr(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, + &ipa3_panic_blk); +} + +static void ipa3_trigger_ipa_ready_cbs(void) +{ + struct ipa3_ready_cb_info *info; + + mutex_lock(&ipa3_ctx->lock); + + /* Call all the CBs */ + list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link) + if (info->ready_cb) + info->ready_cb(info->user_data); + + mutex_unlock(&ipa3_ctx->lock); +} + +static int ipa3_gsi_pre_fw_load_init(void) +{ + int result; + + result = gsi_configure_regs(ipa3_res.transport_mem_base, + ipa3_res.transport_mem_size, + ipa3_res.ipa_mem_base); + if (result) { + IPAERR("Failed to configure GSI registers\n"); + return -EINVAL; + } + + return 0; +} + +static void ipa3_uc_is_loaded(void) +{ + IPADBG("\n"); + complete_all(&ipa3_ctx->uc_loaded_completion_obj); +} + +static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type) +{ + enum gsi_ver gsi_ver; + + switch (ipa_hw_type) { + case IPA_HW_v3_0: + case IPA_HW_v3_1: + gsi_ver = GSI_VER_1_0; + break; + case IPA_HW_v3_5: + gsi_ver = GSI_VER_1_2; + break; + case IPA_HW_v3_5_1: + gsi_ver = GSI_VER_1_3; + break; + case IPA_HW_v4_0: + gsi_ver = GSI_VER_2_0; + break; + default: + IPAERR("No GSI version for ipa type %d\n", ipa_hw_type); + WARN_ON(1); + gsi_ver = 
GSI_VER_ERR; + } + + IPADBG("GSI version %d\n", gsi_ver); + + return gsi_ver; +} + +/** + * ipa3_post_init() - Initialize the IPA Driver (Part II). + * This part contains all initialization which requires interaction with + * IPA HW (via GSI). + * + * @resource_p: contain platform specific values from DST file + * @pdev: The platform device structure representing the IPA driver + * + * Function initialization process: + * - Initialize endpoints bitmaps + * - Initialize resource groups min and max values + * - Initialize filtering lists heads and idr + * - Initialize interrupts + * - Register GSI + * - Setup APPS pipes + * - Initialize tethering bridge + * - Initialize IPA debugfs + * - Initialize IPA uC interface + * - Initialize WDI interface + * - Initialize USB interface + * - Register for panic handler + * - Trigger IPA ready callbacks (to all subscribers) + * - Trigger IPA completion object (to all who wait on it) + */ +static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p, + struct device *ipa_dev) +{ + int result; + struct gsi_per_props gsi_props; + struct ipa3_uc_hdlrs uc_hdlrs = { 0 }; + struct ipa3_flt_tbl *flt_tbl; + int i; + struct idr *idr; + + if (ipa3_ctx == NULL) { + IPADBG("IPA driver haven't initialized\n"); + return -ENXIO; + } + + /* Prevent consequent calls from trying to load the FW again. */ + if (ipa3_ctx->ipa_initialization_complete) + return 0; + + /* + * indication whether working in MHI config or non MHI config is given + * in ipa3_write which is launched before ipa3_post_init. i.e. from + * this point it is safe to use ipa3_ep_mapping array and the correct + * entry will be returned from ipa3_get_hw_type_index() + */ + ipa_init_ep_flt_bitmap(); + IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n", + ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num); + + /* Assign resource limitation to each group */ + ipa3_set_resorce_groups_min_max_limits(); + + idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v4]); + idr_init(idr); + idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v6]); + idr_init(idr); + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + + flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4]; + INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list); + flt_tbl->in_sys[IPA_RULE_HASHABLE] = + !ipa3_ctx->ip4_flt_tbl_hash_lcl; + flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] = + !ipa3_ctx->ip4_flt_tbl_nhash_lcl; + flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v4]; + + flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6]; + INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list); + flt_tbl->in_sys[IPA_RULE_HASHABLE] = + !ipa3_ctx->ip6_flt_tbl_hash_lcl; + flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] = + !ipa3_ctx->ip6_flt_tbl_nhash_lcl; + flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v6]; + } + + if (!ipa3_ctx->apply_rg10_wa) { + result = ipa3_init_interrupts(); + if (result) { + IPAERR("ipa initialization of interrupts failed\n"); + result = -ENODEV; + goto fail_register_device; + } + } else { + IPADBG("Initialization of ipa interrupts skipped\n"); + } + + /* + * IPAv3.5 and above requires to disable prefetch for USB in order + * to allow MBIM to work, currently MBIM is not needed in MHI mode. 
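+ * Note: the check below applies the workaround only for IPA v3.5 up to (but
+ * not including) v4.0, and only when not running in MHI configuration.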
+ */ + if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5 + && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) && + (!ipa3_ctx->ipa_config_is_mhi)) + ipa3_disable_prefetch(IPA_CLIENT_USB_CONS); + + memset(&gsi_props, 0, sizeof(gsi_props)); + gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type); + gsi_props.ee = resource_p->ee; + gsi_props.intr = GSI_INTR_IRQ; + gsi_props.irq = resource_p->transport_irq; + gsi_props.phys_addr = resource_p->transport_mem_base; + gsi_props.size = resource_p->transport_mem_size; + gsi_props.notify_cb = ipa_gsi_notify_cb; + gsi_props.req_clk_cb = NULL; + gsi_props.rel_clk_cb = NULL; + + result = gsi_register_device(&gsi_props, + &ipa3_ctx->gsi_dev_hdl); + if (result != GSI_STATUS_SUCCESS) { + IPAERR(":gsi register error - %d\n", result); + result = -ENODEV; + goto fail_register_device; + } + IPADBG("IPA gsi is registered\n"); + + /* setup the AP-IPA pipes */ + if (ipa3_setup_apps_pipes()) { + IPAERR(":failed to setup IPA-Apps pipes\n"); + result = -ENODEV; + goto fail_setup_apps_pipes; + } + IPADBG("IPA GPI pipes were connected\n"); + + if (ipa3_ctx->use_ipa_teth_bridge) { + /* Initialize the tethering bridge driver */ + result = ipa3_teth_bridge_driver_init(); + if (result) { + IPAERR(":teth_bridge init failed (%d)\n", -result); + result = -ENODEV; + goto fail_teth_bridge_driver_init; + } + IPADBG("teth_bridge initialized"); + } + + ipa3_debugfs_init(); + + result = ipa3_uc_interface_init(); + if (result) + IPAERR(":ipa Uc interface init failed (%d)\n", -result); + else + IPADBG(":ipa Uc interface init ok\n"); + + uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded; + ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs); + + result = ipa3_wdi_init(); + if (result) + IPAERR(":wdi init failed (%d)\n", -result); + else + IPADBG(":wdi init ok\n"); + + result = ipa3_ntn_init(); + if (result) + IPAERR(":ntn init failed (%d)\n", -result); + else + IPADBG(":ntn init ok\n"); + + result = ipa_hw_stats_init(); + if (result) + IPAERR("fail to init stats %d\n", result); + else + IPADBG(":stats init ok\n"); + + ipa3_register_panic_hdlr(); + + ipa3_ctx->q6_proxy_clk_vote_valid = true; + + mutex_lock(&ipa3_ctx->lock); + ipa3_ctx->ipa_initialization_complete = true; + mutex_unlock(&ipa3_ctx->lock); + + ipa3_trigger_ipa_ready_cbs(); + complete_all(&ipa3_ctx->init_completion_obj); + pr_info("IPA driver initialization was successful.\n"); + + return 0; + +fail_teth_bridge_driver_init: + ipa3_teardown_apps_pipes(); +fail_setup_apps_pipes: + gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false); +fail_register_device: + ipa3_destroy_flt_tbl_idrs(); + return result; +} + +static void ipa3_post_init_wq(struct work_struct *work) +{ + ipa3_post_init(&ipa3_res, ipa3_ctx->dev); +} + +static int ipa3_trigger_fw_loading_mdms(void) +{ + int result; + const struct firmware *fw; + + IPADBG("FW loading process initiated\n"); + + result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->dev); + if (result < 0) { + IPAERR("request_firmware failed, error %d\n", result); + return result; + } + + IPADBG("FWs are available for loading\n"); + + result = ipa3_load_fws(fw, ipa3_res.transport_mem_base); + if (result) { + IPAERR("IPA FWs loading has failed\n"); + release_firmware(fw); + return result; + } + + result = gsi_enable_fw(ipa3_res.transport_mem_base, + ipa3_res.transport_mem_size, + ipa3_get_gsi_ver(ipa3_res.ipa_hw_type)); + if (result) { + IPAERR("Failed to enable GSI FW\n"); + release_firmware(fw); + return result; + } + + release_firmware(fw); + + IPADBG("FW loading process is complete\n"); + return 0; +} + 
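+/*
+ * Usage sketch (not part of the driver): FW loading is kicked off from user
+ * space by writing to the IPA char device once the rest of the system is up;
+ * ipa3_write() below then invokes one of the FW-loading triggers (PIL on MSM
+ * targets, request_firmware() on MDM targets) and schedules
+ * ipa3_post_init_work. Assuming the device node is exposed as /dev/ipa, a
+ * minimal user-space caller could look like this (writing "MHI" instead
+ * selects the MHI configuration):
+ *
+ *     int fd = open("/dev/ipa", O_WRONLY);
+ *
+ *     if (fd >= 0) {
+ *         write(fd, "1", 1);
+ *         close(fd);
+ *     }
+ */
+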
+static int ipa3_trigger_fw_loading_msms(void) +{ + void *subsystem_get_retval = NULL; + + IPADBG("FW loading process initiated\n"); + + subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME); + if (IS_ERR_OR_NULL(subsystem_get_retval)) { + IPAERR("Unable to trigger PIL process for FW loading\n"); + return -EINVAL; + } + + IPADBG("FW loading process is complete\n"); + return 0; +} + +static ssize_t ipa3_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + int result = -EINVAL; + + char dbg_buff[16] = { 0 }; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + + if (missing) { + IPAERR("Unable to copy data from user\n"); + return -EFAULT; + } + + /* Prevent consequent calls from trying to load the FW again. */ + if (ipa3_is_ready()) + return count; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (ipa3_is_msm_device()) { + result = ipa3_trigger_fw_loading_msms(); + } else { + if (!strcasecmp(dbg_buff, "MHI")) { + ipa3_ctx->ipa_config_is_mhi = true; + pr_info( + "IPA is loading with MHI configuration\n"); + } else { + pr_info( + "IPA is loading with non MHI configuration\n"); + } + result = ipa3_trigger_fw_loading_mdms(); + } + /* No IPAv3.x chipsets that don't support FW loading */ + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + if (result) { + IPAERR("FW loading process has failed\n"); + return result; + } else { + queue_work(ipa3_ctx->transport_power_mgmt_wq, + &ipa3_post_init_work); + } + return count; +} + +static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx) +{ + int i, size, ret, resp; + struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec; + struct tz_smmu_ipa_protect_region_s cmd_buf; + struct scm_desc desc = { 0 }; + + if (ipa3_ctx && ipa3_ctx->ipa_tz_unlock_reg_num > 0) { + size = ipa3_ctx->ipa_tz_unlock_reg_num * + sizeof(struct tz_smmu_ipa_protect_region_iovec_s); + ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL); + if (ipa_tz_unlock_vec == NULL) + return -ENOMEM; + + for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) { + ipa_tz_unlock_vec[i].input_addr = + ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^ + (ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr & + 0xFFF); + ipa_tz_unlock_vec[i].output_addr = + ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^ + (ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr & + 0xFFF); + ipa_tz_unlock_vec[i].size = + ipa3_ctx->ipa_tz_unlock_reg[i].size; + ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE; + } + + /* pass physical address of command buffer */ + cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec); + cmd_buf.size_bytes = size; + + /* flush cache to DDR */ + __cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size); + outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size); + + desc.args[0] = virt_to_phys((void *)ipa_tz_unlock_vec); + desc.args[1] = size; + desc.arginfo = SCM_ARGS(2); + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, + TZ_MEM_PROTECT_REGION_ID), &desc); + + if (ret) { + IPAERR("scm call SCM_SVC_MP failed: %d\n", ret); + kfree(ipa_tz_unlock_vec); + return -EFAULT; + } + kfree(ipa_tz_unlock_vec); + } + return 0; +} + +static int ipa3_alloc_pkt_init(void) +{ + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_ip_packet_init cmd = {0}; + int i; + + cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT, + &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct IMM cmd\n"); + return -ENOMEM; + } + ipa3_ctx->pkt_init_imm_opcode = cmd_pyld->opcode; + + mem.size 
= cmd_pyld->len * ipa3_ctx->ipa_num_pipes; + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, + &mem.phys_base, GFP_KERNEL); + if (!mem.base) { + IPAERR("failed to alloc DMA buff of size %d\n", mem.size); + ipahal_destroy_imm_cmd(cmd_pyld); + return -ENOMEM; + } + ipahal_destroy_imm_cmd(cmd_pyld); + + memset(mem.base, 0, mem.size); + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + cmd.destination_pipe_index = i; + cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT, + &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct IMM cmd\n"); + dma_free_coherent(ipa3_ctx->pdev, + mem.size, + mem.base, + mem.phys_base); + return -ENOMEM; + } + memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data, + cmd_pyld->len); + ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len; + ipahal_destroy_imm_cmd(cmd_pyld); + } + + return 0; +} + +/** + * ipa3_pre_init() - Initialize the IPA Driver. + * This part contains all initialization which doesn't require IPA HW, such + * as structure allocations and initializations, register writes, etc. + * + * @resource_p: contain platform specific values from DST file + * @pdev: The platform device structure representing the IPA driver + * + * Function initialization process: + * Allocate memory for the driver context data struct + * Initializing the ipa3_ctx with : + * 1)parsed values from the dts file + * 2)parameters passed to the module initialization + * 3)read HW values(such as core memory size) + * Map IPA core registers to CPU memory + * Restart IPA core(HW reset) + * Initialize the look-aside caches(kmem_cache/slab) for filter, + * routing and IPA-tree + * Create memory pool with 4 objects for DMA operations(each object + * is 512Bytes long), this object will be use for tx(A5->IPA) + * Initialize lists head(routing, hdr, system pipes) + * Initialize mutexes (for ipa_ctx and NAT memory mutexes) + * Initialize spinlocks (for list related to A5<->IPA pipes) + * Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq" + * Initialize Red-Black-Tree(s) for handles of header,routing rule, + * routing table ,filtering rule + * Initialize the filter block by committing IPV4 and IPV6 default rules + * Create empty routing table in system memory(no committing) + * Create a char-device for IPA + * Initialize IPA RM (resource manager) + * Configure GSI registers (in GSI case) + */ +static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, + struct device *ipa_dev) +{ + int result = 0; + int i; + struct ipa3_rt_tbl_set *rset; + struct ipa_active_client_logging_info log_info; + + IPADBG("IPA Driver initialization started\n"); + + ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL); + if (!ipa3_ctx) { + result = -ENOMEM; + goto fail_mem_ctx; + } + + ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0); + if (ipa3_ctx->logbuf == NULL) + IPAERR("failed to create IPC log, continue...\n"); + + ipa3_ctx->pdev = ipa_dev; + ipa3_ctx->uc_pdev = ipa_dev; + ipa3_ctx->smmu_present = smmu_info.present; + if (!ipa3_ctx->smmu_present) + ipa3_ctx->smmu_s1_bypass = true; + else + ipa3_ctx->smmu_s1_bypass = smmu_info.s1_bypass; + ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base; + ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size; + ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type; + ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode; + ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge; + ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt; + ipa3_ctx->ipa_wdi2 = 
resource_p->ipa_wdi2; + ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask; + ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size; + ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size; + ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset; + ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control; + ipa3_ctx->ee = resource_p->ee; + ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa; + ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa; + ipa3_ctx->ipa3_active_clients_logging.log_rdy = false; + if (resource_p->ipa_tz_unlock_reg) { + ipa3_ctx->ipa_tz_unlock_reg_num = + resource_p->ipa_tz_unlock_reg_num; + ipa3_ctx->ipa_tz_unlock_reg = kcalloc( + ipa3_ctx->ipa_tz_unlock_reg_num, + sizeof(*ipa3_ctx->ipa_tz_unlock_reg), + GFP_KERNEL); + if (ipa3_ctx->ipa_tz_unlock_reg == NULL) { + result = -ENOMEM; + goto fail_tz_unlock_reg; + } + for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) { + ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr = + resource_p->ipa_tz_unlock_reg[i].reg_addr; + ipa3_ctx->ipa_tz_unlock_reg[i].size = + resource_p->ipa_tz_unlock_reg[i].size; + } + } + + /* unlock registers for uc */ + ipa3_tz_unlock_reg(ipa3_ctx); + + /* default aggregation parameters */ + ipa3_ctx->aggregation_type = IPA_MBIM_16; + ipa3_ctx->aggregation_byte_limit = 1; + ipa3_ctx->aggregation_time_limit = 0; + + ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL); + if (!ipa3_ctx->ctrl) { + result = -ENOMEM; + goto fail_mem_ctrl; + } + result = ipa3_controller_static_bind(ipa3_ctx->ctrl, + ipa3_ctx->ipa_hw_type); + if (result) { + IPAERR("fail to static bind IPA ctrl\n"); + result = -EFAULT; + goto fail_bind; + } + + result = ipa3_init_mem_partition(master_dev->of_node); + if (result) { + IPAERR(":ipa3_init_mem_partition failed\n"); + result = -ENODEV; + goto fail_init_mem_partition; + } + + if (ipa3_bus_scale_table) { + IPADBG("Use bus scaling info from device tree #usecases=%d\n", + ipa3_bus_scale_table->num_usecases); + ipa3_ctx->ctrl->msm_bus_data_ptr = ipa3_bus_scale_table; + } + + /* get BUS handle */ + ipa3_ctx->ipa_bus_hdl = + msm_bus_scale_register_client( + ipa3_ctx->ctrl->msm_bus_data_ptr); + if (!ipa3_ctx->ipa_bus_hdl) { + IPAERR("fail to register with bus mgr\n"); + result = -ENODEV; + goto fail_bus_reg; + } + + /* get IPA clocks */ + result = ipa3_get_clks(master_dev); + if (result) + goto fail_clk; + + /* init active_clients_log after getting ipa-clk */ + if (ipa3_active_clients_log_init()) + goto fail_init_active_client; + + /* Enable ipa3_ctx->enable_clock_scaling */ + ipa3_ctx->enable_clock_scaling = 1; + ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo; + + /* enable IPA clocks explicitly to allow the initialization */ + ipa3_enable_clks(); + + /* setup IPA register access */ + IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst); + ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst, + resource_p->ipa_mem_size); + if (!ipa3_ctx->mmio) { + IPAERR(":ipa-base ioremap err\n"); + result = -EFAULT; + goto fail_remap; + } + + if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio, + ipa3_ctx->pdev)) { + IPAERR("fail to init ipahal\n"); + result = -EFAULT; + goto fail_ipahal; + } + + result = ipa3_init_hw(); + if (result) { + IPAERR(":error initializing HW\n"); + result = -ENODEV; + goto fail_init_hw; + } + IPADBG("IPA HW initialization sequence completed"); + + ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes(); + if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) { + 
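+ /*
+ * The pipe count just read back from HW must not exceed
+ * IPA3_MAX_NUM_PIPES, since the driver's per-pipe bookkeeping is
+ * sized for at most that many entries.
+ */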
IPAERR("IPA has more pipes then supported! has %d, max %d\n", + ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES); + result = -ENODEV; + goto fail_init_hw; + } + + ipa3_ctx->ctrl->ipa_sram_read_settings(); + IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n", + ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes); + + IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n", + ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl, + ipa3_ctx->ip4_rt_tbl_nhash_lcl); + + IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n", + ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl); + + IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n", + ipa3_ctx->ip4_flt_tbl_hash_lcl, + ipa3_ctx->ip4_flt_tbl_nhash_lcl); + + IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n", + ipa3_ctx->ip6_flt_tbl_hash_lcl, + ipa3_ctx->ip6_flt_tbl_nhash_lcl); + + if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) { + IPAERR("SW expect more core memory, needed %d, avail %d\n", + ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz); + result = -ENOMEM; + goto fail_init_hw; + } + + mutex_init(&ipa3_ctx->ipa3_active_clients.mutex); + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE"); + ipa3_active_clients_log_inc(&log_info, false); + atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1); + + /* Create workqueues for power management */ + ipa3_ctx->power_mgmt_wq = + create_singlethread_workqueue("ipa_power_mgmt"); + if (!ipa3_ctx->power_mgmt_wq) { + IPAERR("failed to create power mgmt wq\n"); + result = -ENOMEM; + goto fail_init_hw; + } + + ipa3_ctx->transport_power_mgmt_wq = + create_singlethread_workqueue("transport_power_mgmt"); + if (!ipa3_ctx->transport_power_mgmt_wq) { + IPAERR("failed to create transport power mgmt wq\n"); + result = -ENOMEM; + goto fail_create_transport_wq; + } + + mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex); + + /* init the lookaside cache */ + ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT", + sizeof(struct ipa3_flt_entry), 0, 0, NULL); + if (!ipa3_ctx->flt_rule_cache) { + IPAERR(":ipa flt cache create failed\n"); + result = -ENOMEM; + goto fail_flt_rule_cache; + } + ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT", + sizeof(struct ipa3_rt_entry), 0, 0, NULL); + if (!ipa3_ctx->rt_rule_cache) { + IPAERR(":ipa rt cache create failed\n"); + result = -ENOMEM; + goto fail_rt_rule_cache; + } + ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR", + sizeof(struct ipa3_hdr_entry), 0, 0, NULL); + if (!ipa3_ctx->hdr_cache) { + IPAERR(":ipa hdr cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_cache; + } + ipa3_ctx->hdr_offset_cache = + kmem_cache_create("IPA_HDR_OFFSET", + sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL); + if (!ipa3_ctx->hdr_offset_cache) { + IPAERR(":ipa hdr off cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_offset_cache; + } + ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX", + sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL); + if (!ipa3_ctx->hdr_proc_ctx_cache) { + IPAERR(":ipa hdr proc ctx cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_proc_ctx_cache; + } + ipa3_ctx->hdr_proc_ctx_offset_cache = + kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET", + sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL); + if (!ipa3_ctx->hdr_proc_ctx_offset_cache) { + IPAERR(":ipa hdr proc ctx off cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_proc_ctx_offset_cache; + } + ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL", + sizeof(struct ipa3_rt_tbl), 0, 0, NULL); + if (!ipa3_ctx->rt_tbl_cache) { + IPAERR(":ipa rt tbl cache create 
failed\n"); + result = -ENOMEM; + goto fail_rt_tbl_cache; + } + ipa3_ctx->tx_pkt_wrapper_cache = + kmem_cache_create("IPA_TX_PKT_WRAPPER", + sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL); + if (!ipa3_ctx->tx_pkt_wrapper_cache) { + IPAERR(":ipa tx pkt wrapper cache create failed\n"); + result = -ENOMEM; + goto fail_tx_pkt_wrapper_cache; + } + ipa3_ctx->rx_pkt_wrapper_cache = + kmem_cache_create("IPA_RX_PKT_WRAPPER", + sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL); + if (!ipa3_ctx->rx_pkt_wrapper_cache) { + IPAERR(":ipa rx pkt wrapper cache create failed\n"); + result = -ENOMEM; + goto fail_rx_pkt_wrapper_cache; + } + + /* allocate memory for DMA_TASK workaround */ + result = ipa3_allocate_dma_task_for_gsi(); + if (result) { + IPAERR("failed to allocate dma task\n"); + goto fail_dma_task; + } + + /* init the various list heads */ + INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list); + for (i = 0; i < IPA_HDR_BIN_MAX; i++) { + INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]); + INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]); + } + INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list); + for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { + INIT_LIST_HEAD( + &ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]); + INIT_LIST_HEAD( + &ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i]); + } + INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list); + idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids); + INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list); + idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids); + + rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4]; + INIT_LIST_HEAD(&rset->head_rt_tbl_list); + idr_init(&rset->rule_ids); + rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6]; + INIT_LIST_HEAD(&rset->head_rt_tbl_list); + idr_init(&rset->rule_ids); + + INIT_LIST_HEAD(&ipa3_ctx->intf_list); + INIT_LIST_HEAD(&ipa3_ctx->msg_list); + INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list); + init_waitqueue_head(&ipa3_ctx->msg_waitq); + mutex_init(&ipa3_ctx->msg_lock); + + mutex_init(&ipa3_ctx->lock); + mutex_init(&ipa3_ctx->nat_mem.lock); + + idr_init(&ipa3_ctx->ipa_idr); + spin_lock_init(&ipa3_ctx->idr_lock); + + /* wlan related member */ + memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb)); + spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock); + spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock); + INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list); + + ipa3_ctx->class = class_create(THIS_MODULE, DRV_NAME); + + result = alloc_chrdev_region(&ipa3_ctx->dev_num, 0, 1, DRV_NAME); + if (result) { + IPAERR("alloc_chrdev_region err\n"); + result = -ENODEV; + goto fail_alloc_chrdev_region; + } + + ipa3_ctx->dev = device_create(ipa3_ctx->class, NULL, ipa3_ctx->dev_num, + ipa3_ctx, DRV_NAME); + if (IS_ERR(ipa3_ctx->dev)) { + IPAERR(":device_create err\n"); + result = -ENODEV; + goto fail_device_create; + } + + if (ipa3_create_nat_device()) { + IPAERR("unable to create nat device\n"); + result = -ENODEV; + goto fail_nat_dev_add; + } + + /* Create a wakeup source. 
*/ + wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS"); + spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock); + + /* Initialize IPA RM (resource manager) */ + result = ipa_rm_initialize(); + if (result) { + IPAERR("RM initialization failed (%d)\n", -result); + result = -ENODEV; + goto fail_ipa_rm_init; + } + IPADBG("IPA resource manager initialized"); + + result = ipa3_create_apps_resource(); + if (result) { + IPAERR("Failed to create APPS_CONS resource\n"); + result = -ENODEV; + goto fail_create_apps_resource; + } + + result = ipa3_alloc_pkt_init(); + if (result) { + IPAERR("Failed to alloc pkt_init payload\n"); + result = -ENODEV; + goto fail_create_apps_resource; + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) + ipa3_enable_dcd(); + + INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list); + + init_completion(&ipa3_ctx->init_completion_obj); + init_completion(&ipa3_ctx->uc_loaded_completion_obj); + + /* + * We can't register the GSI driver yet, as it expects + * the GSI FW to be up and running before the registration. + * + * For IPA3.0, the GSI configuration is done by the GSI driver. + * For IPA3.1 (and on), the GSI configuration is done by TZ. + */ + if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) { + result = ipa3_gsi_pre_fw_load_init(); + if (result) { + IPAERR("gsi pre FW loading config failed\n"); + result = -ENODEV; + goto fail_ipa_init_interrupts; + } + } + + cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops); + ipa3_ctx->cdev.owner = THIS_MODULE; + ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */ + + result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1); + if (result) { + IPAERR(":cdev_add err=%d\n", -result); + result = -ENODEV; + goto fail_cdev_add; + } + IPADBG("ipa cdev added successful. major:%d minor:%d\n", + MAJOR(ipa3_ctx->dev_num), + MINOR(ipa3_ctx->dev_num)); + return 0; + +fail_cdev_add: +fail_ipa_init_interrupts: + ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS); +fail_create_apps_resource: + ipa_rm_exit(); +fail_ipa_rm_init: +fail_nat_dev_add: + device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num); +fail_device_create: + unregister_chrdev_region(ipa3_ctx->dev_num, 1); +fail_alloc_chrdev_region: + rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6]; + idr_destroy(&rset->rule_ids); + rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4]; + idr_destroy(&rset->rule_ids); + idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids); + idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids); + ipa3_free_dma_task_for_gsi(); +fail_dma_task: + idr_destroy(&ipa3_ctx->ipa_idr); + kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache); +fail_rx_pkt_wrapper_cache: + kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache); +fail_tx_pkt_wrapper_cache: + kmem_cache_destroy(ipa3_ctx->rt_tbl_cache); +fail_rt_tbl_cache: + kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache); +fail_hdr_proc_ctx_offset_cache: + kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache); +fail_hdr_proc_ctx_cache: + kmem_cache_destroy(ipa3_ctx->hdr_offset_cache); +fail_hdr_offset_cache: + kmem_cache_destroy(ipa3_ctx->hdr_cache); +fail_hdr_cache: + kmem_cache_destroy(ipa3_ctx->rt_rule_cache); +fail_rt_rule_cache: + kmem_cache_destroy(ipa3_ctx->flt_rule_cache); +fail_flt_rule_cache: + destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq); +fail_create_transport_wq: + destroy_workqueue(ipa3_ctx->power_mgmt_wq); +fail_init_hw: + ipahal_destroy(); +fail_ipahal: + iounmap(ipa3_ctx->mmio); +fail_remap: + ipa3_disable_clks(); + ipa3_active_clients_log_destroy(); +fail_init_active_client: + if (ipa3_clk) + clk_put(ipa3_clk); + ipa3_clk = NULL; +fail_clk: + 
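+ /*
+ * Teardown runs in reverse order of setup: the bus-scaling client was
+ * registered before the IPA clocks were obtained, so it is
+ * unregistered after the clock cleanup above.
+ */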
msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl); +fail_bus_reg: + if (ipa3_bus_scale_table) { + msm_bus_cl_clear_pdata(ipa3_bus_scale_table); + ipa3_bus_scale_table = NULL; + } +fail_init_mem_partition: +fail_bind: + kfree(ipa3_ctx->ctrl); +fail_mem_ctrl: + kfree(ipa3_ctx->ipa_tz_unlock_reg); +fail_tz_unlock_reg: + if (ipa3_ctx->logbuf) + ipc_log_context_destroy(ipa3_ctx->logbuf); + kfree(ipa3_ctx); + ipa3_ctx = NULL; +fail_mem_ctx: + return result; +} + +static int get_ipa_dts_configuration(struct platform_device *pdev, + struct ipa3_plat_drv_res *ipa_drv_res) +{ + int i, result, pos; + struct resource *resource; + u32 *ipa_tz_unlock_reg; + int elem_num; + + /* initialize ipa3_res */ + ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST; + ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE; + ipa_drv_res->ipa_hw_type = 0; + ipa_drv_res->ipa3_hw_mode = 0; + ipa_drv_res->modem_cfg_emb_pipe_flt = false; + ipa_drv_res->ipa_wdi2 = false; + ipa_drv_res->use_64_bit_dma_mask = false; + ipa_drv_res->use_bw_vote = false; + ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; + ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; + ipa_drv_res->apply_rg10_wa = false; + ipa_drv_res->gsi_ch20_wa = false; + ipa_drv_res->ipa_tz_unlock_reg_num = 0; + ipa_drv_res->ipa_tz_unlock_reg = NULL; + + /* Get IPA HW Version */ + result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver", + &ipa_drv_res->ipa_hw_type); + if ((result) || (ipa_drv_res->ipa_hw_type == 0)) { + IPAERR(":get resource failed for ipa-hw-ver\n"); + return -ENODEV; + } + IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type); + + if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) { + IPAERR(":IPA version below 3.0 not supported\n"); + return -ENODEV; + } + + /* Get IPA HW mode */ + result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode", + &ipa_drv_res->ipa3_hw_mode); + if (result) + IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n"); + else + IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d", + ipa_drv_res->ipa3_hw_mode); + + /* Get IPA WAN / LAN RX pool size */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,wan-rx-ring-size", + &ipa_drv_res->wan_rx_ring_size); + if (result) + IPADBG("using default for wan-rx-ring-size = %u\n", + ipa_drv_res->wan_rx_ring_size); + else + IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u", + ipa_drv_res->wan_rx_ring_size); + + result = of_property_read_u32(pdev->dev.of_node, + "qcom,lan-rx-ring-size", + &ipa_drv_res->lan_rx_ring_size); + if (result) + IPADBG("using default for lan-rx-ring-size = %u\n", + ipa_drv_res->lan_rx_ring_size); + else + IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u", + ipa_drv_res->lan_rx_ring_size); + + ipa_drv_res->use_ipa_teth_bridge = + of_property_read_bool(pdev->dev.of_node, + "qcom,use-ipa-tethering-bridge"); + IPADBG(": using TBDr = %s", + ipa_drv_res->use_ipa_teth_bridge + ? "True" : "False"); + + ipa_drv_res->modem_cfg_emb_pipe_flt = + of_property_read_bool(pdev->dev.of_node, + "qcom,modem-cfg-emb-pipe-flt"); + IPADBG(": modem configure embedded pipe filtering = %s\n", + ipa_drv_res->modem_cfg_emb_pipe_flt + ? "True" : "False"); + + ipa_drv_res->ipa_wdi2 = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-wdi2"); + IPADBG(": WDI-2.0 = %s\n", + ipa_drv_res->ipa_wdi2 + ? "True" : "False"); + + ipa_drv_res->use_64_bit_dma_mask = + of_property_read_bool(pdev->dev.of_node, + "qcom,use-64-bit-dma-mask"); + IPADBG(": use_64_bit_dma_mask = %s\n", + ipa_drv_res->use_64_bit_dma_mask + ? 
"True" : "False"); + + ipa_drv_res->use_bw_vote = + of_property_read_bool(pdev->dev.of_node, + "qcom,bandwidth-vote-for-ipa"); + IPADBG(": use_bw_vote = %s\n", + ipa_drv_res->use_bw_vote + ? "True" : "False"); + + ipa_drv_res->skip_uc_pipe_reset = + of_property_read_bool(pdev->dev.of_node, + "qcom,skip-uc-pipe-reset"); + IPADBG(": skip uC pipe reset = %s\n", + ipa_drv_res->skip_uc_pipe_reset + ? "True" : "False"); + + ipa_drv_res->tethered_flow_control = + of_property_read_bool(pdev->dev.of_node, + "qcom,tethered-flow-control"); + IPADBG(": Use apps based flow control = %s\n", + ipa_drv_res->tethered_flow_control + ? "True" : "False"); + + /* Get IPA wrapper address */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ipa-base"); + if (!resource) { + IPAERR(":get resource failed for ipa-base!\n"); + return -ENODEV; + } + ipa_drv_res->ipa_mem_base = resource->start; + ipa_drv_res->ipa_mem_size = resource_size(resource); + IPADBG(": ipa-base = 0x%x, size = 0x%x\n", + ipa_drv_res->ipa_mem_base, + ipa_drv_res->ipa_mem_size); + + smmu_info.ipa_base = ipa_drv_res->ipa_mem_base; + smmu_info.ipa_size = ipa_drv_res->ipa_mem_size; + + /* Get IPA GSI address */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "gsi-base"); + if (!resource) { + IPAERR(":get resource failed for gsi-base\n"); + return -ENODEV; + } + ipa_drv_res->transport_mem_base = resource->start; + ipa_drv_res->transport_mem_size = resource_size(resource); + IPADBG(": gsi-base = 0x%x, size = 0x%x\n", + ipa_drv_res->transport_mem_base, + ipa_drv_res->transport_mem_size); + + /* Get IPA GSI IRQ number */ + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "gsi-irq"); + if (!resource) { + IPAERR(":get resource failed for gsi-irq\n"); + return -ENODEV; + } + ipa_drv_res->transport_irq = resource->start; + IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq); + + /* Get IPA pipe mem start ofst */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ipa-pipe-mem"); + if (!resource) { + IPADBG(":not using pipe memory - resource nonexisting\n"); + } else { + ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start; + ipa_drv_res->ipa_pipe_mem_size = resource_size(resource); + IPADBG(":using pipe memory - at 0x%x of size 0x%x\n", + ipa_drv_res->ipa_pipe_mem_start_ofst, + ipa_drv_res->ipa_pipe_mem_size); + } + + /* Get IPA IRQ number */ + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "ipa-irq"); + if (!resource) { + IPAERR(":get resource failed for ipa-irq\n"); + return -ENODEV; + } + ipa_drv_res->ipa_irq = resource->start; + IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq); + + result = of_property_read_u32(pdev->dev.of_node, "qcom,ee", + &ipa_drv_res->ee); + if (result) + ipa_drv_res->ee = 0; + + ipa_drv_res->apply_rg10_wa = + of_property_read_bool(pdev->dev.of_node, + "qcom,use-rg10-limitation-mitigation"); + IPADBG(": Use Register Group 10 limitation mitigation = %s\n", + ipa_drv_res->apply_rg10_wa + ? "True" : "False"); + + ipa_drv_res->gsi_ch20_wa = + of_property_read_bool(pdev->dev.of_node, + "qcom,do-not-use-ch-gsi-20"); + IPADBG(": GSI CH 20 WA is = %s\n", + ipa_drv_res->apply_rg10_wa + ? 
"Needed" : "Not needed"); + + elem_num = of_property_count_elems_of_size(pdev->dev.of_node, + "qcom,ipa-tz-unlock-reg", sizeof(u32)); + + if (elem_num > 0 && elem_num % 2 == 0) { + ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2; + + ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL); + if (ipa_tz_unlock_reg == NULL) + return -ENOMEM; + + ipa_drv_res->ipa_tz_unlock_reg = kcalloc( + ipa_drv_res->ipa_tz_unlock_reg_num, + sizeof(*ipa_drv_res->ipa_tz_unlock_reg), + GFP_KERNEL); + if (ipa_drv_res->ipa_tz_unlock_reg == NULL) { + kfree(ipa_tz_unlock_reg); + return -ENOMEM; + } + + if (of_property_read_u32_array(pdev->dev.of_node, + "qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg, + elem_num)) { + IPAERR("failed to read register addresses\n"); + kfree(ipa_tz_unlock_reg); + kfree(ipa_drv_res->ipa_tz_unlock_reg); + return -EFAULT; + } + + pos = 0; + for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) { + ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr = + ipa_tz_unlock_reg[pos++]; + ipa_drv_res->ipa_tz_unlock_reg[i].size = + ipa_tz_unlock_reg[pos++]; + IPADBG("tz unlock reg %d: addr 0x%pa size %d\n", i, + &ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr, + ipa_drv_res->ipa_tz_unlock_reg[i].size); + } + kfree(ipa_tz_unlock_reg); + } + return 0; +} + +static int ipa_smmu_wlan_cb_probe(struct device *dev) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx(); + int atomic_ctx = 1; + int fast = 1; + int bypass = 1; + int ret; + u32 add_map_size; + const u32 *add_map; + int i; + + IPADBG("sub pdev=%pK\n", dev); + + cb->dev = dev; + cb->iommu = iommu_domain_alloc(dev->bus); + if (!cb->iommu) { + IPAERR("could not alloc iommu domain\n"); + /* assume this failure is because iommu driver is not ready */ + return -EPROBE_DEFER; + } + cb->valid = true; + + if (smmu_info.s1_bypass) { + if (iommu_domain_set_attr(cb->iommu, + DOMAIN_ATTR_S1_BYPASS, + &bypass)) { + IPAERR("couldn't set bypass\n"); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU S1 BYPASS\n"); + } else { + if (iommu_domain_set_attr(cb->iommu, + DOMAIN_ATTR_ATOMIC, + &atomic_ctx)) { + IPAERR("couldn't disable coherent HTW\n"); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU ATTR ATOMIC\n"); + + if (smmu_info.fast_map) { + if (iommu_domain_set_attr(cb->iommu, + DOMAIN_ATTR_FAST, + &fast)) { + IPAERR("couldn't set fast map\n"); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU fast map set\n"); + } + } + + ret = iommu_attach_device(cb->iommu, dev); + if (ret) { + IPAERR("could not attach device ret=%d\n", ret); + cb->valid = false; + return ret; + } + /* MAP ipa-uc ram */ + add_map = of_get_property(dev->of_node, + "qcom,additional-mapping", &add_map_size); + if (add_map) { + /* mapping size is an array of 3-tuple of u32 */ + if (add_map_size % (3 * sizeof(u32))) { + IPAERR("wrong additional mapping format\n"); + cb->valid = false; + return -EFAULT; + } + + /* iterate of each entry of the additional mapping array */ + for (i = 0; i < add_map_size / sizeof(u32); i += 3) { + u32 iova = be32_to_cpu(add_map[i]); + u32 pa = be32_to_cpu(add_map[i + 1]); + u32 size = be32_to_cpu(add_map[i + 2]); + unsigned long iova_p; + phys_addr_t pa_p; + u32 size_p; + + IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, + iova_p, pa_p, size_p); + IPADBG("mapping 0x%lx to 0x%pa size %d\n", + iova_p, &pa_p, size_p); + ipa3_iommu_map(cb->iommu, + iova_p, pa_p, size_p, + IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + } + } + return 0; +} + +static int ipa_smmu_uc_cb_probe(struct device *dev) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx(); + int 
atomic_ctx = 1; + int bypass = 1; + int fast = 1; + int ret; + u32 iova_ap_mapping[2]; + + IPADBG("UC CB PROBE sub pdev=%pK\n", dev); + + ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping", + iova_ap_mapping, 2); + if (ret) { + IPAERR("Fail to read UC start/size iova addresses\n"); + return ret; + } + cb->va_start = iova_ap_mapping[0]; + cb->va_size = iova_ap_mapping[1]; + cb->va_end = cb->va_start + cb->va_size; + IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size); + + if (smmu_info.use_64_bit_dma_mask) { + if (dma_set_mask(dev, DMA_BIT_MASK(64)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) { + IPAERR("DMA set 64bit mask failed\n"); + return -EOPNOTSUPP; + } + } else { + if (dma_set_mask(dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { + IPAERR("DMA set 32bit mask failed\n"); + return -EOPNOTSUPP; + } + } + IPADBG("UC CB PROBE=%pK create IOMMU mapping\n", dev); + + cb->dev = dev; + cb->mapping = arm_iommu_create_mapping(dev->bus, + cb->va_start, cb->va_size); + if (IS_ERR_OR_NULL(cb->mapping)) { + IPADBG("Fail to create mapping\n"); + /* assume this failure is because iommu driver is not ready */ + return -EPROBE_DEFER; + } + IPADBG("SMMU mapping created\n"); + cb->valid = true; + + IPADBG("UC CB PROBE sub pdev=%pK set attribute\n", dev); + if (smmu_info.s1_bypass) { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_S1_BYPASS, + &bypass)) { + IPAERR("couldn't set bypass\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU S1 BYPASS\n"); + } else { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_ATOMIC, + &atomic_ctx)) { + IPAERR("couldn't set domain as atomic\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU atomic set\n"); + + if (smmu_info.fast_map) { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_FAST, + &fast)) { + IPAERR("couldn't set fast map\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU fast map set\n"); + } + } + + IPADBG("UC CB PROBE sub pdev=%pK attaching IOMMU device\n", dev); + ret = arm_iommu_attach_device(cb->dev, cb->mapping); + if (ret) { + IPAERR("could not attach device ret=%d\n", ret); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return ret; + } + + cb->next_addr = cb->va_end; + ipa3_ctx->uc_pdev = dev; + + return 0; +} + +static int ipa_smmu_ap_cb_probe(struct device *dev) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(); + int result; + int atomic_ctx = 1; + int fast = 1; + int bypass = 1; + u32 iova_ap_mapping[2]; + u32 add_map_size; + const u32 *add_map; + void *smem_addr; + size_t smem_size; + int ret; + int i; + unsigned long iova_p; + phys_addr_t pa_p; + u32 size_p; + phys_addr_t iova; + phys_addr_t pa; + + IPADBG("AP CB probe: sub pdev=%pK\n", dev); + + result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping", + iova_ap_mapping, 2); + if (result) { + IPAERR("Fail to read AP start/size iova addresses\n"); + return result; + } + cb->va_start = iova_ap_mapping[0]; + cb->va_size = iova_ap_mapping[1]; + cb->va_end = cb->va_start + cb->va_size; + IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size); + + if (smmu_info.use_64_bit_dma_mask) { + if (dma_set_mask(dev, DMA_BIT_MASK(64)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) { + IPAERR("DMA set 64bit mask failed\n"); + return -EOPNOTSUPP; + } + } else { + if (dma_set_mask(dev, DMA_BIT_MASK(32)) 
|| + dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { + IPAERR("DMA set 32bit mask failed\n"); + return -EOPNOTSUPP; + } + } + + cb->dev = dev; + cb->mapping = arm_iommu_create_mapping(dev->bus, + cb->va_start, cb->va_size); + if (IS_ERR_OR_NULL(cb->mapping)) { + IPADBG("Fail to create mapping\n"); + /* assume this failure is because iommu driver is not ready */ + return -EPROBE_DEFER; + } + IPADBG("SMMU mapping created\n"); + cb->valid = true; + + if (smmu_info.s1_bypass) { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_S1_BYPASS, + &bypass)) { + IPAERR("couldn't set bypass\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU S1 BYPASS\n"); + } else { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_ATOMIC, + &atomic_ctx)) { + IPAERR("couldn't set domain as atomic\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU atomic set\n"); + + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_FAST, + &fast)) { + IPAERR("couldn't set fast map\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU fast map set\n"); + } + + result = arm_iommu_attach_device(cb->dev, cb->mapping); + if (result) { + IPAERR("couldn't attach to IOMMU ret=%d\n", result); + cb->valid = false; + return result; + } + + add_map = of_get_property(dev->of_node, + "qcom,additional-mapping", &add_map_size); + if (add_map) { + /* mapping size is an array of 3-tuple of u32 */ + if (add_map_size % (3 * sizeof(u32))) { + IPAERR("wrong additional mapping format\n"); + cb->valid = false; + return -EFAULT; + } + + /* iterate of each entry of the additional mapping array */ + for (i = 0; i < add_map_size / sizeof(u32); i += 3) { + u32 iova = be32_to_cpu(add_map[i]); + u32 pa = be32_to_cpu(add_map[i + 1]); + u32 size = be32_to_cpu(add_map[i + 2]); + unsigned long iova_p; + phys_addr_t pa_p; + u32 size_p; + + IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, + iova_p, pa_p, size_p); + IPADBG("mapping 0x%lx to 0x%pa size %d\n", + iova_p, &pa_p, size_p); + ipa3_iommu_map(cb->mapping->domain, + iova_p, pa_p, size_p, + IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + } + } + + /* map SMEM memory for IPA table accesses */ + ret = qcom_smem_alloc(SMEM_MODEM, + SMEM_IPA_FILTER_TABLE, + IPA_SMEM_SIZE); + + if (ret < 0 && ret != -EEXIST) { + IPAERR("unable to allocate smem MODEM entry\n"); + cb->valid = false; + return -EFAULT; + } + smem_addr = qcom_smem_get(SMEM_MODEM, + SMEM_IPA_FILTER_TABLE, + &smem_size); + if (IS_ERR(smem_addr)) { + IPAERR("unable to acquire smem MODEM entry\n"); + cb->valid = false; + return -EFAULT; + } + + iova = qcom_smem_virt_to_phys(smem_addr); + pa = iova; + + IPA_SMMU_ROUND_TO_PAGE(iova, pa, IPA_SMEM_SIZE, + iova_p, pa_p, size_p); + IPADBG("mapping 0x%lx to 0x%pa size %d\n", + iova_p, &pa_p, size_p); + ipa3_iommu_map(cb->mapping->domain, + iova_p, pa_p, size_p, + IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + + smmu_info.present = true; + + if (!ipa3_bus_scale_table) + ipa3_bus_scale_table = msm_bus_cl_get_pdata(ipa3_pdev); + + /* Proceed to real initialization */ + result = ipa3_pre_init(&ipa3_res, dev); + if (result) { + IPAERR("ipa_init failed\n"); + arm_iommu_detach_device(cb->dev); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return result; + } + + return result; +} + +static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt) +{ + ipa3_freeze_clock_vote_and_notify_modem(); + + return IRQ_HANDLED; +} + +static int 
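+/*
+ * Sets up the smp2p GPIOs shared with the modem: the
+ * "qcom,smp2pgpio_map_ipa_1_out" node supplies the GPIO base used for
+ * outbound signalling, while the "qcom,smp2pgpio_map_ipa_1_in" node
+ * supplies the base whose QUERY_CLK offset is wired to
+ * ipa3_smp2p_modem_clk_query_isr() so the modem can ask the AP to
+ * freeze its IPA clock vote.
+ */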
ipa3_smp2p_probe(struct device *dev) +{ + struct device_node *node = dev->of_node; + int res; + + if (ipa3_ctx == NULL) { + IPAERR("ipa3_ctx was not initialized\n"); + return -ENXIO; + } + IPADBG("node->name=%s\n", node->name); + if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) { + res = of_get_gpio(node, 0); + if (res < 0) { + IPADBG("of_get_gpio returned %d\n", res); + return res; + } + + ipa3_ctx->smp2p_info.out_base_id = res; + IPADBG("smp2p out_base_id=%d\n", + ipa3_ctx->smp2p_info.out_base_id); + } else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) { + int irq; + + res = of_get_gpio(node, 0); + if (res < 0) { + IPADBG("of_get_gpio returned %d\n", res); + return res; + } + + ipa3_ctx->smp2p_info.in_base_id = res; + IPADBG("smp2p in_base_id=%d\n", + ipa3_ctx->smp2p_info.in_base_id); + + /* register for modem clk query */ + irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id + + IPA_GPIO_IN_QUERY_CLK_IDX); + if (irq < 0) { + IPAERR("gpio_to_irq failed %d\n", irq); + return -ENODEV; + } + IPADBG("smp2p irq#=%d\n", irq); + res = request_irq(irq, + (irq_handler_t)ipa3_smp2p_modem_clk_query_isr, + IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev); + if (res) { + IPAERR("fail to register smp2p irq=%d\n", irq); + return -ENODEV; + } + res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id + + IPA_GPIO_IN_QUERY_CLK_IDX); + if (res) + IPAERR("failed to enable irq wake\n"); + } + + return 0; +} + +int ipa3_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + int result; + struct device *dev = &pdev_p->dev; + + IPADBG("IPA driver probing started\n"); + IPADBG("dev->of_node->name = %s\n", dev->of_node->name); + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb")) + return ipa_smmu_ap_cb_probe(dev); + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb")) + return ipa_smmu_wlan_cb_probe(dev); + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb")) + return ipa_smmu_uc_cb_probe(dev); + + if (of_device_is_compatible(dev->of_node, + "qcom,smp2pgpio-map-ipa-1-in")) + return ipa3_smp2p_probe(dev); + + if (of_device_is_compatible(dev->of_node, + "qcom,smp2pgpio-map-ipa-1-out")) + return ipa3_smp2p_probe(dev); + + master_dev = dev; + if (!ipa3_pdev) + ipa3_pdev = pdev_p; + + result = get_ipa_dts_configuration(pdev_p, &ipa3_res); + if (result) { + IPAERR("IPA dts parsing failed\n"); + return result; + } + + result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl); + if (result) { + IPAERR("IPA API binding failed\n"); + return result; + } + + if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) { + if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,smmu-s1-bypass")) + smmu_info.s1_bypass = true; + if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,smmu-fast-map")) + smmu_info.fast_map = true; + if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,use-64-bit-dma-mask")) + smmu_info.use_64_bit_dma_mask = true; + smmu_info.arm_smmu = true; + pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n", + smmu_info.s1_bypass, smmu_info.fast_map); + } else if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,msm-smmu")) { + IPAERR("Legacy IOMMU not supported\n"); + result = -EOPNOTSUPP; + } else { + if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,use-64-bit-dma-mask")) { + if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) || + dma_set_coherent_mask(&pdev_p->dev, + DMA_BIT_MASK(64))) { + IPAERR("DMA set 64bit mask failed\n"); + 
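+ /* without an SMMU, a usable DMA mask is mandatory; give up on probe */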
return -EOPNOTSUPP; + } + } else { + if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(&pdev_p->dev, + DMA_BIT_MASK(32))) { + IPAERR("DMA set 32bit mask failed\n"); + return -EOPNOTSUPP; + } + } + + if (!ipa3_bus_scale_table) + ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p); + /* Proceed to real initialization */ + result = ipa3_pre_init(&ipa3_res, dev); + if (result) { + IPAERR("ipa3_init failed\n"); + return result; + } + } + + result = of_platform_populate(pdev_p->dev.of_node, + pdrv_match, NULL, &pdev_p->dev); + if (result) { + IPAERR("failed to populate platform\n"); + return result; + } + + return result; +} + +/** + * ipa3_ap_suspend() - suspend callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP suspend + * operation is invoked, usually by pressing a suspend button. + * + * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP. + * This will postpone the suspend operation until IPA is no longer used by AP. + */ +int ipa3_ap_suspend(struct device *dev) +{ + int i; + + IPADBG("Enter...\n"); + + /* In case there is a tx/rx handler in polling mode fail to suspend */ + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (ipa3_ctx->ep[i].sys && + atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) { + IPAERR("EP %d is in polling state, do not suspend\n", + i); + return -EAGAIN; + } + } + + /* + * Release transport IPA resource without waiting for inactivity timer + */ + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0); + ipa3_transport_release_resource(NULL); + IPADBG("Exit\n"); + + return 0; +} + +/** + * ipa3_ap_resume() - resume callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP resume + * operation is invoked. + * + * Always returns 0 since resume should always succeed. + */ +int ipa3_ap_resume(struct device *dev) +{ + return 0; +} + +struct ipa3_context *ipa3_get_ctx(void) +{ + return ipa3_ctx; +} + +static void ipa_gsi_notify_cb(struct gsi_per_notify *notify) +{ + /* + * These values are reported by hardware. Any error indicates + * hardware unexpected state. 
+ */ + switch (notify->evt_id) { + case GSI_PER_EVT_GLOB_ERROR: + IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n"); + IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc); + break; + case GSI_PER_EVT_GLOB_GP1: + IPAERR("Got GSI_PER_EVT_GLOB_GP1\n"); + BUG(); + break; + case GSI_PER_EVT_GLOB_GP2: + IPAERR("Got GSI_PER_EVT_GLOB_GP2\n"); + BUG(); + break; + case GSI_PER_EVT_GLOB_GP3: + IPAERR("Got GSI_PER_EVT_GLOB_GP3\n"); + BUG(); + break; + case GSI_PER_EVT_GENERAL_BREAK_POINT: + IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n"); + break; + case GSI_PER_EVT_GENERAL_BUS_ERROR: + IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n"); + BUG(); + break; + case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW: + IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n"); + BUG(); + break; + case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW: + IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n"); + BUG(); + break; + default: + IPAERR("Received unexpected evt: %d\n", + notify->evt_id); + BUG(); + } +} + +int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data) +{ + struct ipa3_ready_cb_info *cb_info = NULL; + + /* check ipa3_ctx existed or not */ + if (!ipa3_ctx) { + IPADBG("IPA driver haven't initialized\n"); + return -ENXIO; + } + mutex_lock(&ipa3_ctx->lock); + if (ipa3_ctx->ipa_initialization_complete) { + mutex_unlock(&ipa3_ctx->lock); + IPADBG("IPA driver finished initialization already\n"); + return -EEXIST; + } + + cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL); + if (!cb_info) { + mutex_unlock(&ipa3_ctx->lock); + return -ENOMEM; + } + + cb_info->ready_cb = ipa_ready_cb; + cb_info->user_data = user_data; + + list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list); + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +int ipa3_iommu_map(struct iommu_domain *domain, + unsigned long iova, phys_addr_t paddr, size_t size, int prot) +{ + struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx(); + struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_uc_smmu_ctx(); + + IPADBG("domain =0x%pK iova 0x%lx\n", domain, iova); + IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size); + + /* make sure no overlapping */ + if (domain == ipa3_get_smmu_domain()) { + if (iova >= ap_cb->va_start && iova < ap_cb->va_end) { + IPAERR("iommu AP overlap addr 0x%lx\n", iova); + ipa_assert(); + return -EFAULT; + } + } else if (domain == ipa3_get_wlan_smmu_domain()) { + /* wlan is one time map */ + } else if (domain == ipa3_get_uc_smmu_domain()) { + if (iova >= uc_cb->va_start && iova < uc_cb->va_end) { + IPAERR("iommu uC overlap addr 0x%lx\n", iova); + ipa_assert(); + return -EFAULT; + } + } else { + IPAERR("Unexpected domain 0x%pK\n", domain); + ipa_assert(); + return -EFAULT; + } + + return iommu_map(domain, iova, paddr, size, prot); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA HW device driver"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c new file mode 100644 index 000000000000..8bb053b3360a --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -0,0 +1,1501 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include "ipa_i.h" +#include "linux/msm_gsi.h" + +/* + * These values were determined empirically and shows good E2E bi- + * directional throughputs + */ +#define IPA_HOLB_TMR_EN 0x1 +#define IPA_HOLB_TMR_DIS 0x0 +#define IPA_HOLB_TMR_DEFAULT_VAL 0x1ff +#define IPA_POLL_AGGR_STATE_RETRIES_NUM 3 +#define IPA_POLL_AGGR_STATE_SLEEP_MSEC 1 + +#define IPA_PKT_FLUSH_TO_US 100 + +#define IPA_POLL_FOR_EMPTINESS_NUM 50 +#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20 +#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5 +#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200 + +/* xfer_rsc_idx should be 7 bits */ +#define IPA_XFER_RSC_IDX_MAX 127 + +static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep, + bool *is_empty); + +int ipa3_enable_data_path(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl]; + struct ipa_ep_cfg_holb holb_cfg; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + int res = 0; + struct ipahal_reg_endp_init_rsrc_grp rsrc_grp; + + /* Assign the resource group for pipe */ + memset(&rsrc_grp, 0, sizeof(rsrc_grp)); + rsrc_grp.rsrc_grp = ipa_get_ep_group(ep->client); + if (rsrc_grp.rsrc_grp == -1) { + IPAERR("invalid group for client %d\n", ep->client); + WARN_ON(1); + return -EFAULT; + } + + IPADBG("Setting group %d for pipe %d\n", + rsrc_grp.rsrc_grp, clnt_hdl); + ipahal_write_reg_n_fields(IPA_ENDP_INIT_RSRC_GRP_n, clnt_hdl, + &rsrc_grp); + + IPADBG("Enabling data path\n"); + if (IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_DIS; + holb_cfg.tmr_val = 0; + res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + + /* Enable the pipe */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + if (IPA_CLIENT_IS_CONS(ep->client) && + (ep->keep_ipa_awake || + ipa3_ctx->resume_on_connect[ep->client] || + !ipa3_should_pipe_be_suspended(ep->client))) { + memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = false; + res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + } + + return res; +} + +int ipa3_disable_data_path(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl]; + struct ipa_ep_cfg_holb holb_cfg; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + struct ipa_ep_cfg_aggr ep_aggr; + int res = 0; + + IPADBG("Disabling data path\n"); + if (IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_EN; + holb_cfg.tmr_val = 0; + res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + + /* + * for IPA 4.0 and above aggregation frame is closed together with + * channel STOP + */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + /* Suspend the pipe */ + if (IPA_CLIENT_IS_CONS(ep->client)) { + /* + * for RG10 workaround uC needs to be loaded before + * pipe can be suspended in this case. 
+ */ + if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) { + IPADBG("uC is not loaded yet, waiting...\n"); + res = wait_for_completion_timeout( + &ipa3_ctx->uc_loaded_completion_obj, + 60 * HZ); + if (res == 0) + IPADBG("timeout waiting for uC load\n"); + } + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = true; + res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + + udelay(IPA_PKT_FLUSH_TO_US); + ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, + &ep_aggr); + if (ep_aggr.aggr_en) { + res = ipa3_tag_aggr_force_close(clnt_hdl); + if (res) { + IPAERR("tag process timeout client:%d err:%d\n", + clnt_hdl, res); + ipa_assert(); + } + } + } + + return res; +} + +static void ipa_chan_err_cb(struct gsi_chan_err_notify *notify) +{ + /* + * These are the errors that hardware has returned, + * which indicates hardware unexpected state. + */ + if (notify) { + switch (notify->evt_id) { + case GSI_CHAN_INVALID_TRE_ERR: + IPAERR("Received GSI_CHAN_INVALID_TRE_ERR\n"); + break; + case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR: + IPAERR("Received GSI_CHAN_NON_ALLOC_EVT_ACCESS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_BUFFERS_ERR: + IPAERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_RESOURCES_ERR: + IPAERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR: + IPAERR("Received GSI_CHAN_UNSUPP_INTER_EE_OP_ERR\n"); + break; + case GSI_CHAN_HWO_1_ERR: + IPAERR("Received GSI_CHAN_HWO_1_ERR\n"); + break; + default: + IPAERR("Unexpected err evt: %d\n", notify->evt_id); + } + BUG(); + } +} + +static void ipa_xfer_cb(struct gsi_chan_xfer_notify *notify) +{ +} + +static int ipa3_reconfigure_channel_to_gpi(struct ipa3_ep_context *ep, + struct gsi_chan_props *orig_chan_props, + struct ipa_mem_buffer *chan_dma) +{ + struct gsi_chan_props chan_props; + enum gsi_status gsi_res; + dma_addr_t chan_dma_addr; + int result; + + /* Set up channel properties */ + memset(&chan_props, 0, sizeof(struct gsi_chan_props)); + chan_props.prot = GSI_CHAN_PROT_GPI; + chan_props.dir = GSI_CHAN_DIR_FROM_GSI; + chan_props.ch_id = orig_chan_props->ch_id; + chan_props.evt_ring_hdl = orig_chan_props->evt_ring_hdl; + chan_props.re_size = GSI_CHAN_RE_SIZE_16B; + chan_props.ring_len = 2 * GSI_CHAN_RE_SIZE_16B; + chan_props.ring_base_vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, chan_props.ring_len, + &chan_dma_addr, 0); + chan_props.ring_base_addr = chan_dma_addr; + chan_dma->base = chan_props.ring_base_vaddr; + chan_dma->phys_base = chan_props.ring_base_addr; + chan_dma->size = chan_props.ring_len; + chan_props.use_db_eng = GSI_CHAN_DIRECT_MODE; + chan_props.max_prefetch = GSI_ONE_PREFETCH_SEG; + chan_props.low_weight = 1; + chan_props.chan_user_data = NULL; + chan_props.err_cb = ipa_chan_err_cb; + chan_props.xfer_cb = ipa_xfer_cb; + + gsi_res = gsi_set_channel_cfg(ep->gsi_chan_hdl, &chan_props, NULL); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error setting channel properties\n"); + result = -EFAULT; + goto set_chan_cfg_fail; + } + + return 0; + +set_chan_cfg_fail: + dma_free_coherent(ipa3_ctx->pdev, chan_dma->size, + chan_dma->base, chan_dma->phys_base); + return result; + +} + +static int ipa3_restore_channel_properties(struct ipa3_ep_context *ep, + struct gsi_chan_props *chan_props, + union gsi_channel_scratch *chan_scratch) +{ + enum gsi_status gsi_res; + + gsi_res = gsi_set_channel_cfg(ep->gsi_chan_hdl, chan_props, + chan_scratch); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error restoring channel 
properties\n"); + return -EFAULT; + } + + return 0; +} + +static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl, + struct ipa3_ep_context *ep) +{ + int result = -EFAULT; + enum gsi_status gsi_res; + struct gsi_chan_props orig_chan_props; + union gsi_channel_scratch orig_chan_scratch; + struct ipa_mem_buffer chan_dma; + void *buff; + dma_addr_t dma_addr; + struct gsi_xfer_elem xfer_elem; + int i; + int aggr_active_bitmap = 0; + bool pipe_suspended = false; + struct ipa_ep_cfg_ctrl ctrl; + + IPADBG("Applying reset channel with open aggregation frame WA\n"); + ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl)); + + /* Reset channel */ + gsi_res = gsi_reset_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error resetting channel: %d\n", gsi_res); + return -EFAULT; + } + + /* Reconfigure channel to dummy GPI channel */ + memset(&orig_chan_props, 0, sizeof(struct gsi_chan_props)); + memset(&orig_chan_scratch, 0, sizeof(union gsi_channel_scratch)); + gsi_res = gsi_get_channel_cfg(ep->gsi_chan_hdl, &orig_chan_props, + &orig_chan_scratch); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error getting channel properties: %d\n", gsi_res); + return -EFAULT; + } + memset(&chan_dma, 0, sizeof(struct ipa_mem_buffer)); + result = ipa3_reconfigure_channel_to_gpi(ep, &orig_chan_props, + &chan_dma); + if (result) + return -EFAULT; + + ipahal_read_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, &ctrl); + if (ctrl.ipa_ep_suspend) { + IPADBG("pipe is suspended, remove suspend\n"); + pipe_suspended = true; + ctrl.ipa_ep_suspend = false; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, + clnt_hdl, &ctrl); + } + + /* Start channel and put 1 Byte descriptor on it */ + gsi_res = gsi_start_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error starting channel: %d\n", gsi_res); + goto start_chan_fail; + } + + memset(&xfer_elem, 0, sizeof(struct gsi_xfer_elem)); + buff = dma_alloc_coherent(ipa3_ctx->pdev, 1, &dma_addr, + GFP_KERNEL); + xfer_elem.addr = dma_addr; + xfer_elem.len = 1; + xfer_elem.flags = GSI_XFER_FLAG_EOT; + xfer_elem.type = GSI_XFER_ELEM_DATA; + + gsi_res = gsi_queue_xfer(ep->gsi_chan_hdl, 1, &xfer_elem, + true); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error queueing xfer: %d\n", gsi_res); + result = -EFAULT; + goto queue_xfer_fail; + } + + /* Wait for aggregation frame to be closed and stop channel*/ + for (i = 0; i < IPA_POLL_AGGR_STATE_RETRIES_NUM; i++) { + aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + if (!(aggr_active_bitmap & (1 << clnt_hdl))) + break; + msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC); + } + + if (aggr_active_bitmap & (1 << clnt_hdl)) { + IPAERR("Failed closing aggr frame for client: %d\n", + clnt_hdl); + /* Unexpected hardware state */ + BUG(); + } + + dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr); + + result = ipa3_stop_gsi_channel(clnt_hdl); + if (result) { + IPAERR("Error stopping channel: %d\n", result); + goto start_chan_fail; + } + + /* Reset channel */ + gsi_res = gsi_reset_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error resetting channel: %d\n", gsi_res); + result = -EFAULT; + goto start_chan_fail; + } + + /* + * Need to sleep for 1ms as required by H/W verified + * sequence for resetting GSI channel + */ + msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC); + + if (pipe_suspended) { + IPADBG("suspend the pipe again\n"); + ctrl.ipa_ep_suspend = true; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, + clnt_hdl, &ctrl); + } + + /* Restore channels properties */ + 
result = ipa3_restore_channel_properties(ep, &orig_chan_props, + &orig_chan_scratch); + if (result) + goto restore_props_fail; + dma_free_coherent(ipa3_ctx->pdev, chan_dma.size, + chan_dma.base, chan_dma.phys_base); + + return 0; + +queue_xfer_fail: + ipa3_stop_gsi_channel(clnt_hdl); + dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr); +start_chan_fail: + if (pipe_suspended) { + IPADBG("suspend the pipe again\n"); + ctrl.ipa_ep_suspend = true; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, + clnt_hdl, &ctrl); + } + ipa3_restore_channel_properties(ep, &orig_chan_props, + &orig_chan_scratch); +restore_props_fail: + dma_free_coherent(ipa3_ctx->pdev, chan_dma.size, + chan_dma.base, chan_dma.phys_base); + return result; +} + +int ipa3_reset_gsi_channel(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + int result = -EFAULT; + enum gsi_status gsi_res; + int aggr_active_bitmap = 0; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + /* + * Check for open aggregation frame on Consumer EP - + * reset with open aggregation frame WA + */ + if (IPA_CLIENT_IS_CONS(ep->client)) { + aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + if (aggr_active_bitmap & (1 << clnt_hdl)) { + result = ipa3_reset_with_open_aggr_frame_wa(clnt_hdl, + ep); + if (result) + goto reset_chan_fail; + goto finish_reset; + } + } + + /* + * Reset channel + * If the reset called after stop, need to wait 1ms + */ + msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC); + gsi_res = gsi_reset_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error resetting channel: %d\n", gsi_res); + result = -EFAULT; + goto reset_chan_fail; + } + +finish_reset: + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("exit\n"); + return 0; + +reset_chan_fail: + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +int ipa3_reset_gsi_event_ring(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + int result = -EFAULT; + enum gsi_status gsi_res; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + /* Reset event ring */ + gsi_res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error resetting event: %d\n", gsi_res); + result = -EFAULT; + goto reset_evt_fail; + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("exit\n"); + return 0; + +reset_evt_fail: + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +static bool ipa3_is_legal_params(struct ipa_request_gsi_channel_params *params) +{ + if (params->client >= IPA_CLIENT_MAX) + return false; + else + return true; +} + +int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map) +{ + struct iommu_domain *smmu_domain; + int res; + + if (ipa3_ctx->smmu_s1_bypass) + return 0; + + smmu_domain = ipa3_get_smmu_domain(); + if (!smmu_domain) { + IPAERR("invalid smmu domain\n"); + return -EINVAL; + } + + if (map) { + res = ipa3_iommu_map(smmu_domain, phys_addr, 
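+ /* peer registers are identity-mapped: the IOVA equals the physical address */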
phys_addr, + PAGE_SIZE, IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + } else { + res = iommu_unmap(smmu_domain, phys_addr, PAGE_SIZE); + res = (res != PAGE_SIZE); + } + if (res) { + IPAERR("Fail to %s reg 0x%pa\n", map ? "map" : "unmap", + &phys_addr); + return -EINVAL; + } + + IPADBG("Peer reg 0x%pa %s\n", &phys_addr, map ? "map" : "unmap"); + + return 0; +} + +int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr, u32 size, bool map) +{ + struct iommu_domain *smmu_domain; + int res; + + if (ipa3_ctx->smmu_s1_bypass) + return 0; + + smmu_domain = ipa3_get_smmu_domain(); + if (!smmu_domain) { + IPAERR("invalid smmu domain\n"); + return -EINVAL; + } + + if (map) { + res = ipa3_iommu_map(smmu_domain, + rounddown(iova, PAGE_SIZE), + rounddown(phys_addr, PAGE_SIZE), + roundup(size + iova - rounddown(iova, PAGE_SIZE), + PAGE_SIZE), + IOMMU_READ | IOMMU_WRITE); + if (res) { + IPAERR("Fail to map 0x%llx->0x%pa\n", iova, &phys_addr); + return -EINVAL; + } + } else { + res = iommu_unmap(smmu_domain, + rounddown(iova, PAGE_SIZE), + roundup(size + iova - rounddown(iova, PAGE_SIZE), + PAGE_SIZE)); + if (res != roundup(size + iova - rounddown(iova, PAGE_SIZE), + PAGE_SIZE)) { + IPAERR("Fail to unmap 0x%llx->0x%pa\n", + iova, &phys_addr); + return -EINVAL; + } + } + + IPADBG("Peer buff %s 0x%llx->0x%pa\n", map ? "map" : "unmap", + iova, &phys_addr); + + return 0; +} + + +int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params, + struct ipa_req_chan_out_params *out_params) +{ + int ipa_ep_idx; + int result = -EFAULT; + struct ipa3_ep_context *ep; + struct ipahal_reg_ep_cfg_status ep_status; + unsigned long gsi_dev_hdl; + enum gsi_status gsi_res; + const struct ipa_gsi_ep_config *gsi_ep_cfg_ptr; + + IPADBG("entry\n"); + if (params == NULL || out_params == NULL || + !ipa3_is_legal_params(params)) { + IPAERR("bad parameters\n"); + return -EINVAL; + } + + ipa_ep_idx = ipa3_get_ep_mapping(params->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to alloc EP.\n"); + goto fail; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (ep->valid) { + IPAERR("EP already allocated.\n"); + goto fail; + } + + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + ep->skip_ep_cfg = params->skip_ep_cfg; + ep->valid = 1; + ep->client = params->client; + ep->client_notify = params->notify; + ep->priv = params->priv; + ep->keep_ipa_awake = params->keep_ipa_awake; + + if (!ep->skip_ep_cfg) { + if (ipa3_cfg_ep(ipa_ep_idx, ¶ms->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto ipa_cfg_ep_fail; + } + /* Setting EP status 0 */ + memset(&ep_status, 0, sizeof(ep_status)); + if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) { + IPAERR("fail to configure status of EP.\n"); + goto ipa_cfg_ep_fail; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("Skipping endpoint configuration.\n"); + } + + out_params->clnt_hdl = ipa_ep_idx; + + result = ipa3_enable_data_path(out_params->clnt_hdl); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + out_params->clnt_hdl); + goto ipa_cfg_ep_fail; + } + + gsi_dev_hdl = ipa3_ctx->gsi_dev_hdl; + gsi_res = gsi_alloc_evt_ring(¶ms->evt_ring_params, gsi_dev_hdl, + &ep->gsi_evt_ring_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error allocating event ring: %d\n", gsi_res); + result = -EFAULT; + goto ipa_cfg_ep_fail; + } + + gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl, + params->evt_scratch); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error writing event ring 
scratch: %d\n", gsi_res); + result = -EFAULT; + goto write_evt_scratch_fail; + } + + gsi_ep_cfg_ptr = ipa3_get_gsi_ep_info(ep->client); + if (gsi_ep_cfg_ptr == NULL) { + IPAERR("Error ipa3_get_gsi_ep_info ret NULL\n"); + result = -EFAULT; + goto write_evt_scratch_fail; + } + + params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl; + params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num; + gsi_res = gsi_alloc_channel(¶ms->chan_params, gsi_dev_hdl, + &ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error allocating channel: %d, chan_id: %d\n", gsi_res, + params->chan_params.ch_id); + result = -EFAULT; + goto write_evt_scratch_fail; + } + + memcpy(&ep->chan_scratch, ¶ms->chan_scratch, + sizeof(union __packed gsi_channel_scratch)); + ep->chan_scratch.xdci.max_outstanding_tre = + params->chan_params.re_size * gsi_ep_cfg_ptr->ipa_if_tlv; + gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl, + params->chan_scratch); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error writing channel scratch: %d\n", gsi_res); + result = -EFAULT; + goto write_chan_scratch_fail; + } + + gsi_res = gsi_query_channel_db_addr(ep->gsi_chan_hdl, + &out_params->db_reg_phs_addr_lsb, + &out_params->db_reg_phs_addr_msb); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error querying channel DB registers addresses: %d\n", + gsi_res); + result = -EFAULT; + goto write_chan_scratch_fail; + } + + ep->gsi_mem_info.evt_ring_len = params->evt_ring_params.ring_len; + ep->gsi_mem_info.evt_ring_base_addr = + params->evt_ring_params.ring_base_addr; + ep->gsi_mem_info.evt_ring_base_vaddr = + params->evt_ring_params.ring_base_vaddr; + ep->gsi_mem_info.chan_ring_len = params->chan_params.ring_len; + ep->gsi_mem_info.chan_ring_base_addr = + params->chan_params.ring_base_addr; + ep->gsi_mem_info.chan_ring_base_vaddr = + params->chan_params.ring_base_vaddr; + + ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(params->client)) + ipa3_install_dflt_flt_rules(ipa_ep_idx); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + IPADBG("client %d (ep: %d) connected\n", params->client, ipa_ep_idx); + IPADBG("exit\n"); + + return 0; + +write_chan_scratch_fail: + gsi_dealloc_channel(ep->gsi_chan_hdl); +write_evt_scratch_fail: + gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); +ipa_cfg_ep_fail: + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +fail: + return result; +} + +int ipa3_set_usb_max_packet_size( + enum ipa_usb_max_usb_packet_size usb_max_packet_size) +{ + struct gsi_device_scratch dev_scratch; + enum gsi_status gsi_res; + + IPADBG("entry\n"); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&dev_scratch, 0, sizeof(struct gsi_device_scratch)); + dev_scratch.mhi_base_chan_idx_valid = false; + dev_scratch.max_usb_pkt_size_valid = true; + dev_scratch.max_usb_pkt_size = usb_max_packet_size; + + gsi_res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl, + &dev_scratch); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error writing device scratch: %d\n", gsi_res); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + IPADBG("exit\n"); + return 0; +} + +int ipa3_xdci_connect(u32 clnt_hdl) +{ + int result; + struct ipa3_ep_context *ep; + + IPADBG("entry\n"); + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + result = 
ipa3_start_gsi_channel(clnt_hdl); + if (result) { + IPAERR("failed to start gsi channel clnt_hdl=%u\n", clnt_hdl); + goto exit; + } + + result = ipa3_enable_data_path(clnt_hdl); + if (result) { + IPAERR("enable data path failed res=%d clnt_hdl=%d.\n", result, + clnt_hdl); + goto stop_ch; + } + + IPADBG("exit\n"); + goto exit; + +stop_ch: + (void)ipa3_stop_gsi_channel(clnt_hdl); +exit: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid) +{ + struct ipa3_ep_context *ep; + int result = -EFAULT; + enum gsi_status gsi_res; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || + xferrscidx > IPA_XFER_RSC_IDX_MAX) { + IPAERR("Bad parameters.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + if (xferrscidx_valid) { + ep->chan_scratch.xdci.xferrscidx = xferrscidx; + gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl, + ep->chan_scratch); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error writing channel scratch: %d\n", gsi_res); + goto write_chan_scratch_fail; + } + } + gsi_res = gsi_start_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error starting channel: %d\n", gsi_res); + goto write_chan_scratch_fail; + } + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("exit\n"); + return 0; + +write_chan_scratch_fail: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info, + unsigned long chan_hdl) +{ + enum gsi_status gsi_res; + + memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info)); + gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error querying channel info: %d\n", gsi_res); + return -EFAULT; + } + if (!gsi_chan_info->evt_valid) { + IPAERR("Event info invalid\n"); + return -EFAULT; + } + + return 0; +} + +static bool ipa3_is_xdci_channel_with_given_info_empty( + struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info) +{ + bool is_empty = false; + + if (!IPA_CLIENT_IS_CONS(ep->client)) { + /* For UL channel: chan.RP == chan.WP */ + is_empty = (chan_info->rp == chan_info->wp); + } else { + /* For DL channel: */ + if (chan_info->wp != + (ep->gsi_mem_info.chan_ring_base_addr + + ep->gsi_mem_info.chan_ring_len - + GSI_CHAN_RE_SIZE_16B)) { + /* if chan.WP != LINK TRB: chan.WP == evt.RP */ + is_empty = (chan_info->wp == chan_info->evt_rp); + } else { + /* + * if chan.WP == LINK TRB: chan.base_xfer_ring_addr + * == evt.RP + */ + is_empty = (ep->gsi_mem_info.chan_ring_base_addr == + chan_info->evt_rp); + } + } + + return is_empty; +} + +static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep, + bool *is_empty) +{ + struct gsi_chan_info chan_info; + int res; + + if (!ep || !is_empty || !ep->valid) { + IPAERR("Input Error\n"); + return -EFAULT; + } + + res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl); + if (res) { + IPAERR("Failed to get GSI channel info\n"); + return -EFAULT; + } + + *is_empty = ipa3_is_xdci_channel_with_given_info_empty(ep, &chan_info); + + return 0; +} + +int ipa3_enable_force_clear(u32 request_id, bool throttle_source, + u32 source_pipe_bitmask) +{ + struct ipa_enable_force_clear_datapath_req_msg_v01 req; + int result; + + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + 
req.source_pipe_bitmask = source_pipe_bitmask; + if (throttle_source) { + req.throttle_source_valid = 1; + req.throttle_source = 1; + } + result = ipa3_qmi_enable_force_clear_datapath_send(&req); + if (result) { + IPAERR("ipa3_qmi_enable_force_clear_datapath_send failed %d\n", + result); + return result; + } + + return 0; +} + +int ipa3_disable_force_clear(u32 request_id) +{ + struct ipa_disable_force_clear_datapath_req_msg_v01 req; + int result; + + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + result = ipa3_qmi_disable_force_clear_datapath_send(&req); + if (result) { + IPAERR("ipa3_qmi_disable_force_clear_datapath_send failed %d\n", + result); + return result; + } + + return 0; +} + +/* Clocks should be voted before invoking this function */ +static int ipa3_xdci_stop_gsi_channel(u32 clnt_hdl, bool *stop_in_proc) +{ + int res; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || + !stop_in_proc) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + res = ipa3_stop_gsi_channel(clnt_hdl); + if (res != 0 && res != -GSI_STATUS_AGAIN && + res != -GSI_STATUS_TIMED_OUT) { + IPAERR("xDCI stop channel failed res=%d\n", res); + return -EFAULT; + } + + if (res) + *stop_in_proc = true; + else + *stop_in_proc = false; + + IPADBG("xDCI channel is %s (result=%d)\n", + res ? "STOP_IN_PROC/TimeOut" : "STOP", res); + + IPADBG("exit\n"); + return 0; +} + +/* Clocks should be voted before invoking this function */ +static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl, + bool *stop_in_proc) +{ + unsigned long jiffies_start; + unsigned long jiffies_timeout = + msecs_to_jiffies(IPA_CHANNEL_STOP_IN_PROC_TO_MSEC); + int res; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || + !stop_in_proc) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + jiffies_start = jiffies; + while (1) { + res = ipa3_xdci_stop_gsi_channel(clnt_hdl, + stop_in_proc); + if (res) { + IPAERR("failed to stop xDCI channel hdl=%d\n", + clnt_hdl); + return res; + } + + if (!*stop_in_proc) { + IPADBG("xDCI channel STOP hdl=%d\n", clnt_hdl); + return res; + } + + /* + * Give chance to the previous stop request to be accomplished + * before the retry + */ + udelay(IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC); + + if (time_after(jiffies, jiffies_start + jiffies_timeout)) { + IPADBG("timeout waiting for xDCI channel emptiness\n"); + return res; + } + } +} + +/* Clocks should be voted for before invoking this function */ +static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id, + u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl) +{ + int result; + bool is_empty = false; + int i; + bool stop_in_proc; + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + /* first try to stop the channel */ + result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl, + &stop_in_proc); + if (result) { + IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + goto exit; + } + if (!stop_in_proc) + goto exit; + + /* if stop_in_proc, lets wait for emptiness */ + for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) { + result = ipa3_is_xdci_channel_empty(ep, &is_empty); + if (result) + goto exit; + if (is_empty) + break; + udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC); + } + /* In case of empty, lets try to stop the channel again */ + if (is_empty) { + result = 
ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl, + &stop_in_proc); + if (result) { + IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + goto exit; + } + if (!stop_in_proc) + goto exit; + } + /* if still stop_in_proc or not empty, activate force clear */ + if (should_force_clear) { + result = ipa3_enable_force_clear(qmi_req_id, false, + source_pipe_bitmask); + if (result) { + struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 }; + + /* + * assuming here modem SSR\shutdown, AP can remove + * the delay in this case + */ + IPAERR( + "failed to force clear %d, remove delay from SCND reg\n" + , result); + ep_ctrl_scnd.endp_delay = false; + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl, + &ep_ctrl_scnd); + } + } + /* with force clear, wait for emptiness */ + for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) { + result = ipa3_is_xdci_channel_empty(ep, &is_empty); + if (result) + goto disable_force_clear_and_exit; + if (is_empty) + break; + + udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC); + } + /* try to stop for the last time */ + result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl, + &stop_in_proc); + if (result) { + IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + goto disable_force_clear_and_exit; + } + result = stop_in_proc ? -EFAULT : 0; + +disable_force_clear_and_exit: + if (should_force_clear) + ipa3_disable_force_clear(qmi_req_id); +exit: + return result; +} + +int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id) +{ + struct ipa3_ep_context *ep; + int result; + u32 source_pipe_bitmask = 0; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipa3_disable_data_path(clnt_hdl); + + if (!IPA_CLIENT_IS_CONS(ep->client)) { + IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + source_pipe_bitmask = 1 << + ipa3_get_ep_mapping(ep->client); + result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id, + source_pipe_bitmask, should_force_clear, clnt_hdl); + if (result) { + IPAERR("Fail to stop UL channel with data drain\n"); + WARN_ON(1); + goto stop_chan_fail; + } + } else { + IPADBG("Stopping CONS channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + result = ipa3_stop_gsi_channel(clnt_hdl); + if (result) { + IPAERR("Error stopping channel (CONS client): %d\n", + result); + goto stop_chan_fail; + } + } + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("exit\n"); + return 0; + +stop_chan_fail: + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +int ipa3_release_gsi_channel(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + int result = -EFAULT; + enum gsi_status gsi_res; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error deallocating channel: %d\n", gsi_res); + goto dealloc_chan_fail; + } + + gsi_res = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error deallocating event: %d\n", 
gsi_res); + goto dealloc_chan_fail; + } + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) + ipa3_delete_dflt_flt_rules(clnt_hdl); + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context)); + + IPADBG("exit\n"); + return 0; + +dealloc_chan_fail: + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + bool should_force_clear, u32 qmi_req_id, bool is_dpl) +{ + struct ipa3_ep_context *ul_ep = NULL; + struct ipa3_ep_context *dl_ep; + int result = -EFAULT; + u32 source_pipe_bitmask = 0; + bool dl_data_pending = true; + bool ul_data_pending = true; + int i; + bool is_empty = false; + struct gsi_chan_info ul_gsi_chan_info, dl_gsi_chan_info; + int aggr_active_bitmap = 0; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + + /* In case of DPL, dl is the DPL channel/client */ + + IPADBG("entry\n"); + if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[dl_clnt_hdl].valid == 0 || + (!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + dl_ep = &ipa3_ctx->ep[dl_clnt_hdl]; + if (!is_dpl) + ul_ep = &ipa3_ctx->ep[ul_clnt_hdl]; + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl)); + + result = ipa3_get_gsi_chan_info(&dl_gsi_chan_info, + dl_ep->gsi_chan_hdl); + if (result) + goto disable_clk_and_exit; + + if (!is_dpl) { + result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info, + ul_ep->gsi_chan_hdl); + if (result) + goto disable_clk_and_exit; + } + + for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) { + if (!dl_data_pending && !ul_data_pending) + break; + result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty); + if (result) + goto disable_clk_and_exit; + if (!is_empty) { + dl_data_pending = true; + break; + } + dl_data_pending = false; + if (!is_dpl) { + result = ipa3_is_xdci_channel_empty(ul_ep, &is_empty); + if (result) + goto disable_clk_and_exit; + ul_data_pending = !is_empty; + } else { + ul_data_pending = false; + } + + udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC); + } + + if (!dl_data_pending) { + aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + if (aggr_active_bitmap & (1 << dl_clnt_hdl)) { + IPADBG("DL/DPL data pending due to open aggr. 
frame\n"); + dl_data_pending = true; + } + } + if (dl_data_pending) { + IPAERR("DL/DPL data pending, can't suspend\n"); + result = -EFAULT; + goto disable_clk_and_exit; + } + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + /* Suspend the DL/DPL EP */ + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = true; + ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl); + } + + /* + * Check if DL/DPL channel is empty again, data could enter the channel + * before its IPA EP was suspended + */ + result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty); + if (result) + goto unsuspend_dl_and_exit; + if (!is_empty) { + IPAERR("DL/DPL data pending, can't suspend\n"); + result = -EFAULT; + goto unsuspend_dl_and_exit; + } + + /* Stop DL channel */ + result = ipa3_stop_gsi_channel(dl_clnt_hdl); + if (result) { + IPAERR("Error stopping DL/DPL channel: %d\n", result); + result = -EFAULT; + goto unsuspend_dl_and_exit; + } + + /* STOP UL channel */ + if (!is_dpl) { + source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client); + result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id, + source_pipe_bitmask, should_force_clear, ul_clnt_hdl); + if (result) { + IPAERR("Error stopping UL channel: result = %d\n", + result); + goto start_dl_and_exit; + } + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl)); + + IPADBG("exit\n"); + return 0; + +start_dl_and_exit: + gsi_start_channel(dl_ep->gsi_chan_hdl); +unsuspend_dl_and_exit: + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + /* Unsuspend the DL EP */ + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = false; + ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl); + } +disable_clk_and_exit: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl)); + return result; +} + +int ipa3_start_gsi_channel(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + int result = -EFAULT; + enum gsi_status gsi_res; + + IPADBG("entry\n"); + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameters.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + gsi_res = gsi_start_channel(ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) { + IPAERR("Error starting channel: %d\n", gsi_res); + goto start_chan_fail; + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("exit\n"); + return 0; + +start_chan_fail: + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl) +{ + struct ipa3_ep_context *ul_ep = NULL; + struct ipa3_ep_context *dl_ep = NULL; + enum gsi_status gsi_res; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + + /* In case of DPL, dl is the DPL channel/client */ + + IPADBG("entry\n"); + if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[dl_clnt_hdl].valid == 0 || + (!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + dl_ep = &ipa3_ctx->ep[dl_clnt_hdl]; + if (!is_dpl) + ul_ep = &ipa3_ctx->ep[ul_clnt_hdl]; + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl)); + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + /* Unsuspend the DL/DPL EP */ + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = false; + ipa3_cfg_ep_ctrl(dl_clnt_hdl, 
&ep_cfg_ctrl); + } + + /* Start DL channel */ + gsi_res = gsi_start_channel(dl_ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) + IPAERR("Error starting DL channel: %d\n", gsi_res); + + /* Start UL channel */ + if (!is_dpl) { + gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl); + if (gsi_res != GSI_STATUS_SUCCESS) + IPAERR("Error starting UL channel: %d\n", gsi_res); + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl)); + + IPADBG("exit\n"); + return 0; +} +/** + * ipa3_clear_endpoint_delay() - Remove ep delay set on the IPA pipe before + * client disconnect. + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to remove + * ep delay on IPA consumer ipe before disconnect in non GPI mode. this api + * expects caller to take responsibility to free any needed headers, routing + * and filtering tables and rules as needed. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_clear_endpoint_delay(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_ctrl = {0}; + struct ipa_enable_force_clear_datapath_req_msg_v01 req = {0}; + int res; + + if (unlikely(!ipa3_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ipa3_ctx->tethered_flow_control) { + IPADBG("APPS flow control is not enabled\n"); + /* Send a message to modem to disable flow control honoring. */ + req.request_id = clnt_hdl; + req.source_pipe_bitmask = 1 << clnt_hdl; + res = ipa3_qmi_enable_force_clear_datapath_send(&req); + if (res) { + IPADBG("enable_force_clear_datapath failed %d\n", + res); + } + ep->qmi_request_sent = true; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + /* Set disconnect in progress flag so further flow control events are + * not honored. + */ + spin_lock(&ipa3_ctx->disconnect_lock); + ep->disconnect_in_progress = true; + spin_unlock(&ipa3_ctx->disconnect_lock); + + /* If flow is disabled at this point, restore the ep state.*/ + ep_ctrl.ipa_ep_delay = false; + ep_ctrl.ipa_ep_suspend = false; + ipa3_cfg_ep_ctrl(clnt_hdl, &ep_ctrl); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl); + + return 0; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c new file mode 100644 index 000000000000..ce9dbb508fbe --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -0,0 +1,2195 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifdef CONFIG_DEBUG_FS + +#include +#include +#include +#include "ipa_i.h" +#include "../ipa_rm_i.h" + +#define IPA_MAX_MSG_LEN 4096 +#define IPA_DBG_MAX_RULE_IN_TBL 128 +#define IPA_DBG_ACTIVE_CLIENT_BUF_SIZE ((IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN \ + * IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) + IPA_MAX_MSG_LEN) + +#define IPA_DUMP_STATUS_FIELD(f) \ + pr_err(#f "=0x%x\n", status->f) + +const char *ipa3_excp_name[] = { + __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0), + __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP), +}; + +const char *ipa3_event_name[] = { + __stringify(WLAN_CLIENT_CONNECT), + __stringify(WLAN_CLIENT_DISCONNECT), + __stringify(WLAN_CLIENT_POWER_SAVE_MODE), + __stringify(WLAN_CLIENT_NORMAL_MODE), + __stringify(SW_ROUTING_ENABLE), + __stringify(SW_ROUTING_DISABLE), + __stringify(WLAN_AP_CONNECT), + __stringify(WLAN_AP_DISCONNECT), + __stringify(WLAN_STA_CONNECT), + __stringify(WLAN_STA_DISCONNECT), + __stringify(WLAN_CLIENT_CONNECT_EX), + __stringify(WLAN_SWITCH_TO_SCC), + __stringify(WLAN_SWITCH_TO_MCC), + __stringify(WLAN_WDI_ENABLE), + __stringify(WLAN_WDI_DISABLE), + __stringify(WAN_UPSTREAM_ROUTE_ADD), + __stringify(WAN_UPSTREAM_ROUTE_DEL), + __stringify(WAN_EMBMS_CONNECT), + __stringify(WAN_XLAT_CONNECT), + __stringify(ECM_CONNECT), + __stringify(ECM_DISCONNECT), + __stringify(IPA_TETHERING_STATS_UPDATE_STATS), + __stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS), + __stringify(IPA_QUOTA_REACH), + __stringify(IPA_SSR_BEFORE_SHUTDOWN), + __stringify(IPA_SSR_AFTER_POWERUP), + __stringify(ADD_VLAN_IFACE), + __stringify(DEL_VLAN_IFACE), + __stringify(ADD_L2TP_VLAN_MAPPING), + __stringify(DEL_L2TP_VLAN_MAPPING) +}; + +const char *ipa3_hdr_l2_type_name[] = { + __stringify(IPA_HDR_L2_NONE), + __stringify(IPA_HDR_L2_ETHERNET_II), + __stringify(IPA_HDR_L2_802_3), +}; + +const char *ipa3_hdr_proc_type_name[] = { + __stringify(IPA_HDR_PROC_NONE), + __stringify(IPA_HDR_PROC_ETHII_TO_ETHII), + __stringify(IPA_HDR_PROC_ETHII_TO_802_3), + __stringify(IPA_HDR_PROC_802_3_TO_ETHII), + __stringify(IPA_HDR_PROC_802_3_TO_802_3), + __stringify(IPA_HDR_PROC_L2TP_HEADER_ADD), + __stringify(IPA_HDR_PROC_L2TP_HEADER_REMOVE), +}; + +static struct dentry *dent; +static struct dentry *dfile_gen_reg; +static struct dentry *dfile_ep_reg; +static struct dentry *dfile_keep_awake; +static struct dentry *dfile_ep_holb; +static struct dentry *dfile_hdr; +static struct dentry *dfile_proc_ctx; +static struct dentry *dfile_ip4_rt; +static struct dentry *dfile_ip4_rt_hw; +static struct dentry *dfile_ip6_rt; +static struct dentry *dfile_ip6_rt_hw; +static struct dentry *dfile_ip4_flt; +static struct dentry *dfile_ip4_flt_hw; +static struct dentry *dfile_ip6_flt; +static struct dentry *dfile_ip6_flt_hw; +static struct dentry *dfile_stats; +static struct dentry *dfile_wstats; +static struct dentry *dfile_wdi_stats; +static struct dentry *dfile_ntn_stats; +static struct dentry *dfile_dbg_cnt; +static struct dentry *dfile_msg; +static struct dentry *dfile_ip4_nat; +static struct dentry *dfile_rm_stats; +static struct dentry *dfile_status_stats; +static struct dentry *dfile_active_clients; +static char dbg_buff[IPA_MAX_MSG_LEN]; +static char *active_clients_buf; + +static s8 ep_reg_idx; +static void *ipa_ipc_low_buff; + + +static 
ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + struct ipahal_reg_shared_mem_size smem_sz; + + memset(&smem_sz, 0, sizeof(smem_sz)); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz); + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA_VERSION=0x%x\n" + "IPA_COMP_HW_VERSION=0x%x\n" + "IPA_ROUTE=0x%x\n" + "IPA_SHARED_MEM_RESTRICTED=0x%x\n" + "IPA_SHARED_MEM_SIZE=0x%x\n", + ipahal_read_reg(IPA_VERSION), + ipahal_read_reg(IPA_COMP_HW_VERSION), + ipahal_read_reg(IPA_ROUTE), + smem_sz.shared_mem_baddr, + smem_sz.shared_mem_sz); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa3_write_ep_holb(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct ipa_ep_cfg_holb holb; + u32 en; + u32 tmr_val; + u32 ep_idx; + unsigned long missing; + char *sptr, *token; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + + sptr = dbg_buff; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &ep_idx)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &en)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &tmr_val)) + return -EINVAL; + + holb.en = en; + holb.tmr_val = tmr_val; + + ipa3_cfg_ep_holb(ep_idx, &holb); + + return count; +} + +static ssize_t ipa3_write_ep_reg(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + if (option >= ipa3_ctx->ipa_num_pipes) { + IPAERR("bad pipe specified %u\n", option); + return count; + } + + ep_reg_idx = option; + + return count; +} + +/** + * _ipa_read_ep_reg_v3_0() - Reads and prints endpoint configuration registers + * + * Returns the number of characters printed + */ +int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe) +{ + return scnprintf( + dbg_buff, IPA_MAX_MSG_LEN, + "IPA_ENDP_INIT_NAT_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n" + "IPA_ENDP_INIT_MODE_%u=0x%x\n" + "IPA_ENDP_INIT_AGGR_%u=0x%x\n" + "IPA_ENDP_INIT_ROUTE_%u=0x%x\n" + "IPA_ENDP_INIT_CTRL_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n" + "IPA_ENDP_INIT_DEAGGR_%u=0x%x\n" + "IPA_ENDP_INIT_CFG_%u=0x%x\n", + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_ROUTE_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe)); +} + +/** + * _ipa_read_ep_reg_v4_0() - Reads and prints endpoint configuration registers + * + * Returns the number of characters 
printed + * Removed IPA_ENDP_INIT_ROUTE_n from v3 + */ +int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe) +{ + return scnprintf( + dbg_buff, IPA_MAX_MSG_LEN, + "IPA_ENDP_INIT_NAT_%u=0x%x\n" + "IPA_ENDP_INIT_CONN_TRACK_n%u=0x%x\n" + "IPA_ENDP_INIT_HDR_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n" + "IPA_ENDP_INIT_MODE_%u=0x%x\n" + "IPA_ENDP_INIT_AGGR_%u=0x%x\n" + "IPA_ENDP_INIT_CTRL_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n" + "IPA_ENDP_INIT_DEAGGR_%u=0x%x\n" + "IPA_ENDP_INIT_CFG_%u=0x%x\n", + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CONN_TRACK_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe), + pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe)); +} + +static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int i; + int start_idx; + int end_idx; + int size = 0; + int ret; + loff_t pos; + + /* negative ep_reg_idx means all registers */ + if (ep_reg_idx < 0) { + start_idx = 0; + end_idx = ipa3_ctx->ipa_num_pipes; + } else { + start_idx = ep_reg_idx; + end_idx = start_idx + 1; + } + pos = *ppos; + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + for (i = start_idx; i < end_idx; i++) { + + nbytes = ipa3_ctx->ctrl->ipa3_read_ep_reg(dbg_buff, + IPA_MAX_MSG_LEN, i); + + *ppos = pos; + ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff, + nbytes); + if (ret < 0) { + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return ret; + } + + size += ret; + ubuf += nbytes; + count -= nbytes; + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + *ppos = pos + size; + return size; +} + +static ssize_t ipa3_write_keep_awake(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + if (option == 1) + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + else if (option == 0) + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + else + return -EFAULT; + + return count; +} + +static ssize_t ipa3_read_keep_awake(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + + mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA APPS power state is ON\n"); + else + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA APPS power state is OFF\n"); + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa3_read_hdr(struct file *file, char __user *ubuf, size_t count, + loff_t *ppos) +{ + int nbytes = 0; + int i = 0; + struct ipa3_hdr_entry *entry; + + mutex_lock(&ipa3_ctx->lock); + + if (ipa3_ctx->hdr_tbl_lcl) + pr_err("Table resides on local memory\n"); + else + pr_err("Table resides on system (ddr) memory\n"); + + list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list, + link) { + nbytes = 
scnprintf( + dbg_buff, + IPA_MAX_MSG_LEN, + "name:%s len=%d ref=%d partial=%d type=%s ", + entry->name, + entry->hdr_len, + entry->ref_cnt, + entry->is_partial, + ipa3_hdr_l2_type_name[entry->type]); + + if (entry->is_hdr_proc_ctx) { + nbytes += scnprintf( + dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "phys_base=0x%pa ", + &entry->phys_base); + } else { + nbytes += scnprintf( + dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "ofst=%u ", + entry->offset_entry->offset >> 2); + } + for (i = 0; i < entry->hdr_len; i++) { + scnprintf(dbg_buff + nbytes + i * 2, + IPA_MAX_MSG_LEN - nbytes - i * 2, + "%02x", entry->hdr[i]); + } + scnprintf(dbg_buff + nbytes + entry->hdr_len * 2, + IPA_MAX_MSG_LEN - nbytes - entry->hdr_len * 2, + "\n"); + pr_err("%s", dbg_buff); + } + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib, + enum ipa_ip_type ip) +{ + uint32_t addr[4]; + uint32_t mask[4]; + int i; + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) + pr_err("tos_value:%d ", attrib->tos_value); + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) + pr_err("tos_mask:%d ", attrib->tos_mask); + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) + pr_err("protocol:%d ", attrib->u.v4.protocol); + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ip == IPA_IP_v4) { + addr[0] = htonl(attrib->u.v4.src_addr); + mask[0] = htonl(attrib->u.v4.src_addr_mask); + pr_err( + "src_addr:%pI4 src_addr_mask:%pI4 ", + addr + 0, mask + 0); + } else if (ip == IPA_IP_v6) { + for (i = 0; i < 4; i++) { + addr[i] = htonl(attrib->u.v6.src_addr[i]); + mask[i] = htonl(attrib->u.v6.src_addr_mask[i]); + } + pr_err( + "src_addr:%pI6 src_addr_mask:%pI6 ", + addr + 0, mask + 0); + } + } + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ip == IPA_IP_v4) { + addr[0] = htonl(attrib->u.v4.dst_addr); + mask[0] = htonl(attrib->u.v4.dst_addr_mask); + pr_err( + "dst_addr:%pI4 dst_addr_mask:%pI4 ", + addr + 0, mask + 0); + } else if (ip == IPA_IP_v6) { + for (i = 0; i < 4; i++) { + addr[i] = htonl(attrib->u.v6.dst_addr[i]); + mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]); + } + pr_err( + "dst_addr:%pI6 dst_addr_mask:%pI6 ", + addr + 0, mask + 0); + } + } + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + pr_err("src_port_range:%u %u ", + attrib->src_port_lo, + attrib->src_port_hi); + } + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + pr_err("dst_port_range:%u %u ", + attrib->dst_port_lo, + attrib->dst_port_hi); + } + if (attrib->attrib_mask & IPA_FLT_TYPE) + pr_err("type:%d ", attrib->type); + + if (attrib->attrib_mask & IPA_FLT_CODE) + pr_err("code:%d ", attrib->code); + + if (attrib->attrib_mask & IPA_FLT_SPI) + pr_err("spi:%x ", attrib->spi); + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) + pr_err("src_port:%u ", attrib->src_port); + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) + pr_err("dst_port:%u ", attrib->dst_port); + + if (attrib->attrib_mask & IPA_FLT_TC) + pr_err("tc:%d ", attrib->u.v6.tc); + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) + pr_err("flow_label:%x ", attrib->u.v6.flow_label); + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) + pr_err("next_hdr:%d ", attrib->u.v6.next_hdr); + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + pr_err( + "metadata:%x metadata_mask:%x ", + attrib->meta_data, attrib->meta_data_mask); + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) + pr_err("frg "); + + if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) || + (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) { + pr_err("src_mac_addr:%pM ", 
attrib->src_mac_addr); + } + + if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) || + (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) || + (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP)) { + pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr); + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) + pr_err("ether_type:%x ", attrib->ether_type); + + pr_err("\n"); + return 0; +} + +static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib) +{ + uint8_t addr[16]; + uint8_t mask[16]; + int i; + int j; + + if (attrib->tos_eq_present) + pr_err("tos_value:%d ", attrib->tos_eq); + + if (attrib->protocol_eq_present) + pr_err("protocol:%d ", attrib->protocol_eq); + + if (attrib->tc_eq_present) + pr_err("tc:%d ", attrib->tc_eq); + + for (i = 0; i < attrib->num_offset_meq_128; i++) { + for (j = 0; j < 16; j++) { + addr[j] = attrib->offset_meq_128[i].value[j]; + mask[j] = attrib->offset_meq_128[i].mask[j]; + } + pr_err( + "(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ", + attrib->offset_meq_128[i].offset, + mask, addr); + } + + for (i = 0; i < attrib->num_offset_meq_32; i++) + pr_err( + "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ", + attrib->offset_meq_32[i].offset, + attrib->offset_meq_32[i].mask, + attrib->offset_meq_32[i].value); + + for (i = 0; i < attrib->num_ihl_offset_meq_32; i++) + pr_err( + "(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ", + attrib->ihl_offset_meq_32[i].offset, + attrib->ihl_offset_meq_32[i].mask, + attrib->ihl_offset_meq_32[i].value); + + if (attrib->metadata_meq32_present) + pr_err( + "(metadata: ofst:%u mask:0x%x val:0x%x) ", + attrib->metadata_meq32.offset, + attrib->metadata_meq32.mask, + attrib->metadata_meq32.value); + + for (i = 0; i < attrib->num_ihl_offset_range_16; i++) + pr_err( + "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ", + attrib->ihl_offset_range_16[i].offset, + attrib->ihl_offset_range_16[i].range_low, + attrib->ihl_offset_range_16[i].range_high); + + if (attrib->ihl_offset_eq_32_present) + pr_err( + "(ihl_ofst_eq32:%d val:0x%x) ", + attrib->ihl_offset_eq_32.offset, + attrib->ihl_offset_eq_32.value); + + if (attrib->ihl_offset_eq_16_present) + pr_err( + "(ihl_ofst_eq16:%d val:0x%x) ", + attrib->ihl_offset_eq_16.offset, + attrib->ihl_offset_eq_16.value); + + if (attrib->fl_eq_present) + pr_err("flow_label:%d ", attrib->fl_eq); + + if (attrib->ipv4_frag_eq_present) + pr_err("frag "); + + pr_err("\n"); + return 0; +} + +static int ipa3_open_dbg(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count, + loff_t *ppos) +{ + int i = 0; + struct ipa3_rt_tbl *tbl; + struct ipa3_rt_entry *entry; + struct ipa3_rt_tbl_set *set; + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + u32 ofst; + u32 ofst_words; + + set = &ipa3_ctx->rt_tbl_set[ip]; + + mutex_lock(&ipa3_ctx->lock); + + if (ip == IPA_IP_v6) { + if (ipa3_ctx->ip6_rt_tbl_hash_lcl) + pr_err("Hashable table resides on local memory\n"); + else + pr_err("Hashable table resides on system (ddr) memory\n"); + if (ipa3_ctx->ip6_rt_tbl_nhash_lcl) + pr_err("Non-Hashable table resides on local memory\n"); + else + pr_err("Non-Hashable table resides on system (ddr) memory\n"); + } else if (ip == IPA_IP_v4) { + if (ipa3_ctx->ip4_rt_tbl_hash_lcl) + pr_err("Hashable table resides on local memory\n"); + else + pr_err("Hashable table resides on system (ddr) memory\n"); + if (ipa3_ctx->ip4_rt_tbl_nhash_lcl) + pr_err("Non-Hashable table resides on local memory\n"); + else + 
pr_err("Non-Hashable table resides on system (ddr) memory\n"); + } + + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + i = 0; + list_for_each_entry(entry, &tbl->head_rt_rule_list, link) { + if (entry->proc_ctx) { + ofst = entry->proc_ctx->offset_entry->offset; + ofst_words = + (ofst + + ipa3_ctx->hdr_proc_ctx_tbl.start_offset) + >> 5; + + pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ", + entry->tbl->idx, entry->tbl->name, + entry->tbl->ref_cnt); + pr_err("rule_idx:%d dst:%d ep:%d S:%u ", + i, entry->rule.dst, + ipa3_get_ep_mapping(entry->rule.dst), + !ipa3_ctx->hdr_proc_ctx_tbl_lcl); + pr_err("proc_ctx[32B]:%u attrib_mask:%08x ", + ofst_words, + entry->rule.attrib.attrib_mask); + pr_err("rule_id:%u max_prio:%u prio:%u ", + entry->rule_id, entry->rule.max_prio, + entry->prio); + pr_err("hashable:%u retain_hdr:%u ", + entry->rule.hashable, + entry->rule.retain_hdr); + } else { + if (entry->hdr) + ofst = entry->hdr->offset_entry->offset; + else + ofst = 0; + + pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ", + entry->tbl->idx, entry->tbl->name, + entry->tbl->ref_cnt); + pr_err("rule_idx:%d dst:%d ep:%d S:%u ", + i, entry->rule.dst, + ipa3_get_ep_mapping(entry->rule.dst), + !ipa3_ctx->hdr_tbl_lcl); + pr_err("hdr_ofst[words]:%u attrib_mask:%08x ", + ofst >> 2, + entry->rule.attrib.attrib_mask); + pr_err("rule_id:%u max_prio:%u prio:%u ", + entry->rule_id, entry->rule.max_prio, + entry->prio); + pr_err("hashable:%u retain_hdr:%u ", + entry->rule.hashable, + entry->rule.retain_hdr); + } + + ipa3_attrib_dump(&entry->rule.attrib, ip); + i++; + } + } + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + int tbls_num; + int rules_num; + int tbl; + int rl; + int res = 0; + struct ipahal_rt_rule_entry *rules = NULL; + + switch (ip) { + case IPA_IP_v4: + tbls_num = IPA_MEM_PART(v4_rt_num_index); + break; + case IPA_IP_v6: + tbls_num = IPA_MEM_PART(v6_rt_num_index); + break; + default: + IPAERR("ip type error %d\n", ip); + return -EINVAL; + }; + + IPADBG("Tring to parse %d H/W routing tables - IP=%d\n", tbls_num, ip); + + rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL); + if (!rules) { + IPAERR("failed to allocate mem for tbl rules\n"); + return -ENOMEM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + mutex_lock(&ipa3_ctx->lock); + + for (tbl = 0 ; tbl < tbls_num ; tbl++) { + pr_err("=== Routing Table %d = Hashable Rules ===\n", tbl); + rules_num = IPA_DBG_MAX_RULE_IN_TBL; + res = ipa3_rt_read_tbl_from_hw(tbl, ip, true, rules, + &rules_num); + if (res) { + pr_err("ERROR - Check the logs\n"); + IPAERR("failed reading tbl from hw\n"); + goto bail; + } + if (!rules_num) + pr_err("-->No rules. 
Empty tbl or modem system table\n"); + + for (rl = 0 ; rl < rules_num ; rl++) { + pr_err("rule_idx:%d dst ep:%d L:%u ", + rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl); + + if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX) + pr_err("proc_ctx:%u attrib_mask:%08x ", + rules[rl].hdr_ofst, + rules[rl].eq_attrib.rule_eq_bitmap); + else + pr_err("hdr_ofst:%u attrib_mask:%08x ", + rules[rl].hdr_ofst, + rules[rl].eq_attrib.rule_eq_bitmap); + + pr_err("rule_id:%u prio:%u retain_hdr:%u ", + rules[rl].id, rules[rl].priority, + rules[rl].retain_hdr); + ipa3_attrib_dump_eq(&rules[rl].eq_attrib); + } + + pr_err("=== Routing Table %d = Non-Hashable Rules ===\n", tbl); + rules_num = IPA_DBG_MAX_RULE_IN_TBL; + res = ipa3_rt_read_tbl_from_hw(tbl, ip, false, rules, + &rules_num); + if (res) { + pr_err("ERROR - Check the logs\n"); + IPAERR("failed reading tbl from hw\n"); + goto bail; + } + if (!rules_num) + pr_err("-->No rules. Empty tbl or modem system table\n"); + + for (rl = 0 ; rl < rules_num ; rl++) { + pr_err("rule_idx:%d dst ep:%d L:%u ", + rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl); + + if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX) + pr_err("proc_ctx:%u attrib_mask:%08x ", + rules[rl].hdr_ofst, + rules[rl].eq_attrib.rule_eq_bitmap); + else + pr_err("hdr_ofst:%u attrib_mask:%08x ", + rules[rl].hdr_ofst, + rules[rl].eq_attrib.rule_eq_bitmap); + + pr_err("rule_id:%u prio:%u retain_hdr:%u\n", + rules[rl].id, rules[rl].priority, + rules[rl].retain_hdr); + ipa3_attrib_dump_eq(&rules[rl].eq_attrib); + } + pr_err("\n"); + } + +bail: + mutex_unlock(&ipa3_ctx->lock); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + kfree(rules); + return res; +} + +static ssize_t ipa3_read_proc_ctx(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipa3_hdr_proc_ctx_tbl *tbl; + struct ipa3_hdr_proc_ctx_entry *entry; + u32 ofst_words; + + tbl = &ipa3_ctx->hdr_proc_ctx_tbl; + + mutex_lock(&ipa3_ctx->lock); + + if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) + pr_info("Table resides on local memory\n"); + else + pr_info("Table resides on system(ddr) memory\n"); + + list_for_each_entry(entry, &tbl->head_proc_ctx_entry_list, link) { + ofst_words = (entry->offset_entry->offset + + ipa3_ctx->hdr_proc_ctx_tbl.start_offset) + >> 5; + if (entry->hdr->is_hdr_proc_ctx) { + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ", + entry->id, + ipa3_hdr_proc_type_name[entry->type], + ofst_words); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "hdr_phys_base:0x%pa\n", + &entry->hdr->phys_base); + } else { + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ", + entry->id, + ipa3_hdr_proc_type_name[entry->type], + ofst_words); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "hdr[words]:%u\n", + entry->hdr->offset_entry->offset >> 2); + } + } + mutex_unlock(&ipa3_ctx->lock); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count, + loff_t *ppos) +{ + int i; + int j; + struct ipa3_flt_tbl *tbl; + struct ipa3_flt_entry *entry; + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + struct ipa3_rt_tbl *rt_tbl; + u32 rt_tbl_idx; + u32 bitmap; + bool eq; + + mutex_lock(&ipa3_ctx->lock); + + for (j = 0; j < ipa3_ctx->ipa_num_pipes; j++) { + if (!ipa_is_ep_support_flt(j)) + continue; + tbl = &ipa3_ctx->flt_tbl[j][ip]; + i = 0; + 
list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + if (entry->rule.eq_attrib_type) { + rt_tbl_idx = entry->rule.rt_tbl_idx; + bitmap = entry->rule.eq_attrib.rule_eq_bitmap; + eq = true; + } else { + rt_tbl = ipa3_id_find(entry->rule.rt_tbl_hdl); + if (rt_tbl) + rt_tbl_idx = rt_tbl->idx; + else + rt_tbl_idx = ~0; + bitmap = entry->rule.attrib.attrib_mask; + eq = false; + } + pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ", + j, i, entry->rule.action, rt_tbl_idx); + pr_err("attrib_mask:%08x retain_hdr:%d eq:%d ", + bitmap, entry->rule.retain_hdr, eq); + pr_err("hashable:%u rule_id:%u max_prio:%u prio:%u ", + entry->rule.hashable, entry->rule_id, + entry->rule.max_prio, entry->prio); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + pr_err("pdn index %d, set metadata %d ", + entry->rule.pdn_idx, + entry->rule.set_metadata); + if (eq) + ipa3_attrib_dump_eq( + &entry->rule.eq_attrib); + else + ipa3_attrib_dump( + &entry->rule.attrib, ip); + i++; + } + } + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int pipe; + int rl; + int rules_num; + struct ipahal_flt_rule_entry *rules; + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + u32 rt_tbl_idx; + u32 bitmap; + int res = 0; + + IPADBG("Tring to parse %d H/W filtering tables - IP=%d\n", + ipa3_ctx->ep_flt_num, ip); + + rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL); + if (!rules) + return -ENOMEM; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + mutex_lock(&ipa3_ctx->lock); + for (pipe = 0; pipe < ipa3_ctx->ipa_num_pipes; pipe++) { + if (!ipa_is_ep_support_flt(pipe)) + continue; + pr_err("=== Filtering Table ep:%d = Hashable Rules ===\n", + pipe); + rules_num = IPA_DBG_MAX_RULE_IN_TBL; + res = ipa3_flt_read_tbl_from_hw(pipe, ip, true, rules, + &rules_num); + if (res) { + pr_err("ERROR - Check the logs\n"); + IPAERR("failed reading tbl from hw\n"); + goto bail; + } + if (!rules_num) + pr_err("-->No rules. Empty tbl or modem sys table\n"); + + for (rl = 0; rl < rules_num; rl++) { + rt_tbl_idx = rules[rl].rule.rt_tbl_idx; + bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap; + pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ", + pipe, rl, rules[rl].rule.action, rt_tbl_idx); + pr_err("attrib_mask:%08x retain_hdr:%d ", + bitmap, rules[rl].rule.retain_hdr); + pr_err("rule_id:%u prio:%u ", + rules[rl].id, rules[rl].priority); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + pr_err("pdn: %u, set_metadata: %u ", + rules[rl].rule.pdn_idx, + rules[rl].rule.set_metadata); + ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib); + } + + pr_err("=== Filtering Table ep:%d = Non-Hashable Rules ===\n", + pipe); + rules_num = IPA_DBG_MAX_RULE_IN_TBL; + res = ipa3_flt_read_tbl_from_hw(pipe, ip, false, rules, + &rules_num); + if (res) { + IPAERR("failed reading tbl from hw\n"); + goto bail; + } + if (!rules_num) + pr_err("-->No rules. 
Empty tbl or modem sys table\n"); + for (rl = 0; rl < rules_num; rl++) { + rt_tbl_idx = rules[rl].rule.rt_tbl_idx; + bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap; + pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ", + pipe, rl, rules[rl].rule.action, rt_tbl_idx); + pr_err("attrib_mask:%08x retain_hdr:%d ", + bitmap, rules[rl].rule.retain_hdr); + pr_err("rule_id:%u prio:%u ", + rules[rl].id, rules[rl].priority); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + pr_err("pdn: %u, set_metadata: %u ", + rules[rl].rule.pdn_idx, + rules[rl].rule.set_metadata); + ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib); + } + pr_err("\n"); + } + +bail: + mutex_unlock(&ipa3_ctx->lock); + kfree(rules); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int i; + int cnt = 0; + uint connect = 0; + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) + connect |= (ipa3_ctx->ep[i].valid << i); + + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "sw_tx=%u\n" + "hw_tx=%u\n" + "tx_non_linear=%u\n" + "tx_compl=%u\n" + "wan_rx=%u\n" + "stat_compl=%u\n" + "lan_aggr_close=%u\n" + "wan_aggr_close=%u\n" + "act_clnt=%u\n" + "con_clnt_bmap=0x%x\n" + "wan_rx_empty=%u\n" + "wan_repl_rx_empty=%u\n" + "lan_rx_empty=%u\n" + "lan_repl_rx_empty=%u\n" + "flow_enable=%u\n" + "flow_disable=%u\n", + ipa3_ctx->stats.tx_sw_pkts, + ipa3_ctx->stats.tx_hw_pkts, + ipa3_ctx->stats.tx_non_linear, + ipa3_ctx->stats.tx_pkts_compl, + ipa3_ctx->stats.rx_pkts, + ipa3_ctx->stats.stat_compl, + ipa3_ctx->stats.aggr_close, + ipa3_ctx->stats.wan_aggr_close, + atomic_read(&ipa3_ctx->ipa3_active_clients.cnt), + connect, + ipa3_ctx->stats.wan_rx_empty, + ipa3_ctx->stats.wan_repl_rx_empty, + ipa3_ctx->stats.lan_rx_empty, + ipa3_ctx->stats.lan_repl_rx_empty, + ipa3_ctx->stats.flow_enable, + ipa3_ctx->stats.flow_disable); + cnt += nbytes; + + for (i = 0; i < IPAHAL_PKT_STATUS_EXCEPTION_MAX; i++) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, + "lan_rx_excp[%u:%20s]=%u\n", i, + ipahal_pkt_status_exception_str(i), + ipa3_ctx->stats.rx_excp_pkts[i]); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + +#define HEAD_FRMT_STR "%25s\n" +#define FRMT_STR "%25s %10u\n" +#define FRMT_STR1 "%25s %10u\n\n" + + int cnt = 0; + int nbytes; + int ipa_ep_idx; + enum ipa_client_type client = IPA_CLIENT_WLAN1_PROD; + struct ipa3_ep_context *ep; + + do { + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + HEAD_FRMT_STR, "Client IPA_CLIENT_WLAN1_PROD Stats:"); + cnt += nbytes; + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + break; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (ep->valid != 1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + break; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Avail Fifo Desc:", + atomic_read(&ep->avail_fifo_desc)); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkts Rcvd:", ep->wstats.rx_pkts_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkts Status Rcvd:", + ep->wstats.rx_pkts_status_rcvd); + cnt += nbytes; + + nbytes = 
scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx DH Rcvd:", ep->wstats.rx_hd_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx DH Processed:", + ep->wstats.rx_hd_processed); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx DH Sent Back:", ep->wstats.rx_hd_reply); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkt Leak:", ep->wstats.rx_pkt_leak); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR1, "Rx DP Fail:", ep->wstats.rx_dp_fail); + cnt += nbytes; + + } while (0); + + client = IPA_CLIENT_WLAN1_CONS; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN1_CONS Stats:"); + cnt += nbytes; + while (1) { + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + goto nxt_clnt_cons; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (ep->valid != 1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + goto nxt_clnt_cons; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Tx Pkts Received:", ep->wstats.tx_pkts_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Tx Pkts Sent:", ep->wstats.tx_pkts_sent); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR1, "Tx Pkts Dropped:", + ep->wstats.tx_pkts_dropped); + cnt += nbytes; + +nxt_clnt_cons: + switch (client) { + case IPA_CLIENT_WLAN1_CONS: + client = IPA_CLIENT_WLAN2_CONS; + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN2_CONS Stats:"); + cnt += nbytes; + continue; + case IPA_CLIENT_WLAN2_CONS: + client = IPA_CLIENT_WLAN3_CONS; + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN3_CONS Stats:"); + cnt += nbytes; + continue; + case IPA_CLIENT_WLAN3_CONS: + client = IPA_CLIENT_WLAN4_CONS; + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN4_CONS Stats:"); + cnt += nbytes; + continue; + case IPA_CLIENT_WLAN4_CONS: + default: + break; + } + break; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "\n"HEAD_FRMT_STR, "All Wlan Consumer pipes stats:"); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR, + "Tx Comm Buff Allocated:", + ipa3_ctx->wc_memb.wlan_comm_total_cnt); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR, + "Tx Comm Buff Avail:", ipa3_ctx->wc_memb.wlan_comm_free_cnt); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR1, + "Total Tx Pkts Freed:", ipa3_ctx->wc_memb.total_tx_pkts_freed); + cnt += nbytes; + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ +#define TX_STATS(y) \ + ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y +#define RX_STATS(y) \ + ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y + + struct Ipa3HwStatsNTNInfoData_t stats; + int nbytes; + int cnt = 0; + + if (!ipa3_get_ntn_stats(&stats)) { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "TX num_pkts_processed=%u\n" + "TX 
ringFull=%u\n" + "TX ringEmpty=%u\n" + "TX ringUsageHigh=%u\n" + "TX ringUsageLow=%u\n" + "TX RingUtilCount=%u\n" + "TX bamFifoFull=%u\n" + "TX bamFifoEmpty=%u\n" + "TX bamFifoUsageHigh=%u\n" + "TX bamFifoUsageLow=%u\n" + "TX bamUtilCount=%u\n" + "TX num_db=%u\n" + "TX num_qmb_int_handled=%u\n" + "TX ipa_pipe_number=%u\n", + TX_STATS(num_pkts_processed), + TX_STATS(ring_stats.ringFull), + TX_STATS(ring_stats.ringEmpty), + TX_STATS(ring_stats.ringUsageHigh), + TX_STATS(ring_stats.ringUsageLow), + TX_STATS(ring_stats.RingUtilCount), + TX_STATS(gsi_stats.bamFifoFull), + TX_STATS(gsi_stats.bamFifoEmpty), + TX_STATS(gsi_stats.bamFifoUsageHigh), + TX_STATS(gsi_stats.bamFifoUsageLow), + TX_STATS(gsi_stats.bamUtilCount), + TX_STATS(num_db), + TX_STATS(num_qmb_int_handled), + TX_STATS(ipa_pipe_number)); + cnt += nbytes; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "RX num_pkts_processed=%u\n" + "RX ringFull=%u\n" + "RX ringEmpty=%u\n" + "RX ringUsageHigh=%u\n" + "RX ringUsageLow=%u\n" + "RX RingUtilCount=%u\n" + "RX bamFifoFull=%u\n" + "RX bamFifoEmpty=%u\n" + "RX bamFifoUsageHigh=%u\n" + "RX bamFifoUsageLow=%u\n" + "RX bamUtilCount=%u\n" + "RX num_db=%u\n" + "RX num_qmb_int_handled=%u\n" + "RX ipa_pipe_number=%u\n", + RX_STATS(num_pkts_processed), + RX_STATS(ring_stats.ringFull), + RX_STATS(ring_stats.ringEmpty), + RX_STATS(ring_stats.ringUsageHigh), + RX_STATS(ring_stats.ringUsageLow), + RX_STATS(ring_stats.RingUtilCount), + RX_STATS(gsi_stats.bamFifoFull), + RX_STATS(gsi_stats.bamFifoEmpty), + RX_STATS(gsi_stats.bamFifoUsageHigh), + RX_STATS(gsi_stats.bamFifoUsageLow), + RX_STATS(gsi_stats.bamUtilCount), + RX_STATS(num_db), + RX_STATS(num_qmb_int_handled), + RX_STATS(ipa_pipe_number)); + cnt += nbytes; + } else { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "Fail to read NTN stats\n"); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct IpaHwStatsWDIInfoData_t stats; + int nbytes; + int cnt = 0; + struct IpaHwStatsWDITxInfoData_t *tx_ch_ptr; + + if (!ipa3_get_wdi_stats(&stats)) { + tx_ch_ptr = &stats.tx_ch_stats; + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "TX num_pkts_processed=%u\n" + "TX copy_engine_doorbell_value=%u\n" + "TX num_db_fired=%u\n" + "TX ringFull=%u\n" + "TX ringEmpty=%u\n" + "TX ringUsageHigh=%u\n" + "TX ringUsageLow=%u\n" + "TX RingUtilCount=%u\n" + "TX bamFifoFull=%u\n" + "TX bamFifoEmpty=%u\n" + "TX bamFifoUsageHigh=%u\n" + "TX bamFifoUsageLow=%u\n" + "TX bamUtilCount=%u\n" + "TX num_db=%u\n" + "TX num_unexpected_db=%u\n" + "TX num_bam_int_handled=%u\n" + "TX num_bam_int_in_non_running_state=%u\n" + "TX num_qmb_int_handled=%u\n" + "TX num_bam_int_handled_while_wait_for_bam=%u\n", + tx_ch_ptr->num_pkts_processed, + tx_ch_ptr->copy_engine_doorbell_value, + tx_ch_ptr->num_db_fired, + tx_ch_ptr->tx_comp_ring_stats.ringFull, + tx_ch_ptr->tx_comp_ring_stats.ringEmpty, + tx_ch_ptr->tx_comp_ring_stats.ringUsageHigh, + tx_ch_ptr->tx_comp_ring_stats.ringUsageLow, + tx_ch_ptr->tx_comp_ring_stats.RingUtilCount, + tx_ch_ptr->bam_stats.bamFifoFull, + tx_ch_ptr->bam_stats.bamFifoEmpty, + tx_ch_ptr->bam_stats.bamFifoUsageHigh, + tx_ch_ptr->bam_stats.bamFifoUsageLow, + tx_ch_ptr->bam_stats.bamUtilCount, + tx_ch_ptr->num_db, + tx_ch_ptr->num_unexpected_db, + tx_ch_ptr->num_bam_int_handled, + tx_ch_ptr->num_bam_int_in_non_running_state, + tx_ch_ptr->num_qmb_int_handled, + 
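+/*
+ * The NTN and WDI nodes both report counters maintained by the IPA uC.
+ * TX_STATS()/RX_STATS() above are simple field accessors into the uC NTN
+ * statistics area mapped at uc_ntn_ctx.ntn_uc_stats_mmio; both hard-code
+ * channel 0. For example, TX_STATS(ring_stats.ringFull) expands to
+ *
+ *	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].ring_stats.ringFull
+ *
+ * The local "stats" copy filled by ipa3_get_ntn_stats() is only used there
+ * as a success check; the printed values come from the live mmio fields.
+ */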
tx_ch_ptr->num_bam_int_handled_while_wait_for_bam); + cnt += nbytes; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "RX max_outstanding_pkts=%u\n" + "RX num_pkts_processed=%u\n" + "RX rx_ring_rp_value=%u\n" + "RX ringFull=%u\n" + "RX ringEmpty=%u\n" + "RX ringUsageHigh=%u\n" + "RX ringUsageLow=%u\n" + "RX RingUtilCount=%u\n" + "RX bamFifoFull=%u\n" + "RX bamFifoEmpty=%u\n" + "RX bamFifoUsageHigh=%u\n" + "RX bamFifoUsageLow=%u\n" + "RX bamUtilCount=%u\n" + "RX num_bam_int_handled=%u\n" + "RX num_db=%u\n" + "RX num_unexpected_db=%u\n" + "RX num_pkts_in_dis_uninit_state=%u\n" + "RX num_ic_inj_vdev_change=%u\n" + "RX num_ic_inj_fw_desc_change=%u\n" + "RX num_qmb_int_handled=%u\n" + "RX reserved1=%u\n" + "RX reserved2=%u\n", + stats.rx_ch_stats.max_outstanding_pkts, + stats.rx_ch_stats.num_pkts_processed, + stats.rx_ch_stats.rx_ring_rp_value, + stats.rx_ch_stats.rx_ind_ring_stats.ringFull, + stats.rx_ch_stats.rx_ind_ring_stats.ringEmpty, + stats.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh, + stats.rx_ch_stats.rx_ind_ring_stats.ringUsageLow, + stats.rx_ch_stats.rx_ind_ring_stats.RingUtilCount, + stats.rx_ch_stats.bam_stats.bamFifoFull, + stats.rx_ch_stats.bam_stats.bamFifoEmpty, + stats.rx_ch_stats.bam_stats.bamFifoUsageHigh, + stats.rx_ch_stats.bam_stats.bamFifoUsageLow, + stats.rx_ch_stats.bam_stats.bamUtilCount, + stats.rx_ch_stats.num_bam_int_handled, + stats.rx_ch_stats.num_db, + stats.rx_ch_stats.num_unexpected_db, + stats.rx_ch_stats.num_pkts_in_dis_uninit_state, + stats.rx_ch_stats.num_ic_inj_vdev_change, + stats.rx_ch_stats.num_ic_inj_fw_desc_change, + stats.rx_ch_stats.num_qmb_int_handled, + stats.rx_ch_stats.reserved1, + stats.rx_ch_stats.reserved2); + cnt += nbytes; + } else { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "Fail to read WDI stats\n"); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_write_dbg_cnt(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + u32 option = 0; + struct ipahal_reg_debug_cnt_ctrl dbg_cnt_ctrl; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + IPAERR("IPA_DEBUG_CNT_CTRL is not supported in IPA 4.0\n"); + return -EPERM; + } + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtou32(dbg_buff, 0, &option)) + return -EFAULT; + + memset(&dbg_cnt_ctrl, 0, sizeof(dbg_cnt_ctrl)); + dbg_cnt_ctrl.type = DBG_CNT_TYPE_GENERAL; + dbg_cnt_ctrl.product = true; + dbg_cnt_ctrl.src_pipe = 0xff; + dbg_cnt_ctrl.rule_idx_pipe_rule = false; + dbg_cnt_ctrl.rule_idx = 0; + if (option == 1) + dbg_cnt_ctrl.en = true; + else + dbg_cnt_ctrl.en = false; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipahal_write_reg_n_fields(IPA_DEBUG_CNT_CTRL_n, 0, &dbg_cnt_ctrl); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return count; +} + +static ssize_t ipa3_read_dbg_cnt(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + u32 regval; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + IPAERR("IPA_DEBUG_CNT_REG is not supported in IPA 4.0\n"); + return -EPERM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + regval = + ipahal_read_reg_n(IPA_DEBUG_CNT_REG_n, 0); + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA_DEBUG_CNT_REG_0=0x%x\n", regval); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa3_read_msg(struct file *file, char __user 
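+/*
+ * ipa3_write_dbg_cnt() above shows the input-parsing pattern shared by the
+ * write handlers in this file: bound-check count against the static buffer,
+ * copy_from_user(), NUL-terminate, then parse with kstrtou32()/kstrtos8():
+ *
+ *	if (sizeof(dbg_buff) < count + 1)
+ *		return -EFAULT;
+ *	if (copy_from_user(dbg_buff, buf, count))
+ *		return -EFAULT;
+ *	dbg_buff[count] = '\0';
+ *	if (kstrtou32(dbg_buff, 0, &option))
+ *		return -EFAULT;
+ *
+ * The parsed value is treated as a boolean: 1 enables the general-purpose
+ * debug counter on all source pipes (src_pipe = 0xff), anything else
+ * disables it. Both the read and write handlers return -EPERM on IPA v4.0
+ * and later, where the DEBUG_CNT registers are no longer supported.
+ */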
*ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int cnt = 0; + int i; + + for (i = 0; i < IPA_EVENT_MAX_NUM; i++) { + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "msg[%u:%27s] W:%u R:%u\n", i, + ipa3_event_name[i], + ipa3_ctx->stats.msg_w[i], + ipa3_ctx->stats.msg_r[i]); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa3_read_nat4(struct file *file, + char __user *ubuf, size_t count, + loff_t *ppos) +{ +#define ENTRY_U32_FIELDS 8 +#define NAT_ENTRY_ENABLE 0x8000 +#define NAT_ENTRY_RST_FIN_BIT 0x4000 +#define BASE_TABLE 0 +#define EXPANSION_TABLE 1 + + u32 *base_tbl, *indx_tbl; + u32 tbl_size, *tmp; + u32 value, i, j, rule_id; + u16 enable, tbl_entry, flag; + u32 no_entries = 0; + struct ipa_pdn_entry *pdn_table = ipa3_ctx->nat_mem.pdn_mem.base; + + mutex_lock(&ipa3_ctx->nat_mem.lock); + value = ipa3_ctx->nat_mem.public_ip_addr; + pr_err( + "Table IP Address:%d.%d.%d.%d\n", + ((value & 0xFF000000) >> 24), + ((value & 0x00FF0000) >> 16), + ((value & 0x0000FF00) >> 8), + ((value & 0x000000FF))); + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + for (i = 0; i < IPA_MAX_PDN_NUM; i++) { + pr_err( + "PDN %d: ip 0x%X, src_metadata 0x%X, dst_metadata 0x%X\n", + i, pdn_table[i].public_ip, + pdn_table[i].src_metadata, + pdn_table[i].dst_metadata); + } + + pr_err("Table Size:%d\n", + ipa3_ctx->nat_mem.size_base_tables); + + pr_err("Expansion Table Size:%d\n", + ipa3_ctx->nat_mem.size_expansion_tables-1); + + if (!ipa3_ctx->nat_mem.is_sys_mem) + pr_err("Not supported for local(shared) memory\n"); + + /* Print Base tables */ + rule_id = 0; + for (j = 0; j < 2; j++) { + if (j == BASE_TABLE) { + tbl_size = ipa3_ctx->nat_mem.size_base_tables; + base_tbl = (u32 *)ipa3_ctx->nat_mem.ipv4_rules_addr; + + pr_err("\nBase Table:\n"); + } else { + tbl_size = ipa3_ctx->nat_mem.size_expansion_tables-1; + base_tbl = + (u32 *)ipa3_ctx->nat_mem.ipv4_expansion_rules_addr; + + pr_err("\nExpansion Base Table:\n"); + } + + if (base_tbl != NULL) { + for (i = 0; i <= tbl_size; i++, rule_id++) { + tmp = base_tbl; + value = tmp[4]; + enable = ((value & 0xFFFF0000) >> 16); + + if (enable & NAT_ENTRY_ENABLE) { + no_entries++; + pr_err("Rule:%d ", rule_id); + + value = *tmp; + pr_err( + "Private_IP:%d.%d.%d.%d ", + ((value & 0xFF000000) >> 24), + ((value & 0x00FF0000) >> 16), + ((value & 0x0000FF00) >> 8), + ((value & 0x000000FF))); + tmp++; + + value = *tmp; + pr_err( + "Target_IP:%d.%d.%d.%d ", + ((value & 0xFF000000) >> 24), + ((value & 0x00FF0000) >> 16), + ((value & 0x0000FF00) >> 8), + ((value & 0x000000FF))); + tmp++; + + value = *tmp; + pr_err( + "Next_Index:%d Public_Port:%d ", + (value & 0x0000FFFF), + ((value & 0xFFFF0000) >> 16)); + tmp++; + + value = *tmp; + pr_err( + "Private_Port:%d Target_Port:%d ", + (value & 0x0000FFFF), + ((value & 0xFFFF0000) >> 16)); + tmp++; + + value = *tmp; + flag = ((value & 0xFFFF0000) >> 16); + if (flag & NAT_ENTRY_RST_FIN_BIT) { + pr_err( + "IP_CKSM_delta:0x%x Flags:%s ", + (value & 0x0000FFFF), + "Direct_To_A5"); + } else { + pr_err( + "IP_CKSM_delta:0x%x Flags:%s ", + (value & 0x0000FFFF), + "Fwd_to_route"); + } + tmp++; + + value = *tmp; + pr_err( + "Time_stamp:0x%x Proto:%d ", + (value & 0x00FFFFFF), + ((value & 0xFF000000) >> 24)); + tmp++; + + value = *tmp; + pr_err( + "Prev_Index:%d Indx_tbl_entry:%d ", + (value & 0x0000FFFF), + ((value & 0xFFFF0000) >> 16)); + tmp++; + + value = *tmp; + pr_err( + "TCP_UDP_cksum_delta:0x%x\n", + ((value & 0xFFFF0000) >> 16)); + } + + base_tbl += 
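+/*
+ * Layout assumed by the walker above: each NAT base/expansion table entry is
+ * ENTRY_U32_FIELDS (8) consecutive u32 words, decoded as
+ *
+ *	word0: private IP address
+ *	word1: target (public peer) IP address
+ *	word2: next index (15:0), public port (31:16)
+ *	word3: private port (15:0), target port (31:16)
+ *	word4: IP checksum delta (15:0) and flags (31:16); flag 0x8000 marks
+ *	       the entry enabled, flag 0x4000 (RST/FIN) directs the packet
+ *	       to the apps processor (A5) instead of routing
+ *	word5: timestamp (23:0), protocol (31:24)
+ *	word6: prev index (15:0), index-table entry (31:16)
+ *	word7: TCP/UDP checksum delta (31:16)
+ *
+ * which is why the loop advances base_tbl by ENTRY_U32_FIELDS per rule and
+ * tests word4's high half against NAT_ENTRY_ENABLE before printing.
+ */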
ENTRY_U32_FIELDS; + + } + } + } + + /* Print Index tables */ + rule_id = 0; + for (j = 0; j < 2; j++) { + if (j == BASE_TABLE) { + tbl_size = ipa3_ctx->nat_mem.size_base_tables; + indx_tbl = (u32 *)ipa3_ctx->nat_mem.index_table_addr; + + pr_err("\nIndex Table:\n"); + } else { + tbl_size = ipa3_ctx->nat_mem.size_expansion_tables-1; + indx_tbl = + (u32 *)ipa3_ctx->nat_mem.index_table_expansion_addr; + + pr_err("\nExpansion Index Table:\n"); + } + + if (indx_tbl != NULL) { + for (i = 0; i <= tbl_size; i++, rule_id++) { + tmp = indx_tbl; + value = *tmp; + tbl_entry = (value & 0x0000FFFF); + + if (tbl_entry) { + pr_err("Rule:%d ", rule_id); + + value = *tmp; + pr_err( + "Table_Entry:%d Next_Index:%d\n", + tbl_entry, + ((value & 0xFFFF0000) >> 16)); + } + + indx_tbl++; + } + } + } + pr_err("Current No. Nat Entries: %d\n", no_entries); + mutex_unlock(&ipa3_ctx->nat_mem.lock); + + return 0; +} + +static ssize_t ipa3_rm_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int result, nbytes, cnt = 0; + + result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN); + if (result < 0) { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "Error in printing RM stat %d\n", result); + cnt += nbytes; + } else + cnt += result; + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static void ipa_dump_status(struct ipahal_pkt_status *status) +{ + IPA_DUMP_STATUS_FIELD(status_opcode); + IPA_DUMP_STATUS_FIELD(exception); + IPA_DUMP_STATUS_FIELD(status_mask); + IPA_DUMP_STATUS_FIELD(pkt_len); + IPA_DUMP_STATUS_FIELD(endp_src_idx); + IPA_DUMP_STATUS_FIELD(endp_dest_idx); + IPA_DUMP_STATUS_FIELD(metadata); + IPA_DUMP_STATUS_FIELD(flt_local); + IPA_DUMP_STATUS_FIELD(flt_hash); + IPA_DUMP_STATUS_FIELD(flt_global); + IPA_DUMP_STATUS_FIELD(flt_ret_hdr); + IPA_DUMP_STATUS_FIELD(flt_miss); + IPA_DUMP_STATUS_FIELD(flt_rule_id); + IPA_DUMP_STATUS_FIELD(rt_local); + IPA_DUMP_STATUS_FIELD(rt_hash); + IPA_DUMP_STATUS_FIELD(ucp); + IPA_DUMP_STATUS_FIELD(rt_tbl_idx); + IPA_DUMP_STATUS_FIELD(rt_miss); + IPA_DUMP_STATUS_FIELD(rt_rule_id); + IPA_DUMP_STATUS_FIELD(nat_hit); + IPA_DUMP_STATUS_FIELD(nat_entry_idx); + IPA_DUMP_STATUS_FIELD(nat_type); + pr_err("tag = 0x%llx\n", (u64)status->tag_info & 0xFFFFFFFFFFFF); + IPA_DUMP_STATUS_FIELD(seq_num); + IPA_DUMP_STATUS_FIELD(time_of_day_ctr); + IPA_DUMP_STATUS_FIELD(hdr_local); + IPA_DUMP_STATUS_FIELD(hdr_offset); + IPA_DUMP_STATUS_FIELD(frag_hit); + IPA_DUMP_STATUS_FIELD(frag_rule); +} + +static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ipa3_status_stats *stats; + int i, j; + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) + return -EFAULT; + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa3_ctx->ep[i].sys || !ipa3_ctx->ep[i].sys->status_stat) + continue; + + memcpy(stats, ipa3_ctx->ep[i].sys->status_stat, sizeof(*stats)); + pr_err("Statuses for pipe %d\n", i); + for (j = 0; j < IPA_MAX_STATUS_STAT_NUM; j++) { + pr_err("curr=%d\n", stats->curr); + ipa_dump_status(&stats->status[stats->curr]); + pr_err("\n\n\n"); + stats->curr = (stats->curr + 1) % + IPA_MAX_STATUS_STAT_NUM; + } + } + + kfree(stats); + return 0; +} + +static ssize_t ipa3_print_active_clients_log(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int cnt; + int table_size; + + if (active_clients_buf == NULL) { + IPAERR("Active Clients buffer is not allocated"); + return 0; + } + memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENT_BUF_SIZE); + 
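+/*
+ * The "active_clients" node concatenates two pieces under the active-clients
+ * mutex: the circular activity log rendered by
+ * ipa3_active_clients_log_print_buffer(), which is given all of
+ * active_clients_buf except the last IPA_MAX_MSG_LEN bytes, and the
+ * per-client reference table appended right after it by
+ * ipa3_active_clients_log_print_table() within that remaining budget.
+ * Writing any value to the same node clears the log via
+ * ipa3_active_clients_log_clear().
+ */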
mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex); + cnt = ipa3_active_clients_log_print_buffer(active_clients_buf, + IPA_DBG_ACTIVE_CLIENT_BUF_SIZE - IPA_MAX_MSG_LEN); + table_size = ipa3_active_clients_log_print_table(active_clients_buf + + cnt, IPA_MAX_MSG_LEN); + mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex); + + return simple_read_from_buffer(ubuf, count, ppos, + active_clients_buf, cnt + table_size); +} + +static ssize_t ipa3_clear_active_clients_log(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + ipa3_active_clients_log_clear(); + + return count; +} + +static ssize_t ipa3_enable_ipc_low(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + mutex_lock(&ipa3_ctx->lock); + if (option) { + if (!ipa_ipc_low_buff) { + ipa_ipc_low_buff = + ipc_log_context_create(IPA_IPC_LOG_PAGES, + "ipa_low", 0); + } + if (ipa_ipc_low_buff == NULL) + IPAERR("failed to get logbuf_low\n"); + ipa3_ctx->logbuf_low = ipa_ipc_low_buff; + } else { + ipa3_ctx->logbuf_low = NULL; + } + mutex_unlock(&ipa3_ctx->lock); + + return count; +} + +const struct file_operations ipa3_gen_reg_ops = { + .read = ipa3_read_gen_reg, +}; + +const struct file_operations ipa3_ep_reg_ops = { + .read = ipa3_read_ep_reg, + .write = ipa3_write_ep_reg, +}; + +const struct file_operations ipa3_keep_awake_ops = { + .read = ipa3_read_keep_awake, + .write = ipa3_write_keep_awake, +}; + +const struct file_operations ipa3_ep_holb_ops = { + .write = ipa3_write_ep_holb, +}; + +const struct file_operations ipa3_hdr_ops = { + .read = ipa3_read_hdr, +}; + +const struct file_operations ipa3_rt_ops = { + .read = ipa3_read_rt, + .open = ipa3_open_dbg, +}; + +const struct file_operations ipa3_rt_hw_ops = { + .read = ipa3_read_rt_hw, + .open = ipa3_open_dbg, +}; + +const struct file_operations ipa3_proc_ctx_ops = { + .read = ipa3_read_proc_ctx, +}; + +const struct file_operations ipa3_flt_ops = { + .read = ipa3_read_flt, + .open = ipa3_open_dbg, +}; + +const struct file_operations ipa3_flt_hw_ops = { + .read = ipa3_read_flt_hw, + .open = ipa3_open_dbg, +}; + +const struct file_operations ipa3_stats_ops = { + .read = ipa3_read_stats, +}; + +const struct file_operations ipa3_wstats_ops = { + .read = ipa3_read_wstats, +}; + +const struct file_operations ipa3_wdi_ops = { + .read = ipa3_read_wdi, +}; + +const struct file_operations ipa3_ntn_ops = { + .read = ipa3_read_ntn, +}; + +const struct file_operations ipa3_msg_ops = { + .read = ipa3_read_msg, +}; + +const struct file_operations ipa3_dbg_cnt_ops = { + .read = ipa3_read_dbg_cnt, + .write = ipa3_write_dbg_cnt, +}; + +const struct file_operations ipa3_status_stats_ops = { + .read = ipa_status_stats_read, +}; + +const struct file_operations ipa3_nat4_ops = { + .read = ipa3_read_nat4, +}; + +const struct file_operations ipa3_rm_stats = { + .read = ipa3_rm_read_stats, +}; + +const struct file_operations ipa3_active_clients = { + .read = ipa3_print_active_clients_log, + .write = ipa3_clear_active_clients_log, +}; + 
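+/*
+ * Each of these file_operations tables backs exactly one node created in
+ * ipa3_debugfs_init() below. The routing/filtering variants additionally set
+ * .open = ipa3_open_dbg, which is expected to carry the (void *)IPA_IP_v4 or
+ * (void *)IPA_IP_v6 cookie passed to debugfs_create_file() into
+ * file->private_data, where the read handlers above cast it back to an
+ * enum ipa_ip_type to pick which table family to dump.
+ */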
+const struct file_operations ipa3_ipc_low_ops = { + .write = ipa3_enable_ipc_low, +}; + +void ipa3_debugfs_init(void) +{ + const mode_t read_only_mode = 0444; + const mode_t read_write_mode = 0664; + const mode_t write_only_mode = 0220; + struct dentry *file; + + dent = debugfs_create_dir("ipa", 0); + if (IS_ERR(dent)) { + IPAERR("fail to create folder in debug_fs.\n"); + return; + } + + file = debugfs_create_u32("hw_type", read_only_mode, + dent, &ipa3_ctx->ipa_hw_type); + if (!file) { + IPAERR("could not create hw_type file\n"); + goto fail; + } + + + dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0, + &ipa3_gen_reg_ops); + if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) { + IPAERR("fail to create file for debug_fs gen_reg\n"); + goto fail; + } + + dfile_active_clients = debugfs_create_file("active_clients", + read_write_mode, dent, 0, &ipa3_active_clients); + if (!dfile_active_clients || IS_ERR(dfile_active_clients)) { + IPAERR("fail to create file for debug_fs active_clients\n"); + goto fail; + } + + active_clients_buf = NULL; + active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENT_BUF_SIZE, + GFP_KERNEL); + if (active_clients_buf == NULL) + goto fail; + + dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0, + &ipa3_ep_reg_ops); + if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) { + IPAERR("fail to create file for debug_fs ep_reg\n"); + goto fail; + } + + dfile_keep_awake = debugfs_create_file("keep_awake", read_write_mode, + dent, 0, &ipa3_keep_awake_ops); + if (!dfile_keep_awake || IS_ERR(dfile_keep_awake)) { + IPAERR("fail to create file for debug_fs dfile_keep_awake\n"); + goto fail; + } + + dfile_ep_holb = debugfs_create_file("holb", write_only_mode, dent, + 0, &ipa3_ep_holb_ops); + if (!dfile_ep_holb || IS_ERR(dfile_ep_holb)) { + IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n"); + goto fail; + } + + dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0, + &ipa3_hdr_ops); + if (!dfile_hdr || IS_ERR(dfile_hdr)) { + IPAERR("fail to create file for debug_fs hdr\n"); + goto fail; + } + + dfile_proc_ctx = debugfs_create_file("proc_ctx", read_only_mode, dent, + 0, &ipa3_proc_ctx_ops); + if (!dfile_hdr || IS_ERR(dfile_hdr)) { + IPAERR("fail to create file for debug_fs proc_ctx\n"); + goto fail; + } + + dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent, + (void *)IPA_IP_v4, &ipa3_rt_ops); + if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) { + IPAERR("fail to create file for debug_fs ip4 rt\n"); + goto fail; + } + + dfile_ip4_rt_hw = debugfs_create_file("ip4_rt_hw", read_only_mode, dent, + (void *)IPA_IP_v4, &ipa3_rt_hw_ops); + if (!dfile_ip4_rt_hw || IS_ERR(dfile_ip4_rt_hw)) { + IPAERR("fail to create file for debug_fs ip4 rt hw\n"); + goto fail; + } + + dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent, + (void *)IPA_IP_v6, &ipa3_rt_ops); + if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) { + IPAERR("fail to create file for debug_fs ip6:w rt\n"); + goto fail; + } + + dfile_ip6_rt_hw = debugfs_create_file("ip6_rt_hw", read_only_mode, dent, + (void *)IPA_IP_v6, &ipa3_rt_hw_ops); + if (!dfile_ip6_rt_hw || IS_ERR(dfile_ip6_rt_hw)) { + IPAERR("fail to create file for debug_fs ip6 rt hw\n"); + goto fail; + } + + dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent, + (void *)IPA_IP_v4, &ipa3_flt_ops); + if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) { + IPAERR("fail to create file for debug_fs ip4 flt\n"); + goto fail; + } + + dfile_ip4_flt_hw = debugfs_create_file("ip4_flt_hw", read_only_mode, + dent, (void 
*)IPA_IP_v4, &ipa3_flt_hw_ops); + if (!dfile_ip4_flt_hw || IS_ERR(dfile_ip4_flt_hw)) { + IPAERR("fail to create file for debug_fs ip4 flt\n"); + goto fail; + } + + dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent, + (void *)IPA_IP_v6, &ipa3_flt_ops); + if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) { + IPAERR("fail to create file for debug_fs ip6 flt\n"); + goto fail; + } + + dfile_ip6_flt_hw = debugfs_create_file("ip6_flt_hw", read_only_mode, + dent, (void *)IPA_IP_v6, &ipa3_flt_hw_ops); + if (!dfile_ip6_flt_hw || IS_ERR(dfile_ip6_flt_hw)) { + IPAERR("fail to create file for debug_fs ip6 flt\n"); + goto fail; + } + + dfile_stats = debugfs_create_file("stats", read_only_mode, dent, 0, + &ipa3_stats_ops); + if (!dfile_stats || IS_ERR(dfile_stats)) { + IPAERR("fail to create file for debug_fs stats\n"); + goto fail; + } + + dfile_wstats = debugfs_create_file("wstats", read_only_mode, + dent, 0, &ipa3_wstats_ops); + if (!dfile_wstats || IS_ERR(dfile_wstats)) { + IPAERR("fail to create file for debug_fs wstats\n"); + goto fail; + } + + dfile_wdi_stats = debugfs_create_file("wdi", read_only_mode, dent, 0, + &ipa3_wdi_ops); + if (!dfile_wdi_stats || IS_ERR(dfile_wdi_stats)) { + IPAERR("fail to create file for debug_fs wdi stats\n"); + goto fail; + } + + dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0, + &ipa3_ntn_ops); + if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) { + IPAERR("fail to create file for debug_fs ntn stats\n"); + goto fail; + } + + dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0, + &ipa3_dbg_cnt_ops); + if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) { + IPAERR("fail to create file for debug_fs dbg_cnt\n"); + goto fail; + } + + dfile_msg = debugfs_create_file("msg", read_only_mode, dent, 0, + &ipa3_msg_ops); + if (!dfile_msg || IS_ERR(dfile_msg)) { + IPAERR("fail to create file for debug_fs msg\n"); + goto fail; + } + + dfile_ip4_nat = debugfs_create_file("ip4_nat", read_only_mode, dent, + 0, &ipa3_nat4_ops); + if (!dfile_ip4_nat || IS_ERR(dfile_ip4_nat)) { + IPAERR("fail to create file for debug_fs ip4 nat\n"); + goto fail; + } + + dfile_rm_stats = debugfs_create_file("rm_stats", + read_only_mode, dent, 0, &ipa3_rm_stats); + if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) { + IPAERR("fail to create file for debug_fs rm_stats\n"); + goto fail; + } + + dfile_status_stats = debugfs_create_file("status_stats", + read_only_mode, dent, 0, &ipa3_status_stats_ops); + if (!dfile_status_stats || IS_ERR(dfile_status_stats)) { + IPAERR("fail to create file for debug_fs status_stats\n"); + goto fail; + } + + file = debugfs_create_u32("enable_clock_scaling", read_write_mode, + dent, &ipa3_ctx->enable_clock_scaling); + if (!file) { + IPAERR("could not create enable_clock_scaling file\n"); + goto fail; + } + + file = debugfs_create_u32("clock_scaling_bw_threshold_nominal_mbps", + read_write_mode, dent, + &ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal); + if (!file) { + IPAERR("could not create bw_threshold_nominal_mbps\n"); + goto fail; + } + + file = debugfs_create_u32("clock_scaling_bw_threshold_turbo_mbps", + read_write_mode, dent, + &ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo); + if (!file) { + IPAERR("could not create bw_threshold_turbo_mbps\n"); + goto fail; + } + + file = debugfs_create_file("enable_low_prio_print", write_only_mode, + dent, 0, &ipa3_ipc_low_ops); + if (!file) { + IPAERR("could not create enable_low_prio_print file\n"); + goto fail; + } + + ipa_debugfs_init_stats(dent); + + return; + +fail: + 
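+/*
+ * Error handling here is deliberately coarse: every creation failure jumps
+ * to the single fail label below, and debugfs_remove_recursive(dent) tears
+ * down the whole "ipa" directory, including any entries created before the
+ * failure, so no per-file cleanup is needed.
+ */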
debugfs_remove_recursive(dent); +} + +void ipa3_debugfs_remove(void) +{ + if (IS_ERR(dent)) { + IPAERR("Debugfs:folder was not created.\n"); + return; + } + if (active_clients_buf != NULL) { + kfree(active_clients_buf); + active_clients_buf = NULL; + } + debugfs_remove_recursive(dent); +} + +struct dentry *ipa_debugfs_get_root(void) +{ + return dent; +} +EXPORT_SYMBOL(ipa_debugfs_get_root); + +#else /* !CONFIG_DEBUG_FS */ +void ipa3_debugfs_init(void) {} +void ipa3_debugfs_remove(void) {} +#endif diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c new file mode 100644 index 000000000000..824a94da95cf --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c @@ -0,0 +1,1105 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include "linux/msm_gsi.h" +#include +#include "ipa_i.h" + +#define IPA_DMA_POLLING_MIN_SLEEP_RX 1010 +#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050 +#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8 +#define IPA_DMA_MAX_PKT_SZ 0xFFFF +#define IPA_DMA_DUMMY_BUFF_SZ 8 +#define IPA_DMA_PREFETCH_WA_THRESHOLD 9 + +#define IPADMA_DRV_NAME "ipa_dma" + +#define IPADMA_DBG(fmt, args...) \ + do { \ + pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_ERR(fmt, args...) \ + do { \ + pr_err(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_FUNC_ENTRY() \ + IPADMA_DBG_LOW("ENTRY\n") + +#define IPADMA_FUNC_EXIT() \ + IPADMA_DBG_LOW("EXIT\n") + +#ifdef CONFIG_DEBUG_FS +#define IPADMA_MAX_MSG_LEN 1024 +static char dbg_buff[IPADMA_MAX_MSG_LEN]; +static void ipa3_dma_debugfs_init(void); +static void ipa3_dma_debugfs_destroy(void); +#else +static void ipa3_dma_debugfs_init(void) {} +static void ipa3_dma_debugfs_destroy(void) {} +#endif + +/** + * struct ipa3_dma_ctx -IPADMA driver context information + * @is_enabled:is ipa_dma enabled? 
+ * @destroy_pending: destroy ipa_dma after handling all pending memcpy + * @ipa_dma_xfer_wrapper_cache: cache of ipa3_dma_xfer_wrapper structs + * @sync_lock: lock for synchronisation in sync_memcpy + * @async_lock: lock for synchronisation in async_memcpy + * @enable_lock: lock for is_enabled + * @pending_lock: lock for synchronize is_enable and pending_cnt + * @done: no pending works-ipadma can be destroyed + * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer + * @ipa_dma_async_prod_hdl:handle of async memcpy producer + * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer + * @sync_memcpy_pending_cnt: number of pending sync memcopy operations + * @async_memcpy_pending_cnt: number of pending async memcopy operations + * @uc_memcpy_pending_cnt: number of pending uc memcopy operations + * @total_sync_memcpy: total number of sync memcpy (statistics) + * @total_async_memcpy: total number of async memcpy (statistics) + * @total_uc_memcpy: total number of uc memcpy (statistics) + */ +struct ipa3_dma_ctx { + bool is_enabled; + bool destroy_pending; + struct kmem_cache *ipa_dma_xfer_wrapper_cache; + struct mutex sync_lock; + spinlock_t async_lock; + struct mutex enable_lock; + spinlock_t pending_lock; + struct completion done; + u32 ipa_dma_sync_prod_hdl; + u32 ipa_dma_async_prod_hdl; + u32 ipa_dma_sync_cons_hdl; + u32 ipa_dma_async_cons_hdl; + atomic_t sync_memcpy_pending_cnt; + atomic_t async_memcpy_pending_cnt; + atomic_t uc_memcpy_pending_cnt; + atomic_t total_sync_memcpy; + atomic_t total_async_memcpy; + atomic_t total_uc_memcpy; + struct ipa_mem_buffer ipa_dma_dummy_src_sync; + struct ipa_mem_buffer ipa_dma_dummy_dst_sync; + struct ipa_mem_buffer ipa_dma_dummy_src_async; + struct ipa_mem_buffer ipa_dma_dummy_dst_async; +}; +static struct ipa3_dma_ctx *ipa3_dma_ctx; + + +/** + * ipa3_dma_init() -Initialize IPADMA. 
+ * + * This function initialize all IPADMA internal data and connect in dma: + * MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS + * MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_SYNC_CONS + * + * Return codes: 0: success + * -EFAULT: IPADMA is already initialized + * -EINVAL: IPA driver is not initialized + * -ENOMEM: allocating memory error + * -EPERM: pipe connection failed + */ +int ipa3_dma_init(void) +{ + struct ipa3_dma_ctx *ipa_dma_ctx_t; + struct ipa_sys_connect_params sys_in; + int res = 0; + int sync_sz; + int async_sz; + + IPADMA_FUNC_ENTRY(); + + if (ipa3_dma_ctx) { + IPADMA_ERR("Already initialized.\n"); + return -EFAULT; + } + + if (!ipa3_is_ready()) { + IPADMA_ERR("IPA is not ready yet\n"); + return -EINVAL; + } + + ipa_dma_ctx_t = kzalloc(sizeof(*(ipa3_dma_ctx)), GFP_KERNEL); + + if (!ipa_dma_ctx_t) + return -ENOMEM; + + ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache = + kmem_cache_create("IPA DMA XFER WRAPPER", + sizeof(struct ipa3_dma_xfer_wrapper), 0, 0, NULL); + if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) { + IPAERR(":failed to create ipa dma xfer wrapper cache.\n"); + res = -ENOMEM; + goto fail_mem_ctrl; + } + + mutex_init(&ipa_dma_ctx_t->enable_lock); + spin_lock_init(&ipa_dma_ctx_t->async_lock); + mutex_init(&ipa_dma_ctx_t->sync_lock); + spin_lock_init(&ipa_dma_ctx_t->pending_lock); + init_completion(&ipa_dma_ctx_t->done); + ipa_dma_ctx_t->is_enabled = false; + ipa_dma_ctx_t->destroy_pending = false; + atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->uc_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0); + atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0); + atomic_set(&ipa_dma_ctx_t->total_uc_memcpy, 0); + + sync_sz = IPA_SYS_DESC_FIFO_SZ; + async_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ; + /* + * for ipav3.5 we need to double the rings and allocate dummy buffers + * in order to apply the prefetch WA + */ + if (ipa_get_hw_type() == IPA_HW_v3_5) { + sync_sz *= 2; + async_sz *= 2; + + ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base = + dma_alloc_coherent(ipa3_ctx->pdev, + IPA_DMA_DUMMY_BUFF_SZ * 4, + &ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base, + GFP_KERNEL); + + if (!ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base) { + IPAERR("DMA alloc fail %d bytes for prefetch WA\n", + IPA_DMA_DUMMY_BUFF_SZ); + res = -ENOMEM; + goto fail_alloc_dummy; + } + + ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.base = + ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base + + IPA_DMA_DUMMY_BUFF_SZ; + ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.phys_base = + ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base + + IPA_DMA_DUMMY_BUFF_SZ; + ipa_dma_ctx_t->ipa_dma_dummy_src_async.base = + ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.base + + IPA_DMA_DUMMY_BUFF_SZ; + ipa_dma_ctx_t->ipa_dma_dummy_src_async.phys_base = + ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.phys_base + + IPA_DMA_DUMMY_BUFF_SZ; + ipa_dma_ctx_t->ipa_dma_dummy_dst_async.base = + ipa_dma_ctx_t->ipa_dma_dummy_src_async.base + + IPA_DMA_DUMMY_BUFF_SZ; + ipa_dma_ctx_t->ipa_dma_dummy_dst_async.phys_base = + ipa_dma_ctx_t->ipa_dma_dummy_src_async.phys_base + + IPA_DMA_DUMMY_BUFF_SZ; + } + + /* IPADMA SYNC PROD-source for sync memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD; + sys_in.desc_fifo_sz = sync_sz; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS; + sys_in.skip_ep_cfg = false; + if (ipa3_setup_sys_pipe(&sys_in, + 
&ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) { + IPADMA_ERR(":setup sync prod pipe failed\n"); + res = -EPERM; + goto fail_sync_prod; + } + + /* IPADMA SYNC CONS-destination for sync memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS; + sys_in.desc_fifo_sz = sync_sz; + sys_in.skip_ep_cfg = false; + sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; + sys_in.notify = NULL; + sys_in.priv = NULL; + if (ipa3_setup_sys_pipe(&sys_in, + &ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) { + IPADMA_ERR(":setup sync cons pipe failed.\n"); + res = -EPERM; + goto fail_sync_cons; + } + + IPADMA_DBG("SYNC MEMCPY pipes are connected\n"); + + /* IPADMA ASYNC PROD-source for sync memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD; + sys_in.desc_fifo_sz = async_sz; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS; + sys_in.skip_ep_cfg = false; + sys_in.notify = NULL; + if (ipa3_setup_sys_pipe(&sys_in, + &ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) { + IPADMA_ERR(":setup async prod pipe failed.\n"); + res = -EPERM; + goto fail_async_prod; + } + + /* IPADMA ASYNC CONS-destination for sync memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS; + sys_in.desc_fifo_sz = async_sz; + sys_in.skip_ep_cfg = false; + sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; + sys_in.notify = ipa3_dma_async_memcpy_notify_cb; + sys_in.priv = NULL; + if (ipa3_setup_sys_pipe(&sys_in, + &ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) { + IPADMA_ERR(":setup async cons pipe failed.\n"); + res = -EPERM; + goto fail_async_cons; + } + ipa3_dma_debugfs_init(); + ipa3_dma_ctx = ipa_dma_ctx_t; + IPADMA_DBG("ASYNC MEMCPY pipes are connected\n"); + + IPADMA_FUNC_EXIT(); + return res; +fail_async_cons: + ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl); +fail_async_prod: + ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl); +fail_sync_cons: + ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl); +fail_sync_prod: + dma_free_coherent(ipa3_ctx->pdev, IPA_DMA_DUMMY_BUFF_SZ * 4, + ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base, + ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base); +fail_alloc_dummy: + kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache); +fail_mem_ctrl: + kfree(ipa_dma_ctx_t); + ipa3_dma_ctx = NULL; + return res; + +} + +/** + * ipa3_dma_enable() -Vote for IPA clocks. 
+ * + *Return codes: 0: success + * -EINVAL: IPADMA is not initialized + * -EPERM: Operation not permitted as ipa_dma is already + * enabled + */ +int ipa3_dma_enable(void) +{ + IPADMA_FUNC_ENTRY(); + if (ipa3_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't enable\n"); + return -EPERM; + } + mutex_lock(&ipa3_dma_ctx->enable_lock); + if (ipa3_dma_ctx->is_enabled) { + IPADMA_ERR("Already enabled.\n"); + mutex_unlock(&ipa3_dma_ctx->enable_lock); + return -EPERM; + } + IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA"); + ipa3_dma_ctx->is_enabled = true; + mutex_unlock(&ipa3_dma_ctx->enable_lock); + + IPADMA_FUNC_EXIT(); + return 0; +} + +static bool ipa3_dma_work_pending(void) +{ + if (atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt)) { + IPADMA_DBG("pending sync\n"); + return true; + } + if (atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt)) { + IPADMA_DBG("pending async\n"); + return true; + } + if (atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt)) { + IPADMA_DBG("pending uc\n"); + return true; + } + IPADMA_DBG_LOW("no pending work\n"); + return false; +} + +/** + * ipa3_dma_disable()- Unvote for IPA clocks. + * + * enter to power save mode. + * + * Return codes: 0: success + * -EINVAL: IPADMA is not initialized + * -EPERM: Operation not permitted as ipa_dma is already + * diabled + * -EFAULT: can not disable ipa_dma as there are pending + * memcopy works + */ +int ipa3_dma_disable(void) +{ + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + if (ipa3_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't disable\n"); + return -EPERM; + } + mutex_lock(&ipa3_dma_ctx->enable_lock); + spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags); + if (!ipa3_dma_ctx->is_enabled) { + IPADMA_ERR("Already disabled.\n"); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + mutex_unlock(&ipa3_dma_ctx->enable_lock); + return -EPERM; + } + if (ipa3_dma_work_pending()) { + IPADMA_ERR("There is pending work, can't disable.\n"); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + mutex_unlock(&ipa3_dma_ctx->enable_lock); + return -EFAULT; + } + ipa3_dma_ctx->is_enabled = false; + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA"); + mutex_unlock(&ipa3_dma_ctx->enable_lock); + IPADMA_FUNC_EXIT(); + return 0; +} + +/** + * ipa3_dma_sync_memcpy()- Perform synchronous memcpy using IPA. + * + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. 
+ * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enable or + * initialized + * -gsi_status : on GSI failures + * -EFAULT: other + */ +int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len) +{ + int ep_idx; + int res; + int i = 0; + struct ipa3_sys_context *cons_sys; + struct ipa3_sys_context *prod_sys; + struct ipa3_dma_xfer_wrapper *xfer_descr = NULL; + struct ipa3_dma_xfer_wrapper *head_descr = NULL; + struct gsi_xfer_elem prod_xfer_elem; + struct gsi_xfer_elem cons_xfer_elem; + struct gsi_chan_xfer_notify gsi_notify; + unsigned long flags; + bool stop_polling = false; + bool prefetch_wa = false; + + IPADMA_FUNC_ENTRY(); + IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n", + dest, src, len); + if (ipa3_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags); + if (!ipa3_dma_ctx->is_enabled) { + IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa3_dma_ctx->sync_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_SYNC_CONS); + return -EFAULT; + } + cons_sys = ipa3_ctx->ep[ep_idx].sys; + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); + return -EFAULT; + } + prod_sys = ipa3_ctx->ep[ep_idx].sys; + + xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, + GFP_KERNEL); + if (!xfer_descr) { + IPADMA_ERR("failed to alloc xfer descr wrapper\n"); + res = -ENOMEM; + goto fail_mem_alloc; + } + xfer_descr->phys_addr_dest = dest; + xfer_descr->phys_addr_src = src; + xfer_descr->len = len; + init_completion(&xfer_descr->xfer_done); + + mutex_lock(&ipa3_dma_ctx->sync_lock); + list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list); + cons_sys->len++; + cons_xfer_elem.addr = dest; + cons_xfer_elem.len = len; + cons_xfer_elem.type = GSI_XFER_ELEM_DATA; + cons_xfer_elem.flags = GSI_XFER_FLAG_EOT; + + prod_xfer_elem.addr = src; + prod_xfer_elem.len = len; + prod_xfer_elem.type = GSI_XFER_ELEM_DATA; + prod_xfer_elem.xfer_user_data = NULL; + + /* + * when copy is less than 9B we need to chain another dummy + * copy so the total size will be larger (for ipav3.5) + * for the consumer we have to prepare an additional credit + */ + prefetch_wa = ((ipa_get_hw_type() == IPA_HW_v3_5) && + len < IPA_DMA_PREFETCH_WA_THRESHOLD); + if (prefetch_wa) { + cons_xfer_elem.xfer_user_data = NULL; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &cons_xfer_elem, false); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer dest descr res:%d\n", + res); + goto fail_send; + } + cons_xfer_elem.addr = + ipa3_dma_ctx->ipa_dma_dummy_dst_sync.phys_base; + cons_xfer_elem.len = IPA_DMA_DUMMY_BUFF_SZ; + cons_xfer_elem.type = GSI_XFER_ELEM_DATA; + cons_xfer_elem.flags = GSI_XFER_FLAG_EOT; + cons_xfer_elem.xfer_user_data = xfer_descr; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &cons_xfer_elem, true); + 
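+/*
+ * IPA v3.5 prefetch workaround, as applied in this branch: when the
+ * requested copy is shorter than IPA_DMA_PREFETCH_WA_THRESHOLD (9 bytes),
+ * the real transfer is chained to a dummy IPA_DMA_DUMMY_BUFF_SZ (8 byte)
+ * transfer so the engine always sees a large enough burst. Concretely, the
+ * producer ring gets two elements
+ *
+ *	{ src,       len,                   GSI_XFER_FLAG_CHAIN }
+ *	{ dummy_src, IPA_DMA_DUMMY_BUFF_SZ, GSI_XFER_FLAG_EOT   }
+ *
+ * and the consumer ring gets two matching credits, with only the dummy
+ * element carrying xfer_user_data, so the single completion event observed
+ * by the poll loop below refers to the dummy transfer (hence the
+ * bytes_xfered == IPA_DMA_DUMMY_BUFF_SZ assertion further down).
+ */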
if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer dummy dest descr res:%d\n", + res); + goto fail_send; + } + prod_xfer_elem.flags = GSI_XFER_FLAG_CHAIN; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &prod_xfer_elem, false); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer src descr res:%d\n", + res); + ipa_assert(); + goto fail_send; + } + prod_xfer_elem.addr = + ipa3_dma_ctx->ipa_dma_dummy_src_sync.phys_base; + prod_xfer_elem.len = IPA_DMA_DUMMY_BUFF_SZ; + prod_xfer_elem.type = GSI_XFER_ELEM_DATA; + prod_xfer_elem.flags = GSI_XFER_FLAG_EOT; + prod_xfer_elem.xfer_user_data = NULL; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &prod_xfer_elem, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer dummy src descr res:%d\n", + res); + ipa_assert(); + goto fail_send; + } + } else { + cons_xfer_elem.xfer_user_data = xfer_descr; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &cons_xfer_elem, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer dest descr res:%d\n", + res); + goto fail_send; + } + prod_xfer_elem.flags = GSI_XFER_FLAG_EOT; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &prod_xfer_elem, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer src descr res:%d\n", + res); + ipa_assert(); + goto fail_send; + } + } + head_descr = list_first_entry(&cons_sys->head_desc_list, + struct ipa3_dma_xfer_wrapper, link); + + /* in case we are not the head of the list, wait for head to wake us */ + if (xfer_descr != head_descr) { + mutex_unlock(&ipa3_dma_ctx->sync_lock); + wait_for_completion(&xfer_descr->xfer_done); + mutex_lock(&ipa3_dma_ctx->sync_lock); + head_descr = list_first_entry(&cons_sys->head_desc_list, + struct ipa3_dma_xfer_wrapper, link); + /* Unexpected transfer sent from HW */ + BUG_ON(xfer_descr != head_descr); + } + mutex_unlock(&ipa3_dma_ctx->sync_lock); + + do { + /* wait for transfer to complete */ + res = gsi_poll_channel(cons_sys->ep->gsi_chan_hdl, + &gsi_notify); + if (res == GSI_STATUS_SUCCESS) + stop_polling = true; + else if (res != GSI_STATUS_POLL_EMPTY) + IPADMA_ERR( + "Failed: gsi_poll_chanel, returned %d loop#:%d\n", + res, i); + usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX, + IPA_DMA_POLLING_MAX_SLEEP_RX); + i++; + } while (!stop_polling); + + /* for prefetch WA we will receive the length of the dummy + * transfer in the event (because it is the second element) + */ + if (prefetch_wa) + ipa_assert_on(gsi_notify.bytes_xfered != + IPA_DMA_DUMMY_BUFF_SZ); + else + ipa_assert_on(len != gsi_notify.bytes_xfered); + + ipa_assert_on(dest != ((struct ipa3_dma_xfer_wrapper *) + (gsi_notify.xfer_user_data))->phys_addr_dest); + + mutex_lock(&ipa3_dma_ctx->sync_lock); + list_del(&head_descr->link); + cons_sys->len--; + kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); + /* wake the head of the list */ + if (!list_empty(&cons_sys->head_desc_list)) { + head_descr = list_first_entry(&cons_sys->head_desc_list, + struct ipa3_dma_xfer_wrapper, link); + complete(&head_descr->xfer_done); + } + mutex_unlock(&ipa3_dma_ctx->sync_lock); + + atomic_inc(&ipa3_dma_ctx->total_sync_memcpy); + atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt); + if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending()) + complete(&ipa3_dma_ctx->done); + + IPADMA_FUNC_EXIT(); + return res; + +fail_send: + list_del(&xfer_descr->link); + cons_sys->len--; + mutex_unlock(&ipa3_dma_ctx->sync_lock); + kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); +fail_mem_alloc: + 
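+/*
+ * Ordering model of the sync path above: descriptors are queued on
+ * cons_sys->head_desc_list in submission order, and at any moment only the
+ * caller whose descriptor is at the head of that list polls
+ * gsi_poll_channel(), sleeping IPA_DMA_POLLING_MIN/MAX_SLEEP_RX
+ * microseconds between attempts. Every other caller blocks on its own
+ * xfer_done completion and is woken by the previous head once that transfer
+ * has been reaped, which keeps completions strictly FIFO with respect to
+ * submissions.
+ */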
atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt); + if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending()) + complete(&ipa3_dma_ctx->done); + return res; +} + +/** + * ipa3_dma_async_memcpy()- Perform asynchronous memcpy using IPA. + * + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. + * @user_cb: callback function to notify the client when the copy was done. + * @user_param: cookie for user_cb. + * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enable or + * initialized + * -gsi_status : on GSI failures + * -EFAULT: descr fifo is full. + */ +int ipa3_dma_async_memcpy(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param) +{ + int ep_idx; + int res = 0; + struct ipa3_dma_xfer_wrapper *xfer_descr = NULL; + struct ipa3_sys_context *prod_sys; + struct ipa3_sys_context *cons_sys; + struct gsi_xfer_elem xfer_elem_cons, xfer_elem_prod; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n", + dest, src, len); + if (ipa3_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + if (!user_cb) { + IPADMA_ERR("null pointer: user_cb\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags); + if (!ipa3_dma_ctx->is_enabled) { + IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa3_dma_ctx->async_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + return -EFAULT; + } + cons_sys = ipa3_ctx->ep[ep_idx].sys; + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); + return -EFAULT; + } + prod_sys = ipa3_ctx->ep[ep_idx].sys; + + xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, + GFP_KERNEL); + if (!xfer_descr) { + res = -ENOMEM; + goto fail_mem_alloc; + } + xfer_descr->phys_addr_dest = dest; + xfer_descr->phys_addr_src = src; + xfer_descr->len = len; + xfer_descr->callback = user_cb; + xfer_descr->user1 = user_param; + + spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags); + list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list); + cons_sys->len++; + /* + * when copy is less than 9B we need to chain another dummy + * copy so the total size will be larger (for ipav3.5) + */ + if ((ipa_get_hw_type() == IPA_HW_v3_5) && len < + IPA_DMA_PREFETCH_WA_THRESHOLD) { + xfer_elem_cons.addr = dest; + xfer_elem_cons.len = len; + xfer_elem_cons.type = GSI_XFER_ELEM_DATA; + xfer_elem_cons.flags = GSI_XFER_FLAG_EOT; + xfer_elem_cons.xfer_user_data = NULL; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_cons, false); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on dest descr res: %d\n", + res); + goto fail_send; + } + xfer_elem_cons.addr = + ipa3_dma_ctx->ipa_dma_dummy_dst_async.phys_base; + xfer_elem_cons.len = 
IPA_DMA_DUMMY_BUFF_SZ; + xfer_elem_cons.type = GSI_XFER_ELEM_DATA; + xfer_elem_cons.flags = GSI_XFER_FLAG_EOT; + xfer_elem_cons.xfer_user_data = xfer_descr; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_cons, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on dummy dest descr res: %d\n", + res); + goto fail_send; + } + + xfer_elem_prod.addr = src; + xfer_elem_prod.len = len; + xfer_elem_prod.type = GSI_XFER_ELEM_DATA; + xfer_elem_prod.flags = GSI_XFER_FLAG_CHAIN; + xfer_elem_prod.xfer_user_data = NULL; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_prod, false); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on src descr res: %d\n", + res); + ipa_assert(); + goto fail_send; + } + xfer_elem_prod.addr = + ipa3_dma_ctx->ipa_dma_dummy_src_async.phys_base; + xfer_elem_prod.len = IPA_DMA_DUMMY_BUFF_SZ; + xfer_elem_prod.type = GSI_XFER_ELEM_DATA; + xfer_elem_prod.flags = GSI_XFER_FLAG_EOT; + xfer_elem_prod.xfer_user_data = NULL; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_prod, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on dummy src descr res: %d\n", + res); + ipa_assert(); + goto fail_send; + } + } else { + + xfer_elem_cons.addr = dest; + xfer_elem_cons.len = len; + xfer_elem_cons.type = GSI_XFER_ELEM_DATA; + xfer_elem_cons.flags = GSI_XFER_FLAG_EOT; + xfer_elem_cons.xfer_user_data = xfer_descr; + res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_cons, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on dummy dest descr res: %d\n", + res); + ipa_assert(); + goto fail_send; + } + xfer_elem_prod.addr = src; + xfer_elem_prod.len = len; + xfer_elem_prod.type = GSI_XFER_ELEM_DATA; + xfer_elem_prod.flags = GSI_XFER_FLAG_EOT; + xfer_elem_prod.xfer_user_data = NULL; + res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1, + &xfer_elem_prod, true); + if (res) { + IPADMA_ERR( + "Failed: gsi_queue_xfer on dummy src descr res: %d\n", + res); + ipa_assert(); + goto fail_send; + } + + } + spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags); + IPADMA_FUNC_EXIT(); + return res; + +fail_send: + list_del(&xfer_descr->link); + spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags); + kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); +fail_mem_alloc: + atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt); + if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending()) + complete(&ipa3_dma_ctx->done); + return res; +} + +/** + * ipa3_dma_uc_memcpy() - Perform a memcpy action using IPA uC + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. 
+ * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enable or + * initialized + * -EBADF: IPA uC is not loaded + */ +int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) +{ + int res; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + if (ipa3_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + + spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags); + if (!ipa3_dma_ctx->is_enabled) { + IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa3_dma_ctx->uc_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags); + + res = ipa3_uc_memcpy(dest, src, len); + if (res) { + IPADMA_ERR("ipa3_uc_memcpy failed %d\n", res); + goto dec_and_exit; + } + + atomic_inc(&ipa3_dma_ctx->total_uc_memcpy); + res = 0; +dec_and_exit: + atomic_dec(&ipa3_dma_ctx->uc_memcpy_pending_cnt); + if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending()) + complete(&ipa3_dma_ctx->done); + IPADMA_FUNC_EXIT(); + return res; +} + +/** + * ipa3_dma_destroy() -teardown IPADMA pipes and release ipadma. + * + * this is a blocking function, returns just after destroying IPADMA. + */ +void ipa3_dma_destroy(void) +{ + int res = 0; + + IPADMA_FUNC_ENTRY(); + if (!ipa3_dma_ctx) { + IPADMA_ERR("IPADMA isn't initialized\n"); + return; + } + + if (ipa3_dma_work_pending()) { + ipa3_dma_ctx->destroy_pending = true; + IPADMA_DBG("There are pending memcpy, wait for completion\n"); + wait_for_completion(&ipa3_dma_ctx->done); + } + + res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_cons_hdl); + if (res) + IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n"); + ipa3_dma_ctx->ipa_dma_async_cons_hdl = 0; + res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_cons_hdl); + if (res) + IPADMA_ERR("teardown IPADMA SYNC CONS failed\n"); + ipa3_dma_ctx->ipa_dma_sync_cons_hdl = 0; + res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_prod_hdl); + if (res) + IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n"); + ipa3_dma_ctx->ipa_dma_async_prod_hdl = 0; + res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_prod_hdl); + if (res) + IPADMA_ERR("teardown IPADMA SYNC PROD failed\n"); + ipa3_dma_ctx->ipa_dma_sync_prod_hdl = 0; + + ipa3_dma_debugfs_destroy(); + kmem_cache_destroy(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache); + kfree(ipa3_dma_ctx); + dma_free_coherent(ipa3_ctx->pdev, IPA_DMA_DUMMY_BUFF_SZ * 4, + ipa3_dma_ctx->ipa_dma_dummy_src_sync.base, + ipa3_dma_ctx->ipa_dma_dummy_src_sync.phys_base); + ipa3_dma_ctx = NULL; + + IPADMA_FUNC_EXIT(); +} + +/** + * ipa3_dma_async_memcpy_notify_cb() - Callback function which will be called + * by IPA driver after getting notify on Rx operation is completed (data was + * written to dest descriptor on async_cons ep). + * + * @priv -not in use. + * @evt - event name - IPA_RECIVE. + * @data -the ipa_mem_buffer. 
+ */ +void ipa3_dma_async_memcpy_notify_cb(void *priv + , enum ipa_dp_evt_type evt, unsigned long data) +{ + int ep_idx = 0; + struct ipa3_dma_xfer_wrapper *xfer_descr_expected; + struct ipa3_sys_context *sys; + unsigned long flags; + struct ipa_mem_buffer *mem_info; + + IPADMA_FUNC_ENTRY(); + + mem_info = (struct ipa_mem_buffer *)data; + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + if (ep_idx < 0) { + IPADMA_ERR("IPA Client mapping failed\n"); + return; + } + sys = ipa3_ctx->ep[ep_idx].sys; + + spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags); + xfer_descr_expected = list_first_entry(&sys->head_desc_list, + struct ipa3_dma_xfer_wrapper, link); + list_del(&xfer_descr_expected->link); + sys->len--; + spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags); + atomic_inc(&ipa3_dma_ctx->total_async_memcpy); + atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt); + xfer_descr_expected->callback(xfer_descr_expected->user1); + + kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, + xfer_descr_expected); + + if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending()) + complete(&ipa3_dma_ctx->done); + + IPADMA_FUNC_EXIT(); +} + +#ifdef CONFIG_DEBUG_FS +static struct dentry *dent; +static struct dentry *dfile_info; + +static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes = 0; + + if (!ipa3_dma_ctx) { + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Not initialized\n"); + } else { + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Status:\n IPADMA is %s\n", + (ipa3_dma_ctx->is_enabled) ? "Enabled" : "Disabled"); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Statistics:\n total sync memcpy: %d\n ", + atomic_read(&ipa3_dma_ctx->total_sync_memcpy)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "total async memcpy: %d\n ", + atomic_read(&ipa3_dma_ctx->total_async_memcpy)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending sync memcpy jobs: %d\n ", + atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending async memcpy jobs: %d\n", + atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending uc memcpy jobs: %d\n", + atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt)); + } + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa3_dma_debugfs_reset_statistics(struct file *file, + const char __user *ubuf, + size_t count, + loff_t *ppos) +{ + unsigned long missing; + s8 in_num = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &in_num)) + return -EFAULT; + switch (in_num) { + case 0: + if (ipa3_dma_work_pending()) + IPADMA_ERR("Note, there are pending memcpy\n"); + + atomic_set(&ipa3_dma_ctx->total_async_memcpy, 0); + atomic_set(&ipa3_dma_ctx->total_sync_memcpy, 0); + break; + default: + IPADMA_ERR("invalid argument: To reset statistics echo 0\n"); + break; + } + return count; +} + +const struct file_operations ipa3_ipadma_stats_ops = { + .read = ipa3_dma_debugfs_read, + .write = ipa3_dma_debugfs_reset_statistics, +}; + +static void ipa3_dma_debugfs_init(void) +{ + const mode_t read_write_mode = 0666; + + dent = 
debugfs_create_dir("ipa_dma", 0); + if (IS_ERR(dent)) { + IPADMA_ERR("fail to create folder ipa_dma\n"); + return; + } + + dfile_info = + debugfs_create_file("info", read_write_mode, dent, + 0, &ipa3_ipadma_stats_ops); + if (!dfile_info || IS_ERR(dfile_info)) { + IPADMA_ERR("fail to create file stats\n"); + goto fail; + } + return; +fail: + debugfs_remove_recursive(dent); +} + +static void ipa3_dma_debugfs_destroy(void) +{ + debugfs_remove_recursive(dent); +} + +#endif /* !CONFIG_DEBUG_FS */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c new file mode 100644 index 000000000000..253d891ba0d1 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -0,0 +1,3807 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipa_trace.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" + +#define IPA_WAN_AGGR_PKT_CNT 5 +#define IPA_LAST_DESC_CNT 0xFFFF +#define POLLING_INACTIVITY_RX 40 +#define POLLING_MIN_SLEEP_RX 1010 +#define POLLING_MAX_SLEEP_RX 1050 +#define POLLING_INACTIVITY_TX 40 +#define POLLING_MIN_SLEEP_TX 400 +#define POLLING_MAX_SLEEP_TX 500 +/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */ +#define IPA_MTU 1500 +#define IPA_GENERIC_AGGR_BYTE_LIMIT 6 +#define IPA_GENERIC_AGGR_TIME_LIMIT 1 +#define IPA_GENERIC_AGGR_PKT_LIMIT 0 + +#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192 +#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\ + (X) + NET_SKB_PAD) +\ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\ + (IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X))) +#define IPA_GENERIC_RX_BUFF_LIMIT (\ + IPA_REAL_GENERIC_RX_BUFF_SZ(\ + IPA_GENERIC_RX_BUFF_BASE_SZ) -\ + IPA_GENERIC_RX_BUFF_BASE_SZ) + +/* less 1 nominal MTU (1500 bytes) rounded to units of KB */ +#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000) + +#define IPA_RX_BUFF_CLIENT_HEADROOM 256 + +#define IPA_WLAN_RX_POOL_SZ 100 +#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5 +#define IPA_WLAN_RX_BUFF_SZ 2048 +#define IPA_WLAN_COMM_RX_POOL_LOW 100 +#define IPA_WLAN_COMM_RX_POOL_HIGH 900 + +#define IPA_ODU_RX_BUFF_SZ 2048 +#define IPA_ODU_RX_POOL_SZ 64 +#define IPA_SIZE_DL_CSUM_META_TRAILER 8 + +#define IPA_GSI_MAX_CH_LOW_WEIGHT 15 +#define IPA_GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */ + +#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10 +/* The below virtual channel cannot be used by any entity */ +#define IPA_GSI_CH_20_WA_VIRT_CHAN 29 + +#define IPA_DEFAULT_SYS_YELLOW_WM 32 +#define IPA_REPL_XFER_THRESH 10 + +#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000) + +static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags); +static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys); +static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys); +static void ipa3_replenish_rx_work_func(struct work_struct *work); +static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys); +static void 
ipa3_wq_handle_rx(struct work_struct *work); +static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size); +static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, + u32 size); +static int ipa3_assign_policy(struct ipa_sys_connect_params *in, + struct ipa3_sys_context *sys); +static void ipa3_cleanup_rx(struct ipa3_sys_context *sys); +static void ipa3_wq_rx_avail(struct work_struct *work); +static void ipa3_alloc_wlan_rx_common_cache(u32 size); +static void ipa3_cleanup_wlan_rx_common_cache(void); +static void ipa3_wq_repl_rx(struct work_struct *work); +static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info); +static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in, + struct ipa3_ep_context *ep); +static int ipa_populate_tag_field(struct ipa3_desc *desc, + struct ipa3_tx_pkt_wrapper *tx_pkt, + struct ipahal_imm_cmd_pyld **tag_pyld_ret); +static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info); +static unsigned long tag_to_pointer_wa(uint64_t tag); +static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt); + +static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit); + +static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys, + struct ipa3_tx_pkt_wrapper *tx_pkt) +{ + struct ipa3_tx_pkt_wrapper *next_pkt; + int i, cnt; + + if (unlikely(tx_pkt == NULL)) { + IPAERR("tx_pkt is NULL\n"); + return; + } + + cnt = tx_pkt->cnt; + IPADBG_LOW("cnt: %d\n", cnt); + for (i = 0; i < cnt; i++) { + spin_lock_bh(&sys->spinlock); + if (unlikely(list_empty(&sys->head_desc_list))) { + spin_unlock_bh(&sys->spinlock); + return; + } + next_pkt = list_next_entry(tx_pkt, link); + list_del(&tx_pkt->link); + sys->len--; + spin_unlock_bh(&sys->spinlock); + if (!tx_pkt->no_unmap_dma) { + if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) { + dma_unmap_single(ipa3_ctx->pdev, + tx_pkt->mem.phys_base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } else { + dma_unmap_page(ipa3_ctx->pdev, + next_pkt->mem.phys_base, + next_pkt->mem.size, + DMA_TO_DEVICE); + } + } + if (tx_pkt->callback) + tx_pkt->callback(tx_pkt->user1, tx_pkt->user2); + + kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt); + tx_pkt = next_pkt; + } +} + +static void ipa3_wq_write_done_status(int src_pipe, + struct ipa3_tx_pkt_wrapper *tx_pkt) +{ + struct ipa3_sys_context *sys; + + WARN_ON(src_pipe >= ipa3_ctx->ipa_num_pipes); + + if (!ipa3_ctx->ep[src_pipe].status.status_en) + return; + + sys = ipa3_ctx->ep[src_pipe].sys; + if (!sys) + return; + + ipa3_wq_write_done_common(sys, tx_pkt); +} + +/** + * ipa_write_done() - this function will be (eventually) called when a Tx + * operation is complete + * * @work: work_struct used by the work queue + * + * Will be called in deferred context. 
+ * - invoke the callback supplied by the client who sent this command + * - iterate over all packets and validate that + * the order for sent packet is the same as expected + * - delete all the tx packet descriptors from the system + * pipe context (not needed anymore) + */ +static void ipa3_wq_write_done(struct work_struct *work) +{ + struct ipa3_tx_pkt_wrapper *tx_pkt; + struct ipa3_sys_context *sys; + struct ipa3_tx_pkt_wrapper *this_pkt; + + tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work); + sys = tx_pkt->sys; + spin_lock_bh(&sys->spinlock); + this_pkt = list_first_entry(&sys->head_desc_list, + struct ipa3_tx_pkt_wrapper, link); + while (tx_pkt != this_pkt) { + spin_unlock_bh(&sys->spinlock); + ipa3_wq_write_done_common(sys, this_pkt); + spin_lock_bh(&sys->spinlock); + this_pkt = list_first_entry(&sys->head_desc_list, + struct ipa3_tx_pkt_wrapper, link); + } + spin_unlock_bh(&sys->spinlock); + ipa3_wq_write_done_common(sys, tx_pkt); +} + + +static void ipa3_send_nop_desc(struct work_struct *work) +{ + struct ipa3_sys_context *sys = container_of(work, + struct ipa3_sys_context, work); + struct gsi_xfer_elem nop_xfer; + struct ipa3_tx_pkt_wrapper *tx_pkt; + + IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl); + tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL); + if (!tx_pkt) { + queue_work(sys->wq, &sys->work); + return; + } + + INIT_LIST_HEAD(&tx_pkt->link); + tx_pkt->cnt = 1; + INIT_WORK(&tx_pkt->work, ipa3_wq_write_done); + tx_pkt->no_unmap_dma = true; + tx_pkt->sys = sys; + spin_lock_bh(&sys->spinlock); + list_add_tail(&tx_pkt->link, &sys->head_desc_list); + spin_unlock_bh(&sys->spinlock); + + memset(&nop_xfer, 0, sizeof(nop_xfer)); + nop_xfer.type = GSI_XFER_ELEM_NOP; + nop_xfer.flags = GSI_XFER_FLAG_EOT; + nop_xfer.xfer_user_data = tx_pkt; + if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) { + IPAERR("gsi_queue_xfer for ch:%lu failed\n", + sys->ep->gsi_chan_hdl); + queue_work(sys->wq, &sys->work); + return; + } + sys->len_pending_xfer = 0; + +} + + +/** + * ipa3_send() - Send multiple descriptors in one HW transaction + * @sys: system pipe context + * @num_desc: number of packets + * @desc: packets to send (may be immediate command or data) + * @in_atomic: whether caller is in atomic context + * + * This function is used for GPI connection. 
+ * - ipa3_tx_pkt_wrapper will be used for each ipa + * descriptor (allocated from wrappers cache) + * - The wrapper struct will be configured for each ipa-desc payload and will + * contain information which will be later used by the user callbacks + * - Each packet (command or data) that will be sent will also be saved in + * ipa3_sys_context for later check that all data was sent + * + * Return codes: 0: success, -EFAULT: failure + */ +int ipa3_send(struct ipa3_sys_context *sys, + u32 num_desc, + struct ipa3_desc *desc, + bool in_atomic) +{ + struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first; + struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL; + struct ipa3_tx_pkt_wrapper *next_pkt; + struct gsi_xfer_elem *gsi_xfer_elem_array = NULL; + int i = 0; + int j; + int result; + u32 mem_flag = GFP_ATOMIC; + const struct ipa_gsi_ep_config *gsi_ep_cfg; + + if (unlikely(!in_atomic)) + mem_flag = GFP_KERNEL; + + gsi_ep_cfg = ipa3_get_gsi_ep_info(sys->ep->client); + if (unlikely(!gsi_ep_cfg)) { + IPAERR("failed to get gsi EP config for client=%d\n", + sys->ep->client); + return -EFAULT; + } + if (unlikely(num_desc > gsi_ep_cfg->ipa_if_tlv)) { + IPAERR("Too many chained descriptors need=%d max=%d\n", + num_desc, gsi_ep_cfg->ipa_if_tlv); + WARN_ON(1); + return -EPERM; + } + + gsi_xfer_elem_array = + kzalloc(num_desc * sizeof(struct gsi_xfer_elem), + mem_flag); + if (!gsi_xfer_elem_array) + return -ENOMEM; + + spin_lock_bh(&sys->spinlock); + + for (i = 0; i < num_desc; i++) { + tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, + mem_flag); + if (!tx_pkt) + goto failure; + + INIT_LIST_HEAD(&tx_pkt->link); + + if (i == 0) { + tx_pkt_first = tx_pkt; + tx_pkt->cnt = num_desc; + INIT_WORK(&tx_pkt->work, ipa3_wq_write_done); + } + + /* populate tag field */ + if (desc[i].is_tag_status) { + if (ipa_populate_tag_field(&desc[i], tx_pkt, + &tag_pyld_ret)) { + IPAERR("Failed to populate tag field\n"); + goto failure_dma_map; + } + } + + tx_pkt->type = desc[i].type; + + if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) { + tx_pkt->mem.base = desc[i].pyld; + tx_pkt->mem.size = desc[i].len; + + if (!desc[i].dma_address_valid) { + tx_pkt->mem.phys_base = + dma_map_single(ipa3_ctx->pdev, + tx_pkt->mem.base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } else { + tx_pkt->mem.phys_base = + desc[i].dma_address; + tx_pkt->no_unmap_dma = true; + } + } else { + tx_pkt->mem.base = desc[i].frag; + tx_pkt->mem.size = desc[i].len; + + if (!desc[i].dma_address_valid) { + tx_pkt->mem.phys_base = + skb_frag_dma_map(ipa3_ctx->pdev, + desc[i].frag, + 0, tx_pkt->mem.size, + DMA_TO_DEVICE); + } else { + tx_pkt->mem.phys_base = + desc[i].dma_address; + tx_pkt->no_unmap_dma = true; + } + } + if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) { + IPAERR("failed to do dma map.\n"); + goto failure_dma_map; + } + + tx_pkt->sys = sys; + tx_pkt->callback = desc[i].callback; + tx_pkt->user1 = desc[i].user1; + tx_pkt->user2 = desc[i].user2; + + list_add_tail(&tx_pkt->link, &sys->head_desc_list); + + gsi_xfer_elem_array[i].addr = tx_pkt->mem.phys_base; + + /* + * Special treatment for immediate commands, where + * the structure of the descriptor is different + */ + if (desc[i].type == IPA_IMM_CMD_DESC) { + gsi_xfer_elem_array[i].len = desc[i].opcode; + gsi_xfer_elem_array[i].type = + GSI_XFER_ELEM_IMME_CMD; + } else { + gsi_xfer_elem_array[i].len = desc[i].len; + gsi_xfer_elem_array[i].type = + GSI_XFER_ELEM_DATA; + } + + if (i == (num_desc - 1)) { + if (!sys->use_comm_evt_ring) { + gsi_xfer_elem_array[i].flags |= + GSI_XFER_FLAG_EOT; + 
gsi_xfer_elem_array[i].flags |= + GSI_XFER_FLAG_BEI; + } + gsi_xfer_elem_array[i].xfer_user_data = + tx_pkt_first; + } else { + gsi_xfer_elem_array[i].flags |= + GSI_XFER_FLAG_CHAIN; + } + } + + IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl); + result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc, + gsi_xfer_elem_array, true); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("GSI xfer failed.\n"); + goto failure; + } + kfree(gsi_xfer_elem_array); + + spin_unlock_bh(&sys->spinlock); + + /* set the timer for sending the NOP descriptor */ + if (sys->use_comm_evt_ring && !hrtimer_active(&sys->db_timer)) { + ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS); + + IPADBG_LOW("scheduling timer for ch %lu\n", + sys->ep->gsi_chan_hdl); + hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL); + } + + return 0; + +failure_dma_map: + kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt); + +failure: + ipahal_destroy_imm_cmd(tag_pyld_ret); + tx_pkt = tx_pkt_first; + for (j = 0; j < i; j++) { + next_pkt = list_next_entry(tx_pkt, link); + list_del(&tx_pkt->link); + + if (!tx_pkt->no_unmap_dma) { + if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) { + dma_unmap_single(ipa3_ctx->pdev, + tx_pkt->mem.phys_base, + tx_pkt->mem.size, DMA_TO_DEVICE); + } else { + dma_unmap_page(ipa3_ctx->pdev, + tx_pkt->mem.phys_base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } + } + kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt); + tx_pkt = next_pkt; + } + + kfree(gsi_xfer_elem_array); + + spin_unlock_bh(&sys->spinlock); + return -EFAULT; +} + +/** + * ipa3_send_one() - Send a single descriptor + * @sys: system pipe context + * @desc: descriptor to send + * @in_atomic: whether caller is in atomic context + * + * - Allocate tx_packet wrapper + * - transfer data to the IPA + * - after the transfer was done the SPS will + * notify the sending user via ipa_sps_irq_comp_tx() + * + * Return codes: 0: success, -EFAULT: failure + */ +int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc, + bool in_atomic) +{ + return ipa3_send(sys, 1, desc, in_atomic); +} + +/** + * ipa3_transport_irq_cmd_ack - callback function which will be called by + * the transport driver after an immediate command is complete. + * @user1: pointer to the descriptor of the transfer + * @user2: + * + * Complete the immediate commands completion object, this will release the + * thread which waits on this completion object (ipa3_send_cmd()) + */ +static void ipa3_transport_irq_cmd_ack(void *user1, int user2) +{ + struct ipa3_desc *desc = (struct ipa3_desc *)user1; + + if (WARN(!desc, "desc is NULL")) + return; + + IPADBG_LOW("got ack for cmd=%d\n", desc->opcode); + complete(&desc->xfer_done); +} + +/** + * ipa3_transport_irq_cmd_ack_free - callback function which will be + * called by the transport driver after an immediate command is complete. + * This function will also free the completion object once it is done. 
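/*
 * To summarize the flag layout built by ipa3_send() above: every element but
 * the last is marked CHAIN so GSI treats the whole array as one transfer, and
 * only the last element carries completion semantics - EOT (plus BEI) when no
 * common event ring is used, otherwise no per-element interrupt at all and
 * completion is driven by the NOP descriptor. Illustrative sketch only, not
 * driver code.
 */
static void demo_flag_chain(struct gsi_xfer_elem *xfer, int num,
			    bool use_comm_evt_ring)
{
	int i;

	for (i = 0; i < num; i++) {
		if (i == num - 1) {
			if (!use_comm_evt_ring) {
				/* last element completes the chain */
				xfer[i].flags |= GSI_XFER_FLAG_EOT;
				xfer[i].flags |= GSI_XFER_FLAG_BEI;
			}
			/* completion cookie rides on the last element */
		} else {
			/* earlier elements are chained to the following one */
			xfer[i].flags |= GSI_XFER_FLAG_CHAIN;
		}
	}
}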
+ * @tag_comp: pointer to the completion object + * @ignored: parameter not used + * + * Complete the immediate commands completion object, this will release the + * thread which waits on this completion object (ipa3_send_cmd()) + */ +static void ipa3_transport_irq_cmd_ack_free(void *tag_comp, int ignored) +{ + struct ipa3_tag_completion *comp = tag_comp; + + if (!comp) { + IPAERR("comp is NULL\n"); + return; + } + + complete(&comp->comp); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); +} + +/** + * ipa3_send_cmd - send immediate commands + * @num_desc: number of descriptors within the desc struct + * @descr: descriptor structure + * + * Function will block till command gets ACK from IPA HW, caller needs + * to free any resources it allocated after function returns + * The callback in ipa3_desc should not be set by the caller + * for this function. + */ +int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr) +{ + struct ipa3_desc *desc; + int i, result = 0; + struct ipa3_sys_context *sys; + int ep_idx; + + for (i = 0; i < num_desc; i++) + IPADBG("sending imm cmd %d\n", descr[i].opcode); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (-1 == ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_CMD_PROD); + return -EFAULT; + } + + sys = ipa3_ctx->ep[ep_idx].sys; + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (num_desc == 1) { + init_completion(&descr->xfer_done); + + if (descr->callback || descr->user1) + WARN_ON(1); + + descr->callback = ipa3_transport_irq_cmd_ack; + descr->user1 = descr; + if (ipa3_send_one(sys, descr, true)) { + IPAERR("fail to send immediate command\n"); + result = -EFAULT; + goto bail; + } + wait_for_completion(&descr->xfer_done); + } else { + desc = &descr[num_desc - 1]; + init_completion(&desc->xfer_done); + + if (desc->callback || desc->user1) + WARN_ON(1); + + desc->callback = ipa3_transport_irq_cmd_ack; + desc->user1 = desc; + if (ipa3_send(sys, num_desc, descr, true)) { + IPAERR("fail to send multiple immediate command set\n"); + result = -EFAULT; + goto bail; + } + wait_for_completion(&desc->xfer_done); + } + +bail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +/** + * ipa3_send_cmd_timeout - send immediate commands with limited time + * waiting for ACK from IPA HW + * @num_desc: number of descriptors within the desc struct + * @descr: descriptor structure + * @timeout: millisecond to wait till get ACK from IPA HW + * + * Function will block till command gets ACK from IPA HW or timeout. + * Caller needs to free any resources it allocated after function returns + * The callback in ipa3_desc should not be set by the caller + * for this function. 
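/*
 * The timeout variant below relies on a small reference-counting idiom: the
 * completion object is allocated with a count of two, and whichever of the
 * waiter or the transport ACK callback finishes last frees it, so a late ACK
 * arriving after a timeout never touches freed memory. Stripped-down sketch
 * of the idiom (all names hypothetical, not driver code):
 */
struct demo_tag_completion {
	struct completion comp;
	atomic_t cnt;
};

/* runs from the transport completion path */
static void demo_ack_cb(void *priv, int unused)
{
	struct demo_tag_completion *c = priv;

	complete(&c->comp);
	if (atomic_dec_return(&c->cnt) == 0)
		kfree(c);	/* the waiter already dropped its reference */
}

/* runs in the sending thread */
static int demo_wait_for_ack(struct demo_tag_completion *c, u32 timeout_ms)
{
	int done = wait_for_completion_timeout(&c->comp,
					       msecs_to_jiffies(timeout_ms));

	if (atomic_dec_return(&c->cnt) == 0)
		kfree(c);	/* the ACK callback already dropped its ref */
	return done ? 0 : -ETIMEDOUT;
}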
+ */ +int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout) +{ + struct ipa3_desc *desc; + int i, result = 0; + struct ipa3_sys_context *sys; + int ep_idx; + int completed; + struct ipa3_tag_completion *comp; + + for (i = 0; i < num_desc; i++) + IPADBG("sending imm cmd %d\n", descr[i].opcode); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (-1 == ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_CMD_PROD); + return -EFAULT; + } + + comp = kzalloc(sizeof(*comp), GFP_ATOMIC); + if (!comp) + return -ENOMEM; + + init_completion(&comp->comp); + + /* completion needs to be released from both here and in ack callback */ + atomic_set(&comp->cnt, 2); + + sys = ipa3_ctx->ep[ep_idx].sys; + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (num_desc == 1) { + if (descr->callback || descr->user1) + WARN_ON(1); + + descr->callback = ipa3_transport_irq_cmd_ack_free; + descr->user1 = comp; + if (ipa3_send_one(sys, descr, true)) { + IPAERR("fail to send immediate command\n"); + kfree(comp); + result = -EFAULT; + goto bail; + } + } else { + desc = &descr[num_desc - 1]; + + if (desc->callback || desc->user1) + WARN_ON(1); + + desc->callback = ipa3_transport_irq_cmd_ack_free; + desc->user1 = comp; + if (ipa3_send(sys, num_desc, descr, true)) { + IPAERR("fail to send multiple immediate command set\n"); + kfree(comp); + result = -EFAULT; + goto bail; + } + } + + completed = wait_for_completion_timeout( + &comp->comp, msecs_to_jiffies(timeout)); + if (!completed) + IPADBG("timeout waiting for imm-cmd ACK\n"); + + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + +bail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +/** + * ipa3_handle_rx_core() - The core functionality of packet reception. This + * function is read from multiple code paths. + * + * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN + * endpoint. The function runs as long as there are packets in the pipe. + * For each packet: + * - Disconnect the packet from the system pipe linked list + * - Unmap the packets skb, make it non DMAable + * - Free the packet from the cache + * - Prepare a proper skb + * - Call the endpoints notify function, passing the skb in the parameters + * - Replenish the rx cache + */ +static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all, + bool in_poll_state) +{ + int ret; + int cnt = 0; + struct ipa_mem_buffer mem_info = { 0 }; + + while ((in_poll_state ? 
atomic_read(&sys->curr_polling_state) : + !atomic_read(&sys->curr_polling_state))) { + if (cnt && !process_all) + break; + + ret = ipa_poll_gsi_pkt(sys, &mem_info); + if (ret) + break; + + if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client)) + ipa3_dma_memcpy_notify(sys, &mem_info); + else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client)) + ipa3_wlan_wq_rx_common(sys, mem_info.size); + else + ipa3_wq_rx_common(sys, mem_info.size); + + ++cnt; + } + return cnt; +} + +/** + * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode + */ +static void ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys) +{ + int ret; + + if (!atomic_read(&sys->curr_polling_state)) { + IPAERR("already in intr mode\n"); + goto fail; + } + atomic_set(&sys->curr_polling_state, 0); + ipa3_dec_release_wakelock(); + ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl, + GSI_CHAN_MODE_CALLBACK); + if (ret != GSI_STATUS_SUCCESS) { + IPAERR("Failed to switch to intr mode.\n"); + goto fail; + } + return; + +fail: + queue_delayed_work(sys->wq, &sys->switch_to_intr_work, + msecs_to_jiffies(1)); +} + +/** + * ipa3_handle_rx() - handle packet reception. This function is executed in the + * context of a work queue. + * @work: work struct needed by the work queue + * + * ipa3_handle_rx_core() is run in polling mode. After all packets has been + * received, the driver switches back to interrupt mode. + */ +static void ipa3_handle_rx(struct ipa3_sys_context *sys) +{ + int inactive_cycles = 0; + int cnt; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + do { + cnt = ipa3_handle_rx_core(sys, true, true); + if (cnt == 0) + inactive_cycles++; + else + inactive_cycles = 0; + + trace_idle_sleep_enter3(sys->ep->client); + usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX); + trace_idle_sleep_exit3(sys->ep->client); + + /* + * if pipe is out of buffers there is no point polling for + * completed descs; release the worker so delayed work can + * run in a timely manner + */ + if (sys->len - sys->len_pending_xfer == 0) + break; + + } while (inactive_cycles <= POLLING_INACTIVITY_RX); + + trace_poll_to_intr3(sys->ep->client); + ipa3_rx_switch_to_intr_mode(sys); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work) +{ + struct delayed_work *dwork; + struct ipa3_sys_context *sys; + + dwork = container_of(work, struct delayed_work, work); + sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work); + + if (sys->ep->napi_enabled) { + /* interrupt mode is done in ipa3_rx_poll context */ + ipa_assert(); + } else + ipa3_handle_rx(sys); +} + +enum hrtimer_restart ipa3_ring_doorbell_timer_fn(struct hrtimer *param) +{ + struct ipa3_sys_context *sys = container_of(param, + struct ipa3_sys_context, db_timer); + + queue_work(sys->wq, &sys->work); + return HRTIMER_NORESTART; +} + +/** + * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform + * IPA EP configuration + * @sys_in: [in] input needed to setup the pipe and configure EP + * @clnt_hdl: [out] client handle + * + * - configure the end-point registers with the supplied + * parameters from the user. + * - Creates a GPI connection with IPA. 
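/*
 * For NAPI-enabled consumer pipes the switch back to interrupt mode is not
 * performed here but from the client's poll context (hence the ipa_assert()
 * in ipa3_switch_to_intr_rx_work_func() above). Rough sketch of the client
 * side, assuming the exported ipa_rx_poll() helper and the
 * IPA_CLIENT_START_POLL/IPA_CLIENT_COMP_NAPI events keep the semantics used
 * by this snapshot's clients; struct demo_ctx and all demo_* names are
 * illustrative only.
 */
struct demo_ctx {
	struct napi_struct napi;
	u32 cons_hdl;
};

static void demo_ipa_notify(void *priv, enum ipa_dp_evt_type evt,
			    unsigned long data)
{
	struct demo_ctx *ctx = priv;

	switch (evt) {
	case IPA_RECEIVE:
		netif_receive_skb((struct sk_buff *)data);
		break;
	case IPA_CLIENT_START_POLL:
		/* IPA left interrupt mode; let NAPI drive the draining */
		napi_schedule(&ctx->napi);
		break;
	case IPA_CLIENT_COMP_NAPI:
		/* IPA switched back to interrupt mode */
		napi_complete(&ctx->napi);
		break;
	default:
		break;
	}
}

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	struct demo_ctx *ctx = container_of(napi, struct demo_ctx, napi);

	/* drains up to budget completed rx buffers; the driver re-arms the
	 * channel interrupt and signals IPA_CLIENT_COMP_NAPI when the pipe
	 * runs dry
	 */
	return ipa_rx_poll(ctx->cons_hdl, budget);
}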
+ * - allocate descriptor FIFO + * + * Returns: 0 on success, negative on failure + */ +int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) +{ + struct ipa3_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + char buff[IPA_RESOURCE_NAME_MAX]; + + if (sys_in == NULL || clnt_hdl == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + + if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) { + IPAERR("bad parm client:%d fifo_sz:%d\n", + sys_in->client, sys_in->desc_fifo_sz); + goto fail_gen; + } + + ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + goto fail_gen; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (ep->valid == 1) { + IPAERR("EP %d already allocated.\n", ipa_ep_idx); + goto fail_gen; + } + + IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client); + memset(ep, 0, offsetof(struct ipa3_ep_context, sys)); + + if (!ep->sys) { + ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL); + if (!ep->sys) { + IPAERR("failed to sys ctx for client %d\n", + sys_in->client); + result = -ENOMEM; + goto fail_and_disable_clocks; + } + + ep->sys->ep = ep; + snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d", + sys_in->client); + ep->sys->wq = alloc_workqueue(buff, + WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1); + + if (!ep->sys->wq) { + IPAERR("failed to create wq for client %d\n", + sys_in->client); + result = -EFAULT; + goto fail_wq; + } + + snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d", + sys_in->client); + ep->sys->repl_wq = alloc_workqueue(buff, + WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1); + if (!ep->sys->repl_wq) { + IPAERR("failed to create rep wq for client %d\n", + sys_in->client); + result = -EFAULT; + goto fail_wq2; + } + + INIT_LIST_HEAD(&ep->sys->head_desc_list); + INIT_LIST_HEAD(&ep->sys->rcycl_list); + spin_lock_init(&ep->sys->spinlock); + hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn; + } else { + memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep)); + } + + ep->skip_ep_cfg = sys_in->skip_ep_cfg; + if (ipa3_assign_policy(sys_in, ep->sys)) { + IPAERR("failed to sys ctx for client %d\n", sys_in->client); + result = -ENOMEM; + goto fail_gen2; + } + + ep->valid = 1; + ep->client = sys_in->client; + ep->client_notify = sys_in->notify; + ep->napi_enabled = sys_in->napi_enabled; + ep->priv = sys_in->priv; + ep->keep_ipa_awake = sys_in->keep_ipa_awake; + atomic_set(&ep->avail_fifo_desc, + ((sys_in->desc_fifo_sz / IPA_FIFO_ELEMENT_SIZE) - 1)); + + if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) && + ep->sys->status_stat == NULL) { + ep->sys->status_stat = + kzalloc(sizeof(struct ipa3_status_stats), GFP_KERNEL); + if (!ep->sys->status_stat) + goto fail_gen2; + } + + if (!ep->skip_ep_cfg) { + if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_gen2; + } + if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_gen2; + } + IPADBG("ep %d configuration successful\n", ipa_ep_idx); + } else { + IPADBG("skipping ep %d configuration\n", ipa_ep_idx); + } + + result = ipa_gsi_setup_channel(sys_in, ep); + if (result) { + IPAERR("Failed to setup GSI channel\n"); + goto fail_gen2; + } + + *clnt_hdl = ipa_ep_idx; + + if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) { + ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1; + ep->sys->repl.cache = kcalloc(ep->sys->repl.capacity, + sizeof(void *), 
GFP_KERNEL); + if (!ep->sys->repl.cache) { + IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx); + ep->sys->repl_hdlr = ipa3_replenish_rx_cache; + ep->sys->repl.capacity = 0; + } else { + atomic_set(&ep->sys->repl.head_idx, 0); + atomic_set(&ep->sys->repl.tail_idx, 0); + ipa3_wq_repl_rx(&ep->sys->repl_work); + } + } + + if (IPA_CLIENT_IS_CONS(sys_in->client)) + ipa3_replenish_rx_cache(ep->sys); + + if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) { + ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW); + atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt); + } + + ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) { + if (ipa3_ctx->modem_cfg_emb_pipe_flt && + sys_in->client == IPA_CLIENT_APPS_WAN_PROD) + IPADBG("modem cfg emb pipe flt\n"); + else + ipa3_install_dflt_flt_rules(ipa_ep_idx); + } + + result = ipa3_enable_data_path(ipa_ep_idx); + if (result) { + IPAERR("enable data path failed res=%d ep=%d.\n", result, + ipa_ep_idx); + goto fail_gen2; + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client, + ipa_ep_idx, ep->sys); + + return 0; + +fail_gen2: + destroy_workqueue(ep->sys->repl_wq); +fail_wq2: + destroy_workqueue(ep->sys->wq); +fail_wq: + kfree(ep->sys); + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); +fail_and_disable_clocks: + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); +fail_gen: + return result; +} + +/** + * ipa3_teardown_sys_pipe() - Teardown the GPI pipe and cleanup IPA EP + * @clnt_hdl: [in] the handle obtained from ipa3_setup_sys_pipe + * + * Returns: 0 on success, negative on failure + */ +int ipa3_teardown_sys_pipe(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + int empty; + int result; + int i; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipa3_disable_data_path(clnt_hdl); + if (ep->napi_enabled) { + do { + usleep_range(95, 105); + } while (atomic_read(&ep->sys->curr_polling_state)); + } + + if (IPA_CLIENT_IS_PROD(ep->client)) { + do { + spin_lock_bh(&ep->sys->spinlock); + empty = list_empty(&ep->sys->head_desc_list); + spin_unlock_bh(&ep->sys->spinlock); + if (!empty) + usleep_range(95, 105); + else + break; + } while (1); + } + + if (IPA_CLIENT_IS_CONS(ep->client)) + cancel_delayed_work_sync(&ep->sys->replenish_rx_work); + flush_workqueue(ep->sys->wq); + /* channel stop might fail on timeout if IPA is busy */ + for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) { + result = ipa3_stop_gsi_channel(clnt_hdl); + if (result == GSI_STATUS_SUCCESS) + break; + + if (result != -GSI_STATUS_AGAIN && + result != -GSI_STATUS_TIMED_OUT) + break; + } + + if (result != GSI_STATUS_SUCCESS) { + IPAERR("GSI stop chan err: %d.\n", result); + ipa_assert(); + return result; + } + result = ipa3_reset_gsi_channel(clnt_hdl); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("Failed to reset chan: %d.\n", result); + ipa_assert(); + return result; + } + dma_free_coherent(ipa3_ctx->pdev, + ep->gsi_mem_info.chan_ring_len, + ep->gsi_mem_info.chan_ring_base_vaddr, + ep->gsi_mem_info.chan_ring_base_addr); + result = gsi_dealloc_channel(ep->gsi_chan_hdl); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("Failed to dealloc chan: %d.\n", result); + ipa_assert(); + return result; + } + + /* free event ring 
only when it is present */ + if (ep->sys->use_comm_evt_ring) { + ipa3_ctx->gsi_evt_comm_ring_rem += + ep->gsi_mem_info.chan_ring_len; + } else if (ep->gsi_evt_ring_hdl != ~0) { + result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl); + if (WARN(result != GSI_STATUS_SUCCESS, "reset evt %d", result)) + return result; + + dma_free_coherent(ipa3_ctx->pdev, + ep->gsi_mem_info.evt_ring_len, + ep->gsi_mem_info.evt_ring_base_vaddr, + ep->gsi_mem_info.evt_ring_base_addr); + result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); + if (WARN(result != GSI_STATUS_SUCCESS, "deall evt %d", result)) + return result; + } + if (ep->sys->repl_wq) + flush_workqueue(ep->sys->repl_wq); + if (IPA_CLIENT_IS_CONS(ep->client)) + ipa3_cleanup_rx(ep->sys); + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) { + if (ipa3_ctx->modem_cfg_emb_pipe_flt && + ep->client == IPA_CLIENT_APPS_WAN_PROD) + IPADBG("modem cfg emb pipe flt\n"); + else + ipa3_delete_dflt_flt_rules(clnt_hdl); + } + + if (IPA_CLIENT_IS_WLAN_CONS(ep->client)) + atomic_dec(&ipa3_ctx->wc_memb.active_clnt_cnt); + + memset(&ep->wstats, 0, sizeof(struct ipa3_wlan_stats)); + + if (!atomic_read(&ipa3_ctx->wc_memb.active_clnt_cnt)) + ipa3_cleanup_wlan_rx_common_cache(); + + ep->valid = 0; + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + return 0; +} + +/** + * ipa3_tx_comp_usr_notify_release() - Callback function which will call the + * user supplied callback function to release the skb, or release it on + * its own if no callback function was supplied. + * @user1 + * @user2 + * + * This notified callback is for the destination client. + */ +static void ipa3_tx_comp_usr_notify_release(void *user1, int user2) +{ + struct sk_buff *skb = (struct sk_buff *)user1; + int ep_idx = user2; + + IPADBG_LOW("skb=%pK ep=%d\n", skb, ep_idx); + + IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_pkts_compl); + + if (ipa3_ctx->ep[ep_idx].client_notify) + ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv, + IPA_WRITE_DONE, (unsigned long)skb); + else + dev_kfree_skb_any(skb); +} + +void ipa3_tx_cmd_comp(void *user1, int user2) +{ + ipahal_destroy_imm_cmd(user1); +} + +/** + * ipa3_tx_dp() - Data-path tx handler + * @dst: [in] which IPA destination to route tx packets to + * @skb: [in] the packet to send + * @metadata: [in] TX packet meta-data + * + * Data-path tx handler, this is used for both SW data-path which by-passes most + * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If + * dst is a "valid" CONS type, then SW data-path is used. If dst is the + * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else + * is an error. For errors, client needs to free the skb as needed. For success, + * IPA driver will later invoke client callback if one was supplied. That + * callback should free the skb. If no callback supplied, IPA driver will free + * the skb internally + * + * The function will use two descriptors for this send command + * (for A5_WLAN_AMPDU_PROD only one desciprtor will be sent), + * the first descriptor will be used to inform the IPA hardware that + * apps need to push data into the IPA (IP_PACKET_INIT immediate command). + * Once this send was done from transport point-of-view the IPA driver will + * get notified by the supplied callback. 
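/*
 * Putting ipa3_setup_sys_pipe()/ipa3_teardown_sys_pipe() in context: clients
 * normally reach them through the exported ipa_setup_sys_pipe()/
 * ipa_teardown_sys_pipe() wrappers. Rough caller sketch; the client type,
 * FIFO size and callback below are illustrative choices, not values mandated
 * by the driver.
 */
static int demo_connect_lan_cons(void *priv,
				 void (*notify)(void *priv,
						enum ipa_dp_evt_type evt,
						unsigned long data),
				 u32 *hdl)
{
	struct ipa_sys_connect_params sys_in;

	memset(&sys_in, 0, sizeof(sys_in));
	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
	sys_in.desc_fifo_sz = 0x800;	/* must be non-zero */
	sys_in.priv = priv;
	sys_in.notify = notify;

	return ipa_setup_sys_pipe(&sys_in, hdl);
}

static void demo_disconnect(u32 hdl)
{
	/* drains pending descriptors, stops/resets and deallocates the GSI
	 * channel, then releases the EP
	 */
	if (ipa_teardown_sys_pipe(hdl))
		pr_err("demo: teardown of pipe %u failed\n", hdl);
}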
+ * + * Returns: 0 on success, negative on failure + */ +int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *meta) +{ + struct ipa3_desc *desc; + struct ipa3_desc _desc[3]; + int dst_ep_idx; + struct ipahal_imm_cmd_pyld *cmd_pyld = NULL; + struct ipa3_sys_context *sys; + int src_ep_idx; + int num_frags, f; + const struct ipa_gsi_ep_config *gsi_ep; + int data_idx; + + if (unlikely(!ipa3_ctx)) { + IPAERR("IPA3 driver was not initialized\n"); + return -EINVAL; + } + + if (skb->len == 0) { + IPAERR("packet size is 0\n"); + return -EINVAL; + } + + /* + * USB_CONS: PKT_INIT ep_idx = dst pipe + * Q6_CONS: PKT_INIT ep_idx = sender pipe + * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe + * + * LAN TX: all PKT_INIT + * WAN TX: PKT_INIT (cmd) + HW (data) + * + */ + if (IPA_CLIENT_IS_CONS(dst)) { + src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD); + if (-1 == src_ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_LAN_PROD); + goto fail_gen; + } + dst_ep_idx = ipa3_get_ep_mapping(dst); + } else { + src_ep_idx = ipa3_get_ep_mapping(dst); + if (-1 == src_ep_idx) { + IPAERR("Client %u is not mapped\n", dst); + goto fail_gen; + } + if (meta && meta->pkt_init_dst_ep_valid) + dst_ep_idx = meta->pkt_init_dst_ep; + else + dst_ep_idx = -1; + } + + sys = ipa3_ctx->ep[src_ep_idx].sys; + + if (!sys->ep->valid) { + IPAERR("pipe not valid\n"); + goto fail_gen; + } + + num_frags = skb_shinfo(skb)->nr_frags; + /* + * make sure TLV FIFO supports the needed frags. + * 2 descriptors are needed for IP_PACKET_INIT and TAG_STATUS. + * 1 descriptor needed for the linear portion of skb. + */ + gsi_ep = ipa3_get_gsi_ep_info(ipa3_ctx->ep[src_ep_idx].client); + if (gsi_ep && (num_frags + 3 > gsi_ep->ipa_if_tlv)) { + if (skb_linearize(skb)) { + IPAERR("Failed to linear skb with %d frags\n", + num_frags); + goto fail_gen; + } + num_frags = 0; + } + if (num_frags) { + /* 1 desc for tag to resolve status out-of-order issue; + * 1 desc is needed for the linear portion of skb; + * 1 desc may be needed for the PACKET_INIT; + * 1 desc for each frag + */ + desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC); + if (!desc) { + IPAERR("failed to alloc desc array\n"); + goto fail_gen; + } + } else { + memset(_desc, 0, 3 * sizeof(struct ipa3_desc)); + desc = &_desc[0]; + } + + if (dst_ep_idx != -1) { + /* SW data path */ + data_idx = 0; + if (sys->policy == IPA_POLICY_NOINTR_MODE) { + /* + * For non-interrupt mode channel (where there is no + * event ring) TAG STATUS are used for completion + * notification. IPA will generate a status packet with + * tag info as a result of the TAG STATUS command. + */ + desc[data_idx].is_tag_status = true; + data_idx++; + } + desc[data_idx].opcode = ipa3_ctx->pkt_init_imm_opcode; + desc[data_idx].dma_address_valid = true; + desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx]; + desc[data_idx].type = IPA_IMM_CMD_DESC; + desc[data_idx].callback = NULL; + data_idx++; + desc[data_idx].pyld = skb->data; + desc[data_idx].len = skb_headlen(skb); + desc[data_idx].type = IPA_DATA_DESC_SKB; + desc[data_idx].callback = ipa3_tx_comp_usr_notify_release; + desc[data_idx].user1 = skb; + desc[data_idx].user2 = (meta && meta->pkt_init_dst_ep_valid && + meta->pkt_init_dst_ep_remote) ? 
+ src_ep_idx : + dst_ep_idx; + if (meta && meta->dma_address_valid) { + desc[data_idx].dma_address_valid = true; + desc[data_idx].dma_address = meta->dma_address; + } + data_idx++; + + for (f = 0; f < num_frags; f++) { + desc[data_idx + f].frag = &skb_shinfo(skb)->frags[f]; + desc[data_idx + f].type = IPA_DATA_DESC_SKB_PAGED; + desc[data_idx + f].len = + skb_frag_size(desc[data_idx + f].frag); + } + /* don't free skb till frag mappings are released */ + if (num_frags) { + desc[data_idx + f - 1].callback = desc[2].callback; + desc[data_idx + f - 1].user1 = desc[2].user1; + desc[data_idx + f - 1].user2 = desc[2].user2; + desc[data_idx - 1].callback = NULL; + } + + if (ipa3_send(sys, num_frags + data_idx, desc, true)) { + IPAERR("fail to send skb %pK num_frags %u SWP\n", + skb, num_frags); + goto fail_send; + } + IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts); + } else { + /* HW data path */ + data_idx = 0; + if (sys->policy == IPA_POLICY_NOINTR_MODE) { + /* + * For non-interrupt mode channel (where there is no + * event ring) TAG STATUS are used for completion + * notification. IPA will generate a status packet with + * tag info as a result of the TAG STATUS command. + */ + desc[data_idx].is_tag_status = true; + data_idx++; + } + desc[data_idx].pyld = skb->data; + desc[data_idx].len = skb_headlen(skb); + desc[data_idx].type = IPA_DATA_DESC_SKB; + desc[data_idx].callback = ipa3_tx_comp_usr_notify_release; + desc[data_idx].user1 = skb; + desc[data_idx].user2 = src_ep_idx; + + if (meta && meta->dma_address_valid) { + desc[data_idx].dma_address_valid = true; + desc[data_idx].dma_address = meta->dma_address; + } + if (num_frags == 0) { + if (ipa3_send(sys, data_idx + 1, desc, true)) { + IPAERR("fail to send skb %pK HWP\n", skb); + goto fail_mem; + } + } else { + for (f = 0; f < num_frags; f++) { + desc[data_idx+f+1].frag = + &skb_shinfo(skb)->frags[f]; + desc[data_idx+f+1].type = + IPA_DATA_DESC_SKB_PAGED; + desc[data_idx+f+1].len = + skb_frag_size(desc[data_idx+f+1].frag); + } + /* don't free skb till frag mappings are released */ + desc[data_idx+f].callback = desc[data_idx].callback; + desc[data_idx+f].user1 = desc[data_idx].user1; + desc[data_idx+f].user2 = desc[data_idx].user2; + desc[data_idx].callback = NULL; + + if (ipa3_send(sys, num_frags + data_idx + 1, + desc, true)) { + IPAERR("fail to send skb %pK num_frags %u\n", + skb, num_frags); + goto fail_mem; + } + } + IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts); + } + + if (num_frags) { + kfree(desc); + IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear); + } + return 0; + +fail_send: + ipahal_destroy_imm_cmd(cmd_pyld); +fail_mem: + if (num_frags) + kfree(desc); +fail_gen: + return -EFAULT; +} + +static void ipa3_wq_handle_rx(struct work_struct *work) +{ + struct ipa3_sys_context *sys; + + sys = container_of(work, struct ipa3_sys_context, work); + + if (sys->ep->napi_enabled) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI"); + sys->ep->client_notify(sys->ep->priv, + IPA_CLIENT_START_POLL, 0); + } else + ipa3_handle_rx(sys); +} + +static void ipa3_wq_repl_rx(struct work_struct *work) +{ + struct ipa3_sys_context *sys; + void *ptr; + struct ipa3_rx_pkt_wrapper *rx_pkt; + gfp_t flag = GFP_KERNEL; + u32 next; + u32 curr; + + sys = container_of(work, struct ipa3_sys_context, repl_work); + curr = atomic_read(&sys->repl.tail_idx); + +begin: + while (1) { + next = (curr + 1) % sys->repl.capacity; + if (next == atomic_read(&sys->repl.head_idx)) + goto fail_kmem_cache_alloc; + + rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) 
+ goto fail_kmem_cache_alloc; + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail); + rx_pkt->sys = sys; + + rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag); + if (rx_pkt->data.skb == NULL) + goto fail_skb_alloc; + + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr, + sys->rx_buff_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) { + pr_err_ratelimited("%s dma map fail %pK for %pK sys=%pK\n", + __func__, (void *)rx_pkt->data.dma_addr, + ptr, sys); + goto fail_dma_mapping; + } + + sys->repl.cache[curr] = rx_pkt; + curr = next; + /* ensure write is done before setting tail index */ + mb(); + atomic_set(&sys->repl.tail_idx, next); + } + + return; + +fail_dma_mapping: + sys->free_skb(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + if (atomic_read(&sys->repl.tail_idx) == + atomic_read(&sys->repl.head_idx)) { + if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) + IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty); + else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) + IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty); + pr_err_ratelimited("%s sys=%pK repl ring empty\n", + __func__, sys); + goto begin; + } +} + +static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt = NULL; + struct ipa3_rx_pkt_wrapper *tmp; + int ret; + struct gsi_xfer_elem gsi_xfer_elem_one; + u32 rx_len_cached = 0; + + IPADBG_LOW("\n"); + + spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + rx_len_cached = sys->len; + + if (rx_len_cached < sys->rx_pool_sz) { + list_for_each_entry_safe(rx_pkt, tmp, + &ipa3_ctx->wc_memb.wlan_comm_desc_list, link) { + list_del(&rx_pkt->link); + + if (ipa3_ctx->wc_memb.wlan_comm_free_cnt > 0) + ipa3_ctx->wc_memb.wlan_comm_free_cnt--; + + INIT_LIST_HEAD(&rx_pkt->link); + rx_pkt->len = 0; + rx_pkt->sys = sys; + + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + memset(&gsi_xfer_elem_one, 0, + sizeof(gsi_xfer_elem_one)); + gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr; + gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ; + gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT; + gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB; + gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA; + gsi_xfer_elem_one.xfer_user_data = rx_pkt; + + ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, + &gsi_xfer_elem_one, true); + + if (ret) { + IPAERR("failed to provide buffer: %d\n", ret); + goto fail_provide_rx_buffer; + } + + rx_len_cached = ++sys->len; + + if (rx_len_cached >= sys->rx_pool_sz) { + spin_unlock_bh( + &ipa3_ctx->wc_memb.wlan_spinlock); + return; + } + } + } + spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + + if (rx_len_cached < sys->rx_pool_sz && + ipa3_ctx->wc_memb.wlan_comm_total_cnt < + IPA_WLAN_COMM_RX_POOL_HIGH) { + ipa3_replenish_rx_cache(sys); + ipa3_ctx->wc_memb.wlan_comm_total_cnt += + (sys->rx_pool_sz - rx_len_cached); + } + + return; + +fail_provide_rx_buffer: + list_del(&rx_pkt->link); + spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); +} + +static void ipa3_cleanup_wlan_rx_common_cache(void) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + struct ipa3_rx_pkt_wrapper *tmp; + + spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + + list_for_each_entry_safe(rx_pkt, tmp, + &ipa3_ctx->wc_memb.wlan_comm_desc_list, link) { + list_del(&rx_pkt->link); + dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr, + IPA_WLAN_COMM_RX_POOL_LOW, DMA_FROM_DEVICE); + 
dev_kfree_skb_any(rx_pkt->data.skb); + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); + ipa3_ctx->wc_memb.wlan_comm_free_cnt--; + ipa3_ctx->wc_memb.wlan_comm_total_cnt--; + } + ipa3_ctx->wc_memb.total_tx_pkts_freed = 0; + + if (ipa3_ctx->wc_memb.wlan_comm_free_cnt != 0) + IPAERR("wlan comm buff free cnt: %d\n", + ipa3_ctx->wc_memb.wlan_comm_free_cnt); + + if (ipa3_ctx->wc_memb.wlan_comm_total_cnt != 0) + IPAERR("wlan comm buff total cnt: %d\n", + ipa3_ctx->wc_memb.wlan_comm_total_cnt); + + spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + +} + +static void ipa3_alloc_wlan_rx_common_cache(u32 size) +{ + void *ptr; + struct ipa3_rx_pkt_wrapper *rx_pkt; + int rx_len_cached = 0; + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN; + + rx_len_cached = ipa3_ctx->wc_memb.wlan_comm_total_cnt; + while (rx_len_cached < size) { + rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) + goto fail_kmem_cache_alloc; + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail); + + rx_pkt->data.skb = + ipa3_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ, + flag); + if (rx_pkt->data.skb == NULL) { + IPAERR("failed to alloc skb\n"); + goto fail_skb_alloc; + } + ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ); + rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr, + IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %pK for %pK\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + + spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + list_add_tail(&rx_pkt->link, + &ipa3_ctx->wc_memb.wlan_comm_desc_list); + rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt; + + ipa3_ctx->wc_memb.wlan_comm_free_cnt++; + spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + + } + + return; + +fail_dma_mapping: + dev_kfree_skb_any(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + return; +} + + +/** + * ipa3_replenish_rx_cache() - Replenish the Rx packets cache. + * + * The function allocates buffers in the rx_pkt_wrapper_cache cache until there + * are IPA_RX_POOL_CEIL buffers in the cache. 
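/*
 * The repl.cache filled by ipa3_wq_repl_rx() above (and drained by
 * ipa3_fast_replenish_rx_cache() further below) is a single-producer/
 * single-consumer ring: capacity is rx_pool_sz + 1 so one slot always stays
 * empty, the producer owns tail_idx, the consumer owns head_idx, and each
 * side publishes its index only after a memory barrier. Generic sketch of
 * the same index arithmetic (not driver code):
 */
static bool demo_ring_put(void **slots, u32 capacity,
			  atomic_t *head, atomic_t *tail, void *item)
{
	u32 cur = atomic_read(tail);
	u32 next = (cur + 1) % capacity;

	if (next == atomic_read(head))
		return false;		/* ring full */
	slots[cur] = item;
	mb();				/* publish the slot before the index */
	atomic_set(tail, next);
	return true;
}

static void *demo_ring_get(void **slots, u32 capacity,
			   atomic_t *head, atomic_t *tail)
{
	u32 cur = atomic_read(head);
	void *item;

	if (cur == atomic_read(tail))
		return NULL;		/* ring empty */
	item = slots[cur];
	mb();				/* read the slot before releasing it */
	atomic_set(head, (cur + 1) % capacity);
	return item;
}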
+ * - Allocate a buffer in the cache + * - Initialized the packets link + * - Initialize the packets work struct + * - Allocate the packets socket buffer (skb) + * - Fill the packets skb with data + * - Make the packet DMAable + * - Add the packet to the system pipe linked list + */ +static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys) +{ + void *ptr; + struct ipa3_rx_pkt_wrapper *rx_pkt; + int ret; + int rx_len_cached = 0; + struct gsi_xfer_elem gsi_xfer_elem_one; + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN; + + rx_len_cached = sys->len; + + while (rx_len_cached < sys->rx_pool_sz) { + rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) + goto fail_kmem_cache_alloc; + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail); + rx_pkt->sys = sys; + + rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag); + if (rx_pkt->data.skb == NULL) { + IPAERR("failed to alloc skb\n"); + goto fail_skb_alloc; + } + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr, + sys->rx_buff_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %pK for %pK\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + rx_len_cached = ++sys->len; + + memset(&gsi_xfer_elem_one, 0, + sizeof(gsi_xfer_elem_one)); + gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr; + gsi_xfer_elem_one.len = sys->rx_buff_sz; + gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT; + gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB; + gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA; + gsi_xfer_elem_one.xfer_user_data = rx_pkt; + + ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, + 1, &gsi_xfer_elem_one, false); + if (ret != GSI_STATUS_SUCCESS) { + IPAERR("failed to provide buffer: %d\n", + ret); + goto fail_provide_rx_buffer; + } + + /* + * As doorbell is a costly operation, notify to GSI + * of new buffers if threshold is exceeded + */ + if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) { + sys->len_pending_xfer = 0; + gsi_start_xfer(sys->ep->gsi_chan_hdl); + } + } + + return; + +fail_provide_rx_buffer: + list_del(&rx_pkt->link); + rx_len_cached = --sys->len; + dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); +fail_dma_mapping: + sys->free_skb(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + if (rx_len_cached - sys->len_pending_xfer == 0) + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); +} + +static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys) +{ + void *ptr; + struct ipa3_rx_pkt_wrapper *rx_pkt; + int ret; + int rx_len_cached = 0; + struct gsi_xfer_elem gsi_xfer_elem_one; + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN; + + rx_len_cached = sys->len; + + while (rx_len_cached < sys->rx_pool_sz) { + if (list_empty(&sys->rcycl_list)) { + rx_pkt = kmem_cache_zalloc( + ipa3_ctx->rx_pkt_wrapper_cache, flag); + if (!rx_pkt) + goto fail_kmem_cache_alloc; + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail); + rx_pkt->sys = sys; + + rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag); + if (rx_pkt->data.skb == NULL) { + IPAERR("failed to alloc skb\n"); + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, + rx_pkt); + goto fail_kmem_cache_alloc; + } + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = 
dma_map_single(ipa3_ctx->pdev, + ptr, sys->rx_buff_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, + rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %pK for %pK\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + } else { + spin_lock_bh(&sys->spinlock); + rx_pkt = list_first_entry(&sys->rcycl_list, + struct ipa3_rx_pkt_wrapper, link); + list_del(&rx_pkt->link); + spin_unlock_bh(&sys->spinlock); + INIT_LIST_HEAD(&rx_pkt->link); + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, + ptr, sys->rx_buff_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, + rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %pK for %pK\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + } + + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + rx_len_cached = ++sys->len; + memset(&gsi_xfer_elem_one, 0, + sizeof(gsi_xfer_elem_one)); + gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr; + gsi_xfer_elem_one.len = sys->rx_buff_sz; + gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT; + gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB; + gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA; + gsi_xfer_elem_one.xfer_user_data = rx_pkt; + + ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, + 1, &gsi_xfer_elem_one, false); + if (ret != GSI_STATUS_SUCCESS) { + IPAERR("failed to provide buffer: %d\n", + ret); + goto fail_provide_rx_buffer; + } + + /* + * As doorbell is a costly operation, notify to GSI + * of new buffers if threshold is exceeded + */ + if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) { + sys->len_pending_xfer = 0; + gsi_start_xfer(sys->ep->gsi_chan_hdl); + } + } + + return; +fail_provide_rx_buffer: + rx_len_cached = --sys->len; + list_del(&rx_pkt->link); + INIT_LIST_HEAD(&rx_pkt->link); + dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); +fail_dma_mapping: + spin_lock_bh(&sys->spinlock); + list_add_tail(&rx_pkt->link, &sys->rcycl_list); + INIT_LIST_HEAD(&rx_pkt->link); + spin_unlock_bh(&sys->spinlock); +fail_kmem_cache_alloc: + if (rx_len_cached - sys->len_pending_xfer == 0) + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); +} + +static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + int ret; + int rx_len_cached = 0; + struct gsi_xfer_elem gsi_xfer_elem_one; + u32 curr; + + spin_lock_bh(&sys->spinlock); + + rx_len_cached = sys->len; + curr = atomic_read(&sys->repl.head_idx); + + while (rx_len_cached < sys->rx_pool_sz) { + if (curr == atomic_read(&sys->repl.tail_idx)) + break; + + rx_pkt = sys->repl.cache[curr]; + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + + memset(&gsi_xfer_elem_one, 0, + sizeof(gsi_xfer_elem_one)); + gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr; + gsi_xfer_elem_one.len = sys->rx_buff_sz; + gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT; + gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB; + gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA; + gsi_xfer_elem_one.xfer_user_data = rx_pkt; + + ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, + &gsi_xfer_elem_one, false); + if (ret != GSI_STATUS_SUCCESS) { + IPAERR("failed to provide buffer: %d\n", + ret); + break; + } + + /* + * As doorbell is a costly operation, notify to GSI + * of new buffers if threshold is exceeded + */ + if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) { + sys->len_pending_xfer = 0; + gsi_start_xfer(sys->ep->gsi_chan_hdl); + } + + rx_len_cached = ++sys->len; + curr = 
(curr + 1) % sys->repl.capacity; + /* ensure write is done before setting head index */ + mb(); + atomic_set(&sys->repl.head_idx, curr); + } + spin_unlock_bh(&sys->spinlock); + + queue_work(sys->repl_wq, &sys->repl_work); + + if (rx_len_cached - sys->len_pending_xfer + <= IPA_DEFAULT_SYS_YELLOW_WM) { + if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) + IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty); + else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) + IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty); + else + WARN_ON(1); + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); + } +} + +static void ipa3_replenish_rx_work_func(struct work_struct *work) +{ + struct delayed_work *dwork; + struct ipa3_sys_context *sys; + + dwork = container_of(work, struct delayed_work, work); + sys = container_of(dwork, struct ipa3_sys_context, replenish_rx_work); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + sys->repl_hdlr(sys); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +/** + * ipa3_cleanup_rx() - release RX queue resources + * + */ +static void ipa3_cleanup_rx(struct ipa3_sys_context *sys) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + struct ipa3_rx_pkt_wrapper *r; + u32 head; + u32 tail; + + list_for_each_entry_safe(rx_pkt, r, + &sys->head_desc_list, link) { + list_del(&rx_pkt->link); + dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); + } + + list_for_each_entry_safe(rx_pkt, r, + &sys->rcycl_list, link) { + list_del(&rx_pkt->link); + dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); + } + + if (sys->repl.cache) { + head = atomic_read(&sys->repl.head_idx); + tail = atomic_read(&sys->repl.tail_idx); + while (head != tail) { + rx_pkt = sys->repl.cache[head]; + dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); + head = (head + 1) % sys->repl.capacity; + } + kfree(sys->repl.cache); + } +} + +static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len) +{ + struct sk_buff *skb2 = NULL; + + skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL); + if (likely(skb2)) { + /* Set the data pointer */ + skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM); + memcpy(skb2->data, skb->data, len); + skb2->len = len; + skb_set_tail_pointer(skb2, len); + } + + return skb2; +} + +static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb, + struct ipa3_sys_context *sys) +{ + int rc = 0; + struct ipahal_pkt_status status; + u32 pkt_status_sz; + struct sk_buff *skb2; + int pad_len_byte; + int len; + unsigned char *buf; + int src_pipe; + unsigned int used = *(unsigned int *)skb->cb; + unsigned int used_align = ALIGN(used, 32); + unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used; + struct ipa3_tx_pkt_wrapper *tx_pkt = NULL; + unsigned long ptr; + + IPA_DUMP_BUFF(skb->data, 0, skb->len); + + if (skb->len == 0) { + IPAERR("ZLT packet arrived to AP\n"); + return rc; + } + + if (sys->len_partial) { + IPADBG_LOW("len_partial %d\n", sys->len_partial); + buf = skb_push(skb, sys->len_partial); + memcpy(buf, sys->prev_skb->data, sys->len_partial); + sys->len_partial = 0; + sys->free_skb(sys->prev_skb); + sys->prev_skb = NULL; + goto begin; + } + + /* this pipe has TX comp (status only) + 
mux-ed LAN RX data + * (status+data) + */ + if (sys->len_rem) { + IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len, + sys->len_pad); + if (sys->len_rem <= skb->len) { + if (sys->prev_skb) { + skb2 = skb_copy_expand(sys->prev_skb, 0, + sys->len_rem, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, sys->len_rem), + skb->data, sys->len_rem); + skb_trim(skb2, + skb2->len - sys->len_pad); + skb2->truesize = skb2->len + + sizeof(struct sk_buff); + if (sys->drop_packet) + dev_kfree_skb_any(skb2); + else + sys->ep->client_notify( + sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } else { + IPAERR("copy expand failed\n"); + } + dev_kfree_skb_any(sys->prev_skb); + } + skb_pull(skb, sys->len_rem); + sys->prev_skb = NULL; + sys->len_rem = 0; + sys->len_pad = 0; + } else { + if (sys->prev_skb) { + skb2 = skb_copy_expand(sys->prev_skb, 0, + skb->len, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, skb->len), + skb->data, skb->len); + } else { + IPAERR("copy expand failed\n"); + } + dev_kfree_skb_any(sys->prev_skb); + sys->prev_skb = skb2; + } + sys->len_rem -= skb->len; + return rc; + } + } + +begin: + pkt_status_sz = ipahal_pkt_status_get_size(); + while (skb->len) { + sys->drop_packet = false; + IPADBG_LOW("LEN_REM %d\n", skb->len); + + if (skb->len < pkt_status_sz) { + WARN_ON(sys->prev_skb != NULL); + IPADBG_LOW("status straddles buffer\n"); + sys->prev_skb = skb_copy(skb, GFP_KERNEL); + sys->len_partial = skb->len; + return rc; + } + + ipahal_pkt_status_parse(skb->data, &status); + IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n", + status.status_opcode, status.endp_src_idx, + status.endp_dest_idx, status.pkt_len); + if (sys->status_stat) { + sys->status_stat->status[sys->status_stat->curr] = + status; + sys->status_stat->curr++; + if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM) + sys->status_stat->curr = 0; + } + + if ((status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) && + (status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_PACKET) && + (status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET) && + (status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) { + IPAERR("unsupported opcode(%d)\n", + status.status_opcode); + skb_pull(skb, pkt_status_sz); + continue; + } + IPA_STATS_EXCP_CNT(status.exception, + ipa3_ctx->stats.rx_excp_pkts); + if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes || + status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) { + IPAERR("status fields invalid\n"); + IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n", + status.status_opcode, status.endp_src_idx, + status.endp_dest_idx, status.pkt_len); + WARN_ON(1); + /* HW gave an unexpected status */ + BUG(); + } + if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL( + IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, &status)) { + struct ipa3_tag_completion *comp; + + IPADBG_LOW("TAG packet arrived\n"); + if (status.tag_info == IPA_COOKIE) { + skb_pull(skb, pkt_status_sz); + if (skb->len < sizeof(comp)) { + IPAERR("TAG arrived without packet\n"); + return rc; + } + memcpy(&comp, skb->data, sizeof(comp)); + skb_pull(skb, sizeof(comp) + + IPA_SIZE_DL_CSUM_META_TRAILER); + complete(&comp->comp); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + continue; + } else { + ptr = tag_to_pointer_wa(status.tag_info); + tx_pkt = (struct ipa3_tx_pkt_wrapper *)ptr; + IPADBG_LOW("tx_pkt recv = %pK\n", tx_pkt); + } + } + if (status.pkt_len == 0) { + IPADBG_LOW("Skip aggr close status\n"); + skb_pull(skb, pkt_status_sz); + IPA_STATS_INC_CNT(ipa3_ctx->stats.aggr_close); + 
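+ /*
+  * This status carried no payload (aggregation close indication), so
+  * balance the rx_excp_pkts increment done for this status above.
+  */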
IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts + [IPAHAL_PKT_STATUS_EXCEPTION_NONE]); + continue; + } + + if (status.endp_dest_idx == (sys->ep - ipa3_ctx->ep)) { + /* RX data */ + src_pipe = status.endp_src_idx; + + /* + * A packet which is received back to the AP after + * there was no route match. + */ + if (status.exception == + IPAHAL_PKT_STATUS_EXCEPTION_NONE && + ipahal_is_rule_miss_id(status.rt_rule_id)) + sys->drop_packet = true; + + if (skb->len == pkt_status_sz && + status.exception == + IPAHAL_PKT_STATUS_EXCEPTION_NONE) { + WARN_ON(sys->prev_skb != NULL); + IPADBG_LOW("Ins header in next buffer\n"); + sys->prev_skb = skb_copy(skb, GFP_KERNEL); + sys->len_partial = skb->len; + return rc; + } + + pad_len_byte = ((status.pkt_len + 3) & ~3) - + status.pkt_len; + + len = status.pkt_len + pad_len_byte + + IPA_SIZE_DL_CSUM_META_TRAILER; + IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte, + status.pkt_len, len); + + if (status.exception == + IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) { + IPADBG_LOW( + "Dropping packet on DeAggr Exception\n"); + sys->drop_packet = true; + } + + skb2 = ipa3_skb_copy_for_client(skb, + min(status.pkt_len + pkt_status_sz, skb->len)); + if (likely(skb2)) { + if (skb->len < len + pkt_status_sz) { + IPADBG_LOW("SPL skb len %d len %d\n", + skb->len, len); + sys->prev_skb = skb2; + sys->len_rem = len - skb->len + + pkt_status_sz; + sys->len_pad = pad_len_byte; + skb_pull(skb, skb->len); + } else { + skb_trim(skb2, status.pkt_len + + pkt_status_sz); + IPADBG_LOW("rx avail for %d\n", + status.endp_dest_idx); + if (sys->drop_packet) { + dev_kfree_skb_any(skb2); + } else if (status.pkt_len > + IPA_GENERIC_AGGR_BYTE_LIMIT * + 1024) { + IPAERR("packet size invalid\n"); + IPAERR("STATUS opcode=%d\n", + status.status_opcode); + IPAERR("src=%d dst=%d len=%d\n", + status.endp_src_idx, + status.endp_dest_idx, + status.pkt_len); + /* Unexpected HW status */ + BUG(); + } else { + skb2->truesize = skb2->len + + sizeof(struct sk_buff) + + (ALIGN(len + + pkt_status_sz, 32) * + unused / used_align); + sys->ep->client_notify( + sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } + skb_pull(skb, len + pkt_status_sz); + } + } else { + IPAERR("fail to alloc skb\n"); + if (skb->len < len) { + sys->prev_skb = NULL; + sys->len_rem = len - skb->len + + pkt_status_sz; + sys->len_pad = pad_len_byte; + skb_pull(skb, skb->len); + } else { + skb_pull(skb, len + pkt_status_sz); + } + } + /* TX comp */ + ipa3_wq_write_done_status(src_pipe, tx_pkt); + IPADBG_LOW("tx comp imp for %d\n", src_pipe); + } else { + /* TX comp */ + ipa3_wq_write_done_status(status.endp_src_idx, tx_pkt); + IPADBG_LOW("tx comp exp for %d\n", + status.endp_src_idx); + skb_pull(skb, pkt_status_sz); + IPA_STATS_INC_CNT(ipa3_ctx->stats.stat_compl); + IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts + [IPAHAL_PKT_STATUS_EXCEPTION_NONE]); + } + }; + + return rc; +} + +static struct sk_buff *ipa3_join_prev_skb(struct sk_buff *prev_skb, + struct sk_buff *skb, unsigned int len) +{ + struct sk_buff *skb2; + + skb2 = skb_copy_expand(prev_skb, 0, + len, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, len), + skb->data, len); + } else { + IPAERR("copy expand failed\n"); + skb2 = NULL; + } + dev_kfree_skb_any(prev_skb); + + return skb2; +} + +static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb, + struct ipa3_sys_context *sys) +{ + struct sk_buff *skb2; + + IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len); + if (sys->len_rem <= skb->len) { + if (sys->prev_skb) { + skb2 = ipa3_join_prev_skb(sys->prev_skb, 
skb, + sys->len_rem); + if (likely(skb2)) { + IPADBG_LOW( + "removing Status element from skb and sending to WAN client"); + skb_pull(skb2, ipahal_pkt_status_get_size()); + skb2->truesize = skb2->len + + sizeof(struct sk_buff); + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } + } + skb_pull(skb, sys->len_rem); + sys->prev_skb = NULL; + sys->len_rem = 0; + } else { + if (sys->prev_skb) { + skb2 = ipa3_join_prev_skb(sys->prev_skb, skb, + skb->len); + sys->prev_skb = skb2; + } + sys->len_rem -= skb->len; + skb_pull(skb, skb->len); + } +} + +static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb, + struct ipa3_sys_context *sys) +{ + int rc = 0; + struct ipahal_pkt_status status; + unsigned char *skb_data; + u32 pkt_status_sz; + struct sk_buff *skb2; + u16 pkt_len_with_pad; + u32 qmap_hdr; + int checksum_trailer_exists; + int frame_len; + int ep_idx; + unsigned int used = *(unsigned int *)skb->cb; + unsigned int used_align = ALIGN(used, 32); + unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used; + + IPA_DUMP_BUFF(skb->data, 0, skb->len); + if (skb->len == 0) { + IPAERR("ZLT\n"); + goto bail; + } + + if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) { + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, (unsigned long)(skb)); + return rc; + } + if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) { + IPAERR("Recycle should enable only with GRO Aggr\n"); + ipa_assert(); + } + + /* + * payload splits across 2 buff or more, + * take the start of the payload from prev_skb + */ + if (sys->len_rem) + ipa3_wan_rx_handle_splt_pyld(skb, sys); + + pkt_status_sz = ipahal_pkt_status_get_size(); + while (skb->len) { + IPADBG_LOW("LEN_REM %d\n", skb->len); + if (skb->len < pkt_status_sz) { + IPAERR("status straddles buffer\n"); + WARN_ON(1); + goto bail; + } + ipahal_pkt_status_parse(skb->data, &status); + skb_data = skb->data; + IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n", + status.status_opcode, status.endp_src_idx, + status.endp_dest_idx, status.pkt_len); + + if (sys->status_stat) { + sys->status_stat->status[sys->status_stat->curr] = + status; + sys->status_stat->curr++; + if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM) + sys->status_stat->curr = 0; + } + + if ((status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) && + (status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_PACKET) && + (status.status_opcode != + IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) { + IPAERR("unsupported opcode(%d)\n", + status.status_opcode); + skb_pull(skb, pkt_status_sz); + continue; + } + + IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts); + if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes || + status.endp_src_idx >= ipa3_ctx->ipa_num_pipes || + status.pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) { + IPAERR("status fields invalid\n"); + WARN_ON(1); + goto bail; + } + if (status.pkt_len == 0) { + IPADBG_LOW("Skip aggr close status\n"); + skb_pull(skb, pkt_status_sz); + IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_pkts); + IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close); + continue; + } + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (status.endp_dest_idx != ep_idx) { + IPAERR("expected endp_dest_idx %d received %d\n", + ep_idx, status.endp_dest_idx); + WARN_ON(1); + goto bail; + } + /* RX data */ + if (skb->len == pkt_status_sz) { + IPAERR("Ins header in next buffer\n"); + WARN_ON(1); + goto bail; + } + qmap_hdr = *(u32 *)(skb_data + pkt_status_sz); + /* + * Take the pkt_len_with_pad from the last 2 bytes of the QMAP + * header + */ + + /*QMAP is BE: 
convert the pkt_len field from BE to LE*/ + pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff); + IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad); + /*get the CHECKSUM_PROCESS bit*/ + checksum_trailer_exists = IPAHAL_PKT_STATUS_MASK_FLAG_VAL( + IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status); + IPADBG_LOW("checksum_trailer_exists %d\n", + checksum_trailer_exists); + + frame_len = pkt_status_sz + IPA_QMAP_HEADER_LENGTH + + pkt_len_with_pad; + if (checksum_trailer_exists) + frame_len += IPA_DL_CHECKSUM_LENGTH; + IPADBG_LOW("frame_len %d\n", frame_len); + + skb2 = skb_clone(skb, GFP_KERNEL); + if (likely(skb2)) { + /* + * the len of actual data is smaller than expected + * payload split across 2 buff + */ + if (skb->len < frame_len) { + IPADBG_LOW("SPL skb len %d len %d\n", + skb->len, frame_len); + sys->prev_skb = skb2; + sys->len_rem = frame_len - skb->len; + skb_pull(skb, skb->len); + } else { + skb_trim(skb2, frame_len); + IPADBG_LOW("rx avail for %d\n", + status.endp_dest_idx); + IPADBG_LOW( + "removing Status element from skb and sending to WAN client"); + skb_pull(skb2, pkt_status_sz); + skb2->truesize = skb2->len + + sizeof(struct sk_buff) + + (ALIGN(frame_len, 32) * + unused / used_align); + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, (unsigned long)(skb2)); + skb_pull(skb, frame_len); + } + } else { + IPAERR("fail to clone\n"); + if (skb->len < frame_len) { + sys->prev_skb = NULL; + sys->len_rem = frame_len - skb->len; + skb_pull(skb, skb->len); + } else { + skb_pull(skb, frame_len); + } + } + }; +bail: + sys->free_skb(skb); + return rc; +} + +static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags) +{ + return __dev_alloc_skb(len, flags); +} + +static void ipa3_free_skb_rx(struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); +} + +void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data) +{ + struct sk_buff *rx_skb = (struct sk_buff *)data; + struct ipahal_pkt_status status; + struct ipa3_ep_context *ep; + unsigned int src_pipe; + u32 metadata; + + ipahal_pkt_status_parse(rx_skb->data, &status); + src_pipe = status.endp_src_idx; + metadata = status.metadata; + ep = &ipa3_ctx->ep[src_pipe]; + if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes || + !ep->valid || + !ep->client_notify)) { + IPAERR("drop pipe=%d ep_valid=%d client_notify=%pK\n", + src_pipe, ep->valid, ep->client_notify); + dev_kfree_skb_any(rx_skb); + return; + } + if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE) + skb_pull(rx_skb, ipahal_pkt_status_get_size() + + IPA_LAN_RX_HEADER_LENGTH); + else + skb_pull(rx_skb, ipahal_pkt_status_get_size()); + + /* Metadata Info + * ------------------------------------------ + * | 3 | 2 | 1 | 0 | + * | fw_desc | vdev_id | qmap mux id | Resv | + * ------------------------------------------ + */ + *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF); + IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n", + metadata, *(u32 *)rx_skb->cb); + + ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb)); +} + +static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt) +{ + rx_pkt->data.dma_addr = 0; + ipa3_skb_recycle(rx_pkt->data.skb); + INIT_LIST_HEAD(&rx_pkt->link); + spin_lock_bh(&rx_pkt->sys->spinlock); + list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list); + spin_unlock_bh(&rx_pkt->sys->spinlock); +} + +void ipa3_recycle_wan_skb(struct sk_buff *skb) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + int ep_idx = ipa3_get_ep_mapping( + IPA_CLIENT_APPS_WAN_CONS); + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN; + + if (unlikely(ep_idx 
== -1)) { + IPAERR("dest EP does not exist\n"); + ipa_assert(); + } + + rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) + return; + + INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail); + rx_pkt->sys = ipa3_ctx->ep[ep_idx].sys; + + rx_pkt->data.skb = skb; + ipa3_recycle_rx_wrapper(rx_pkt); +} + +static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt_expected; + struct sk_buff *rx_skb; + + if (unlikely(list_empty(&sys->head_desc_list))) { + WARN_ON(1); + return; + } + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa3_rx_pkt_wrapper, + link); + list_del(&rx_pkt_expected->link); + sys->len--; + if (size) + rx_pkt_expected->len = size; + rx_skb = rx_pkt_expected->data.skb; + dma_unmap_single(ipa3_ctx->pdev, rx_pkt_expected->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + skb_set_tail_pointer(rx_skb, rx_pkt_expected->len); + rx_skb->len = rx_pkt_expected->len; + *(unsigned int *)rx_skb->cb = rx_skb->len; + rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff); + sys->pyld_hdlr(rx_skb, sys); + sys->free_rx_wrapper(rx_pkt_expected); + sys->repl_hdlr(sys); +} + +static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, u32 size) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt_expected; + struct sk_buff *rx_skb; + + if (unlikely(list_empty(&sys->head_desc_list))) { + WARN_ON(1); + return; + } + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa3_rx_pkt_wrapper, + link); + list_del(&rx_pkt_expected->link); + sys->len--; + + if (size) + rx_pkt_expected->len = size; + + rx_skb = rx_pkt_expected->data.skb; + skb_set_tail_pointer(rx_skb, rx_pkt_expected->len); + rx_skb->len = rx_pkt_expected->len; + rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff); + sys->ep->wstats.tx_pkts_rcvd++; + if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) { + ipa3_free_skb(&rx_pkt_expected->data); + sys->ep->wstats.tx_pkts_dropped++; + } else { + sys->ep->wstats.tx_pkts_sent++; + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(&rx_pkt_expected->data)); + } + ipa3_replenish_wlan_rx_cache(sys); +} + +static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info) +{ + IPADBG_LOW("ENTER.\n"); + if (unlikely(list_empty(&sys->head_desc_list))) { + IPAERR("descriptor list is empty!\n"); + WARN_ON(1); + return; + } + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(mem_info)); + IPADBG_LOW("EXIT\n"); +} + +static void ipa3_wq_rx_avail(struct work_struct *work) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + struct ipa3_sys_context *sys; + + rx_pkt = container_of(work, struct ipa3_rx_pkt_wrapper, work); + WARN(unlikely(rx_pkt == NULL), "rx pkt is null"); + sys = rx_pkt->sys; + ipa3_wq_rx_common(sys, 0); +} + +static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb, + struct ipa3_sys_context *sys) +{ + if (sys->ep->client_notify) { + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(rx_skb)); + } else { + dev_kfree_skb_any(rx_skb); + WARN(1, "client notify is null"); + } + + return 0; +} + +static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rk_pkt) +{ + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rk_pkt); +} + +static int ipa3_assign_policy(struct ipa_sys_connect_params *in, + struct ipa3_sys_context *sys) +{ + bool apps_wan_cons_agg_gro_flag; + unsigned long int aggr_byte_limit; + + if (in->client == IPA_CLIENT_APPS_CMD_PROD) { + sys->policy = IPA_POLICY_INTR_MODE; + 
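+ /* the command pipe gets its own event ring rather than the common one */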
sys->use_comm_evt_ring = false; + return 0; + } + + if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) { + sys->policy = IPA_POLICY_NOINTR_MODE; + return 0; + } + + apps_wan_cons_agg_gro_flag = + ipa3_ctx->ipa_client_apps_wan_cons_agg_gro; + aggr_byte_limit = in->ipa_ep_cfg.aggr.aggr_byte_limit; + + if (IPA_CLIENT_IS_PROD(in->client)) { + if (sys->ep->skip_ep_cfg) { + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->use_comm_evt_ring = true; + atomic_set(&sys->curr_polling_state, 0); + } else { + sys->policy = IPA_POLICY_INTR_MODE; + sys->use_comm_evt_ring = true; + INIT_WORK(&sys->work, ipa3_send_nop_desc); + } + } else { + if (in->client == IPA_CLIENT_APPS_LAN_CONS || + in->client == IPA_CLIENT_APPS_WAN_CONS) { + sys->ep->status.status_en = true; + sys->policy = IPA_POLICY_INTR_POLL_MODE; + INIT_WORK(&sys->work, ipa3_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + ipa3_switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + ipa3_replenish_rx_work_func); + INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ( + IPA_GENERIC_RX_BUFF_BASE_SZ); + sys->get_skb = ipa3_get_skb_ipa_rx; + sys->free_skb = ipa3_free_skb_rx; + in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR; + in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC; + in->ipa_ep_cfg.aggr.aggr_time_limit = + IPA_GENERIC_AGGR_TIME_LIMIT; + if (in->client == IPA_CLIENT_APPS_LAN_CONS) { + sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr; + sys->repl_hdlr = + ipa3_replenish_rx_cache_recycle; + sys->free_rx_wrapper = + ipa3_recycle_rx_wrapper; + sys->rx_pool_sz = + ipa3_ctx->lan_rx_ring_size; + in->ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GENERIC_AGGR_BYTE_LIMIT; + in->ipa_ep_cfg.aggr.aggr_pkt_limit = + IPA_GENERIC_AGGR_PKT_LIMIT; + } else if (in->client == + IPA_CLIENT_APPS_WAN_CONS) { + sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr; + sys->free_rx_wrapper = ipa3_free_rx_wrapper; + sys->rx_pool_sz = ipa3_ctx->wan_rx_ring_size; + if (nr_cpu_ids > 1) { + sys->repl_hdlr = + ipa3_fast_replenish_rx_cache; + } else { + sys->repl_hdlr = + ipa3_replenish_rx_cache; + } + if (in->napi_enabled && in->recycle_enabled) + sys->repl_hdlr = + ipa3_replenish_rx_cache_recycle; + in->ipa_ep_cfg.aggr.aggr_sw_eof_active + = true; + if (apps_wan_cons_agg_gro_flag) { + IPAERR("get close-by %u\n", + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit)); + IPAERR("set rx_buff_sz %lu\n", + (unsigned long int) + IPA_GENERIC_RX_BUFF_SZ( + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit))); + /* disable ipa_status */ + sys->ep->status.status_en = false; + sys->rx_buff_sz = + IPA_GENERIC_RX_BUFF_SZ( + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit)); + in->ipa_ep_cfg.aggr.aggr_byte_limit = + sys->rx_buff_sz < + aggr_byte_limit ? 
+ IPA_ADJUST_AGGR_BYTE_LIMIT( + sys->rx_buff_sz) : + IPA_ADJUST_AGGR_BYTE_LIMIT( + in->ipa_ep_cfg.aggr.aggr_byte_limit); + IPAERR("set aggr_limit %lu\n", + (unsigned long int) + in->ipa_ep_cfg.aggr.aggr_byte_limit); + } else { + in->ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GENERIC_AGGR_BYTE_LIMIT; + in->ipa_ep_cfg.aggr.aggr_pkt_limit = + IPA_GENERIC_AGGR_PKT_LIMIT; + } + } + } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->policy = IPA_POLICY_INTR_POLL_MODE; + INIT_WORK(&sys->work, ipa3_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + ipa3_switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + ipa3_replenish_rx_work_func); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ; + sys->rx_pool_sz = in->desc_fifo_sz / + IPA_FIFO_ELEMENT_SIZE - 1; + if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ) + sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ; + sys->pyld_hdlr = NULL; + sys->repl_hdlr = ipa3_replenish_wlan_rx_cache; + sys->get_skb = ipa3_get_skb_ipa_rx; + sys->free_skb = ipa3_free_skb_rx; + sys->free_rx_wrapper = ipa3_free_rx_wrapper; + in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR; + } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->policy = IPA_POLICY_INTR_POLL_MODE; + INIT_WORK(&sys->work, ipa3_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + ipa3_switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + ipa3_replenish_rx_work_func); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ; + sys->rx_pool_sz = in->desc_fifo_sz / + IPA_FIFO_ELEMENT_SIZE - 1; + if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ) + sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ; + sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr; + sys->get_skb = ipa3_get_skb_ipa_rx; + sys->free_skb = ipa3_free_skb_rx; + sys->free_rx_wrapper = ipa3_free_rx_wrapper; + sys->repl_hdlr = ipa3_replenish_rx_cache; + } else if (in->client == + IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->policy = IPA_POLICY_INTR_POLL_MODE; + INIT_WORK(&sys->work, ipa3_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + ipa3_switch_to_intr_rx_work_func); + } else if (in->client == + IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->policy = IPA_POLICY_NOINTR_MODE; + } else { + WARN(1, "Need to install a RX pipe hdlr\n"); + return -EINVAL; + } + } + + return 0; +} + +/** + * ipa3_tx_client_rx_notify_release() - Callback function + * which will call the user supplied callback function to + * release the skb, or release it on its own if no callback + * function was supplied + * + * @user1: [in] - Data Descriptor + * @user2: [in] - endpoint idx + * + * This notified callback is for the destination client + * This function is supplied in ipa3_tx_dp_mul + */ +static void ipa3_tx_client_rx_notify_release(void *user1, int user2) +{ + struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1; + int ep_idx = user2; + + IPADBG_LOW("Received data desc anchor:%pK\n", dd); + + atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc); + ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++; + + /* wlan host driver waits till tx complete before unload */ + IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n", + ep_idx, atomic_read(&ipa3_ctx->ep[ep_idx].avail_fifo_desc)); + IPADBG_LOW("calling client notify callback with priv:%pK\n", + 
ipa3_ctx->ep[ep_idx].priv); + + if (ipa3_ctx->ep[ep_idx].client_notify) { + ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv, + IPA_WRITE_DONE, (unsigned long)user1); + ipa3_ctx->ep[ep_idx].wstats.rx_hd_reply++; + } +} +/** + * ipa3_tx_client_rx_pkt_status() - Callback function + * which will call the user supplied callback function to + * increase the available fifo descriptor + * + * @user1: [in] - Data Descriptor + * @user2: [in] - endpoint idx + * + * This notified callback is for the destination client + * This function is supplied in ipa3_tx_dp_mul + */ +static void ipa3_tx_client_rx_pkt_status(void *user1, int user2) +{ + int ep_idx = user2; + + atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc); + ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++; +} + + +/** + * ipa3_tx_dp_mul() - Data-path tx handler for multiple packets + * @src: [in] - Client that is sending data + * @ipa_tx_data_desc: [in] data descriptors from wlan + * + * this is used for to transfer data descriptors that received + * from WLAN1_PROD pipe to IPA HW + * + * The function will send data descriptors from WLAN1_PROD (one + * at a time). Will set EOT flag for last descriptor Once this send was done + * from transport point-of-view the IPA driver will get notified by the + * supplied callback - ipa_gsi_irq_tx_notify_cb() + * + * ipa_gsi_irq_tx_notify_cb will call to the user supplied callback + * + * Returns: 0 on success, negative on failure + */ +int ipa3_tx_dp_mul(enum ipa_client_type src, + struct ipa_tx_data_desc *data_desc) +{ + /* The second byte in wlan header holds qmap id */ +#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1 + struct ipa_tx_data_desc *entry; + struct ipa3_sys_context *sys; + struct ipa3_desc desc[2]; + u32 num_desc, cnt; + int ep_idx; + + IPADBG_LOW("Received data desc anchor:%pK\n", data_desc); + + spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock); + + ep_idx = ipa3_get_ep_mapping(src); + if (unlikely(ep_idx == -1)) { + IPAERR("dest EP does not exist.\n"); + goto fail_send; + } + IPADBG_LOW("ep idx:%d\n", ep_idx); + sys = ipa3_ctx->ep[ep_idx].sys; + + if (unlikely(ipa3_ctx->ep[ep_idx].valid == 0)) { + IPAERR("dest EP not valid.\n"); + goto fail_send; + } + sys->ep->wstats.rx_hd_rcvd++; + + /* Calculate the number of descriptors */ + num_desc = 0; + list_for_each_entry(entry, &data_desc->link, link) { + num_desc++; + } + IPADBG_LOW("Number of Data Descriptors:%d", num_desc); + + if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) { + IPAERR("Insufficient data descriptors available\n"); + goto fail_send; + } + + /* Assign callback only for last data descriptor */ + cnt = 0; + list_for_each_entry(entry, &data_desc->link, link) { + memset(desc, 0, 2 * sizeof(struct ipa3_desc)); + + IPADBG_LOW("Parsing data desc :%d\n", cnt); + cnt++; + ((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] = + (u8)sys->ep->cfg.meta.qmap_id; + + /* the tag field will be populated in ipa3_send() function */ + desc[0].is_tag_status = true; + desc[1].pyld = entry->pyld_buffer; + desc[1].len = entry->pyld_len; + desc[1].type = IPA_DATA_DESC_SKB; + desc[1].user1 = data_desc; + desc[1].user2 = ep_idx; + IPADBG_LOW("priv:%pK pyld_buf:0x%pK pyld_len:%d\n", + entry->priv, desc[1].pyld, desc[1].len); + + /* In case of last descriptor populate callback */ + if (cnt == num_desc) { + IPADBG_LOW("data desc:%pK\n", data_desc); + desc[1].callback = ipa3_tx_client_rx_notify_release; + } else { + desc[1].callback = ipa3_tx_client_rx_pkt_status; + } + + IPADBG_LOW("calling ipa3_send_one()\n"); + if (ipa3_send(sys, 2, 
desc, true)) { + IPAERR("fail to send skb\n"); + sys->ep->wstats.rx_pkt_leak += (cnt-1); + sys->ep->wstats.rx_dp_fail++; + goto fail_send; + } + + if (atomic_read(&sys->ep->avail_fifo_desc) >= 0) + atomic_dec(&sys->ep->avail_fifo_desc); + + sys->ep->wstats.rx_pkts_rcvd++; + IPADBG_LOW("ep=%d fifo desc=%d\n", + ep_idx, atomic_read(&sys->ep->avail_fifo_desc)); + } + + sys->ep->wstats.rx_hd_processed++; + spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock); + return 0; + +fail_send: + spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock); + return -EFAULT; + +} + +void ipa3_free_skb(struct ipa_rx_data *data) +{ + struct ipa3_rx_pkt_wrapper *rx_pkt; + + spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); + + ipa3_ctx->wc_memb.total_tx_pkts_freed++; + rx_pkt = container_of(data, struct ipa3_rx_pkt_wrapper, data); + + ipa3_skb_recycle(rx_pkt->data.skb); + (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ); + + list_add_tail(&rx_pkt->link, + &ipa3_ctx->wc_memb.wlan_comm_desc_list); + ipa3_ctx->wc_memb.wlan_comm_free_cnt++; + + spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock); +} + +/* Functions added to support kernel tests */ + +int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_transport_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status) +{ + struct ipa3_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + + if (sys_in == NULL || clnt_hdl == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + + if (ipa_transport_hdl == NULL || ipa_pipe_num == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + if (sys_in->client >= IPA_CLIENT_MAX) { + IPAERR("bad parm client:%d\n", sys_in->client); + goto fail_gen; + } + + ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client :%d\n", sys_in->client); + goto fail_gen; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client); + + if (ep->valid == 1) { + if (sys_in->client != IPA_CLIENT_APPS_WAN_PROD) { + IPAERR("EP %d already allocated\n", ipa_ep_idx); + goto fail_and_disable_clocks; + } else { + if (ipa3_cfg_ep_hdr(ipa_ep_idx, + &sys_in->ipa_ep_cfg.hdr)) { + IPAERR("fail to configure hdr prop of EP %d\n", + ipa_ep_idx); + result = -EFAULT; + goto fail_and_disable_clocks; + } + if (ipa3_cfg_ep_cfg(ipa_ep_idx, + &sys_in->ipa_ep_cfg.cfg)) { + IPAERR("fail to configure cfg prop of EP %d\n", + ipa_ep_idx); + result = -EFAULT; + goto fail_and_disable_clocks; + } + IPAERR("client %d (ep: %d) overlay ok sys=%pK\n", + sys_in->client, ipa_ep_idx, ep->sys); + ep->client_notify = sys_in->notify; + ep->priv = sys_in->priv; + *clnt_hdl = ipa_ep_idx; + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + return 0; + } + } + + memset(ep, 0, offsetof(struct ipa3_ep_context, sys)); + + ep->valid = 1; + ep->client = sys_in->client; + ep->client_notify = sys_in->notify; + ep->priv = sys_in->priv; + ep->keep_ipa_awake = true; + if (en_status) { + ep->status.status_en = true; + ep->status.status_ep = ipa_ep_idx; + } + + result = ipa3_enable_data_path(ipa_ep_idx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", + result, ipa_ep_idx); + goto fail_gen2; + } + + if (!ep->skip_ep_cfg) { + if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_gen2; + } + if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_gen2; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("skipping ep configuration\n"); + } + + 
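+ /*
+  * Hand the allocated resources back to the caller: the EP index serves
+  * as both the client handle and the pipe number, and the GSI device
+  * handle is returned as the transport handle.
+  */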
*clnt_hdl = ipa_ep_idx; + + *ipa_pipe_num = ipa_ep_idx; + *ipa_transport_hdl = ipa3_ctx->gsi_dev_hdl; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client, + ipa_ep_idx, ep->sys); + + return 0; + +fail_gen2: +fail_and_disable_clocks: + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); +fail_gen: + return result; +} + +int ipa3_sys_teardown(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm(Either endpoint or client hdl invalid)\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipa3_disable_data_path(clnt_hdl); + ep->valid = 0; + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + return 0; +} + +int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl) +{ + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm(Either endpoint or client hdl invalid)\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + ep->gsi_chan_hdl = gsi_ch_hdl; + ep->gsi_evt_ring_hdl = gsi_ev_hdl; + + return 0; +} + +static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify) +{ + switch (notify->evt_id) { + case GSI_EVT_OUT_OF_BUFFERS_ERR: + IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_EVT_OUT_OF_RESOURCES_ERR: + IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR: + IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n"); + break; + case GSI_EVT_EVT_RING_EMPTY_ERR: + IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n"); + break; + default: + IPAERR("Unexpected err evt: %d\n", notify->evt_id); + } +} + +static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify) +{ + switch (notify->evt_id) { + case GSI_CHAN_INVALID_TRE_ERR: + IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n"); + break; + case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR: + IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_BUFFERS_ERR: + IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n"); + break; + case GSI_CHAN_OUT_OF_RESOURCES_ERR: + IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n"); + break; + case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR: + IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n"); + break; + case GSI_CHAN_HWO_1_ERR: + IPAERR("Got GSI_CHAN_HWO_1_ERR\n"); + break; + default: + IPAERR("Unexpected err evt: %d\n", notify->evt_id); + } +} + +static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify) +{ + struct ipa3_tx_pkt_wrapper *tx_pkt; + + IPADBG_LOW("event %d notified\n", notify->evt_id); + + switch (notify->evt_id) { + case GSI_CHAN_EVT_EOT: + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1); + tx_pkt = notify->xfer_user_data; + queue_work(tx_pkt->sys->wq, &tx_pkt->work); + break; + default: + IPAERR("received unexpected event id %d\n", notify->evt_id); + } +} + +static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify) +{ + struct ipa3_sys_context *sys; + struct ipa3_rx_pkt_wrapper *rx_pkt_expected, *rx_pkt_rcvd; + int clk_off; + + if (!notify) { + IPAERR("gsi notify is NULL.\n"); + return; + } + IPADBG_LOW("event %d notified\n", notify->evt_id); + + sys = (struct ipa3_sys_context 
*)notify->chan_user_data; + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa3_rx_pkt_wrapper, link); + rx_pkt_rcvd = (struct ipa3_rx_pkt_wrapper *)notify->xfer_user_data; + + if (rx_pkt_expected != rx_pkt_rcvd) { + IPAERR("Pkt was not filled in head of rx buffer.\n"); + WARN_ON(1); + return; + } + sys->ep->bytes_xfered_valid = true; + sys->ep->bytes_xfered = notify->bytes_xfered; + sys->ep->phys_base = rx_pkt_rcvd->data.dma_addr; + + switch (notify->evt_id) { + case GSI_CHAN_EVT_EOT: + case GSI_CHAN_EVT_EOB: + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1); + if (!atomic_read(&sys->curr_polling_state)) { + /* put the gsi channel into polling mode */ + gsi_config_channel_mode(sys->ep->gsi_chan_hdl, + GSI_CHAN_MODE_POLL); + ipa3_inc_acquire_wakelock(); + atomic_set(&sys->curr_polling_state, 1); + if (sys->ep->napi_enabled) { + struct ipa_active_client_logging_info log; + + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI"); + clk_off = ipa3_inc_client_enable_clks_no_block( + &log); + if (!clk_off) + sys->ep->client_notify(sys->ep->priv, + IPA_CLIENT_START_POLL, 0); + else + queue_work(sys->wq, &sys->work); + } else { + queue_work(sys->wq, &sys->work); + } + } + break; + default: + IPAERR("received unexpected event id %d\n", notify->evt_id); + } +} + +static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify) +{ + struct ipa3_sys_context *sys; + struct ipa3_dma_xfer_wrapper *rx_pkt_expected, *rx_pkt_rcvd; + + if (!notify) { + IPAERR("gsi notify is NULL.\n"); + return; + } + IPADBG_LOW("event %d notified\n", notify->evt_id); + + sys = (struct ipa3_sys_context *)notify->chan_user_data; + if (sys->ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) { + IPAERR("IRQ_RX Callback was called for DMA_SYNC_CONS.\n"); + return; + } + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa3_dma_xfer_wrapper, link); + rx_pkt_rcvd = (struct ipa3_dma_xfer_wrapper *)notify + ->xfer_user_data; + if (rx_pkt_expected != rx_pkt_rcvd) { + IPAERR("Pkt was not filled in head of rx buffer.\n"); + WARN_ON(1); + return; + } + + sys->ep->bytes_xfered_valid = true; + sys->ep->bytes_xfered = notify->bytes_xfered; + sys->ep->phys_base = rx_pkt_rcvd->phys_addr_dest; + + switch (notify->evt_id) { + case GSI_CHAN_EVT_EOT: + if (!atomic_read(&sys->curr_polling_state)) { + /* put the gsi channel into polling mode */ + gsi_config_channel_mode(sys->ep->gsi_chan_hdl, + GSI_CHAN_MODE_POLL); + ipa3_inc_acquire_wakelock(); + atomic_set(&sys->curr_polling_state, 1); + queue_work(sys->wq, &sys->work); + } + break; + default: + IPAERR("received unexpected event id %d\n", notify->evt_id); + } +} + +int ipa3_alloc_common_event_ring(void) +{ + struct gsi_evt_ring_props gsi_evt_ring_props; + dma_addr_t evt_dma_addr; + int result; + + memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props)); + gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV; + gsi_evt_ring_props.intr = GSI_INTR_IRQ; + gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B; + + gsi_evt_ring_props.ring_len = IPA_COMMON_EVENT_RING_SIZE; + + gsi_evt_ring_props.ring_base_vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, + gsi_evt_ring_props.ring_len, &evt_dma_addr, GFP_KERNEL); + if (!gsi_evt_ring_props.ring_base_vaddr) { + IPAERR("fail to dma alloc %u bytes\n", + gsi_evt_ring_props.ring_len); + return -ENOMEM; + } + gsi_evt_ring_props.ring_base_addr = evt_dma_addr; + gsi_evt_ring_props.int_modt = 0; + gsi_evt_ring_props.int_modc = 1; /* moderation comes from channel*/ + gsi_evt_ring_props.rp_update_addr = 0; + 
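+ /* the common event ring is shared across channels, so it is not exclusive */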
gsi_evt_ring_props.exclusive = false; + gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb; + gsi_evt_ring_props.user_data = NULL; + + result = gsi_alloc_evt_ring(&gsi_evt_ring_props, + ipa3_ctx->gsi_dev_hdl, &ipa3_ctx->gsi_evt_comm_hdl); + if (result) { + IPAERR("gsi_alloc_evt_ring failed %d\n", result); + return result; + } + ipa3_ctx->gsi_evt_comm_ring_rem = IPA_COMMON_EVENT_RING_SIZE; + + return 0; +} + +static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in, + struct ipa3_ep_context *ep) +{ + struct gsi_evt_ring_props gsi_evt_ring_props; + struct gsi_chan_props gsi_channel_props; + union __packed gsi_channel_scratch ch_scratch; + const struct ipa_gsi_ep_config *gsi_ep_info; + dma_addr_t dma_addr; + dma_addr_t evt_dma_addr; + int result; + + if (!ep) { + IPAERR("EP context is empty\n"); + return -EINVAL; + } + + evt_dma_addr = 0; + ep->gsi_evt_ring_hdl = ~0; + memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props)); + if (ep->sys->use_comm_evt_ring) { + if (ipa3_ctx->gsi_evt_comm_ring_rem < 2 * in->desc_fifo_sz) { + IPAERR("not enough space in common event ring\n"); + IPAERR("available: %d needed: %d\n", + ipa3_ctx->gsi_evt_comm_ring_rem, + 2 * in->desc_fifo_sz); + WARN_ON(1); + return -EFAULT; + } + ipa3_ctx->gsi_evt_comm_ring_rem -= (2 * in->desc_fifo_sz); + ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl; + } else if (ep->sys->policy != IPA_POLICY_NOINTR_MODE || + IPA_CLIENT_IS_CONS(ep->client)) { + gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV; + gsi_evt_ring_props.intr = GSI_INTR_IRQ; + gsi_evt_ring_props.re_size = + GSI_EVT_RING_RE_SIZE_16B; + + /* + * GSI ring length is calculated based on the desc_fifo_sz + * which was meant to define the BAM desc fifo. GSI descriptors + * are 16B as opposed to 8B for BAM. + */ + gsi_evt_ring_props.ring_len = 2 * in->desc_fifo_sz; + + gsi_evt_ring_props.ring_base_vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, + gsi_evt_ring_props.ring_len, + &evt_dma_addr, GFP_KERNEL); + if (!gsi_evt_ring_props.ring_base_vaddr) { + IPAERR("fail to dma alloc %u bytes\n", + gsi_evt_ring_props.ring_len); + return -ENOMEM; + } + gsi_evt_ring_props.ring_base_addr = evt_dma_addr; + + /* copy mem info */ + ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len; + ep->gsi_mem_info.evt_ring_base_addr = + gsi_evt_ring_props.ring_base_addr; + ep->gsi_mem_info.evt_ring_base_vaddr = + gsi_evt_ring_props.ring_base_vaddr; + + gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT; + gsi_evt_ring_props.int_modc = 1; + + IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n", + ep->client, + gsi_evt_ring_props.int_modt, + gsi_evt_ring_props.int_modc); + gsi_evt_ring_props.rp_update_addr = 0; + gsi_evt_ring_props.exclusive = true; + gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb; + gsi_evt_ring_props.user_data = NULL; + + result = gsi_alloc_evt_ring(&gsi_evt_ring_props, + ipa3_ctx->gsi_dev_hdl, &ep->gsi_evt_ring_hdl); + if (result != GSI_STATUS_SUCCESS) + goto fail_alloc_evt_ring; + } + + memset(&gsi_channel_props, 0, sizeof(gsi_channel_props)); + gsi_channel_props.prot = GSI_CHAN_PROT_GPI; + if (IPA_CLIENT_IS_PROD(ep->client)) { + gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI; + } else { + gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI; + gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz; + } + + gsi_ep_info = ipa3_get_gsi_ep_info(ep->client); + if (!gsi_ep_info) { + IPAERR("Failed getting GSI EP info for client=%d\n", + ep->client); + result = -EINVAL; + goto fail_get_gsi_ep_info; + } else + gsi_channel_props.ch_id = 
gsi_ep_info->ipa_gsi_chan_num; + + gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl; + gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B; + + /* + * GSI ring length is calculated based on the desc_fifo_sz which was + * meant to define the BAM desc fifo. GSI descriptors are 16B as opposed + * to 8B for BAM. For PROD pipes there is also an additional descriptor + * for TAG STATUS immediate command. APPS_WAN_PROD pipe is an exception + * as this pipe do not use TAG STATUS for completion. Instead it uses + * event ring based completions. + */ + if (ep->client == IPA_CLIENT_APPS_WAN_PROD) + gsi_channel_props.ring_len = 2 * in->desc_fifo_sz; + else if (IPA_CLIENT_IS_PROD(ep->client)) + gsi_channel_props.ring_len = 4 * in->desc_fifo_sz; + else + gsi_channel_props.ring_len = 2 * in->desc_fifo_sz; + gsi_channel_props.ring_base_vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len, + &dma_addr, GFP_KERNEL); + if (!gsi_channel_props.ring_base_vaddr) { + IPAERR("fail to dma alloc %u bytes\n", + gsi_channel_props.ring_len); + result = -ENOMEM; + goto fail_alloc_channel_ring; + } + gsi_channel_props.ring_base_addr = dma_addr; + + /* copy mem info */ + ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len; + ep->gsi_mem_info.chan_ring_base_addr = + gsi_channel_props.ring_base_addr; + ep->gsi_mem_info.chan_ring_base_vaddr = + gsi_channel_props.ring_base_vaddr; + + gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE; + gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG; + if (ep->client == IPA_CLIENT_APPS_CMD_PROD) + gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT; + else + gsi_channel_props.low_weight = 1; + gsi_channel_props.chan_user_data = ep->sys; + gsi_channel_props.err_cb = ipa_gsi_chan_err_cb; + if (IPA_CLIENT_IS_PROD(ep->client)) + gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb; + else + gsi_channel_props.xfer_cb = ipa_gsi_irq_rx_notify_cb; + if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client)) + gsi_channel_props.xfer_cb = ipa_dma_gsi_irq_rx_notify_cb; + result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl, + &ep->gsi_chan_hdl); + if (result != GSI_STATUS_SUCCESS) + goto fail_alloc_channel; + + memset(&ch_scratch, 0, sizeof(ch_scratch)); + ch_scratch.gpi.max_outstanding_tre = gsi_ep_info->ipa_if_tlv * + GSI_CHAN_RE_SIZE_16B; + ch_scratch.gpi.outstanding_threshold = 2 * GSI_CHAN_RE_SIZE_16B; + result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("failed to write scratch %d\n", result); + goto fail_write_channel_scratch; + } + + result = gsi_start_channel(ep->gsi_chan_hdl); + if (result != GSI_STATUS_SUCCESS) + goto fail_start_channel; + if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) + gsi_config_channel_mode(ep->gsi_chan_hdl, + GSI_CHAN_MODE_POLL); + return 0; + +fail_start_channel: +fail_write_channel_scratch: + if (gsi_dealloc_channel(ep->gsi_chan_hdl) + != GSI_STATUS_SUCCESS) { + IPAERR("Failed to dealloc GSI chan.\n"); + WARN_ON(1); + } +fail_alloc_channel: + dma_free_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len, + gsi_channel_props.ring_base_vaddr, dma_addr); +fail_alloc_channel_ring: +fail_get_gsi_ep_info: + if (ep->gsi_evt_ring_hdl != ~0) { + gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); + ep->gsi_evt_ring_hdl = ~0; + } +fail_alloc_evt_ring: + if (gsi_evt_ring_props.ring_base_vaddr) + dma_free_coherent(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len, + gsi_evt_ring_props.ring_base_vaddr, evt_dma_addr); + IPAERR("Return with err: %d\n", result); + return result; +} + +static 
int ipa_populate_tag_field(struct ipa3_desc *desc, + struct ipa3_tx_pkt_wrapper *tx_pkt, + struct ipahal_imm_cmd_pyld **tag_pyld_ret) +{ + struct ipahal_imm_cmd_pyld *tag_pyld; + struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = {0}; + + /* populate tag field only if it is NULL */ + if (desc->pyld == NULL) { + tag_cmd.tag = pointer_to_tag_wa(tx_pkt); + tag_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &tag_cmd, true); + if (unlikely(!tag_pyld)) { + IPAERR("Failed to construct ip_packet_tag_status\n"); + return -EFAULT; + } + /* + * This is for 32-bit pointer, will need special + * handling if 64-bit pointer is used + */ + IPADBG_LOW("tx_pkt sent in tag: 0x%pK\n", tx_pkt); + desc->pyld = tag_pyld->data; + desc->opcode = tag_pyld->opcode; + desc->len = tag_pyld->len; + desc->user1 = tag_pyld; + desc->type = IPA_IMM_CMD_DESC; + desc->callback = ipa3_tag_destroy_imm; + + *tag_pyld_ret = tag_pyld; + } + return 0; +} + +static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys, + struct ipa_mem_buffer *mem_info) +{ + int ret; + struct gsi_chan_xfer_notify xfer_notify; + struct ipa3_rx_pkt_wrapper *rx_pkt; + + if (sys->ep->bytes_xfered_valid) { + mem_info->phys_base = sys->ep->phys_base; + mem_info->size = (u32)sys->ep->bytes_xfered; + sys->ep->bytes_xfered_valid = false; + return GSI_STATUS_SUCCESS; + } + + ret = gsi_poll_channel(sys->ep->gsi_chan_hdl, + &xfer_notify); + if (ret == GSI_STATUS_POLL_EMPTY) + return ret; + else if (ret != GSI_STATUS_SUCCESS) { + IPAERR("Poll channel err: %d\n", ret); + return ret; + } + + rx_pkt = (struct ipa3_rx_pkt_wrapper *) + xfer_notify.xfer_user_data; + mem_info->phys_base = rx_pkt->data.dma_addr; + mem_info->size = xfer_notify.bytes_xfered; + + return ret; +} + +/** + * ipa3_rx_poll() - Poll the rx packets from IPA HW. This + * function is exectued in the softirq context + * + * if input budget is zero, the driver switches back to + * interrupt mode. + * + * return number of polled packets, on error 0(zero) + */ +int ipa3_rx_poll(u32 clnt_hdl, int weight) +{ + struct ipa3_ep_context *ep; + int ret; + int cnt = 0; + struct ipa_mem_buffer mem_info = {0}; + static int total_cnt; + struct ipa_active_client_logging_info log; + + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI"); + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm 0x%x\n", clnt_hdl); + return cnt; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + while (cnt < weight && + atomic_read(&ep->sys->curr_polling_state)) { + + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1); + ret = ipa_poll_gsi_pkt(ep->sys, &mem_info); + if (ret) + break; + + ipa3_wq_rx_common(ep->sys, mem_info.size); + cnt += IPA_WAN_AGGR_PKT_CNT; + total_cnt++; + + if (ep->sys->len == 0 || total_cnt >= ep->sys->rx_pool_sz) { + total_cnt = 0; + cnt = cnt-1; + break; + } + }; + + if (cnt < weight) { + ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0); + ipa3_rx_switch_to_intr_mode(ep->sys); + ipa3_dec_client_disable_clks_no_block(&log); + } + + return cnt; +} + +static unsigned long tag_to_pointer_wa(uint64_t tag) +{ + return 0xFFFF000000000000 | (unsigned long) tag; +} + +static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt) +{ + u16 temp; + /* Add the check but it might have throughput issue */ + if (ipa3_is_msm_device()) { + temp = (u16) (~((unsigned long) tx_pkt & + 0xFFFF000000000000) >> 48); + if (temp) { + IPAERR("The 16 prefix is not all 1s (%pK)\n", + tx_pkt); + /* + * We need all addresses starting at 0xFFFF to + * pass it to HW. 
+ */ + BUG(); + } + } + return (unsigned long)tx_pkt & 0x0000FFFFFFFFFFFF; +} + +/** + * ipa_gsi_ch20_wa() - software workaround for IPA GSI channel 20 + * + * A hardware limitation requires to avoid using GSI physical channel 20. + * This function allocates GSI physical channel 20 and holds it to prevent + * others to use it. + * + * Return codes: 0 on success, negative on failure + */ +int ipa_gsi_ch20_wa(void) +{ + struct gsi_chan_props gsi_channel_props; + dma_addr_t dma_addr; + int result; + int i; + unsigned long chan_hdl[IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC]; + unsigned long chan_hdl_to_keep; + + + memset(&gsi_channel_props, 0, sizeof(gsi_channel_props)); + gsi_channel_props.prot = GSI_CHAN_PROT_GPI; + gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI; + gsi_channel_props.evt_ring_hdl = ~0; + gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B; + gsi_channel_props.ring_len = 4 * gsi_channel_props.re_size; + gsi_channel_props.ring_base_vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len, + &dma_addr, 0); + gsi_channel_props.ring_base_addr = dma_addr; + gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE; + gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG; + gsi_channel_props.low_weight = 1; + gsi_channel_props.err_cb = ipa_gsi_chan_err_cb; + gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb; + + /* first allocate channels up to channel 20 */ + for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) { + gsi_channel_props.ch_id = i; + result = gsi_alloc_channel(&gsi_channel_props, + ipa3_ctx->gsi_dev_hdl, + &chan_hdl[i]); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("failed to alloc channel %d err %d\n", + i, result); + return result; + } + } + + /* allocate channel 20 */ + gsi_channel_props.ch_id = IPA_GSI_CH_20_WA_VIRT_CHAN; + result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl, + &chan_hdl_to_keep); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("failed to alloc channel %d err %d\n", + i, result); + return result; + } + + /* release all other channels */ + for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) { + result = gsi_dealloc_channel(chan_hdl[i]); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("failed to dealloc channel %d err %d\n", + i, result); + return result; + } + } + + /* DMA memory shall not be freed as it is used by channel 20 */ + return 0; +} + +/** + * ipa_adjust_ra_buff_base_sz() + * + * Return value: the largest power of two which is smaller + * than the input value + */ +static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit) +{ + aggr_byte_limit += IPA_MTU; + aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT; + aggr_byte_limit--; + aggr_byte_limit |= aggr_byte_limit >> 1; + aggr_byte_limit |= aggr_byte_limit >> 2; + aggr_byte_limit |= aggr_byte_limit >> 4; + aggr_byte_limit |= aggr_byte_limit >> 8; + aggr_byte_limit |= aggr_byte_limit >> 16; + aggr_byte_limit++; + return aggr_byte_limit >> 1; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c new file mode 100644 index 000000000000..5de5278a63fa --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c @@ -0,0 +1,1626 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "ipa_i.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" + +#define IPA_FLT_STATUS_OF_ADD_FAILED (-1) +#define IPA_FLT_STATUS_OF_DEL_FAILED (-1) +#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1) + +#define IPA_FLT_GET_RULE_TYPE(__entry) \ + ( \ + ((__entry)->rule.hashable) ? \ + (IPA_RULE_HASHABLE):(IPA_RULE_NON_HASHABLE) \ + ) + +/** + * ipa3_generate_flt_hw_rule() - generates the filtering hardware rule + * @ip: the ip address family type + * @entry: filtering entry + * @buf: output buffer, buf == NULL means + * caller wants to know the size of the rule as seen + * by HW so they did not pass a valid buffer, we will use a + * scratch buffer instead. + * With this scheme we are going to + * generate the rule twice, once to know size using scratch + * buffer and second to write the rule to the actual caller + * supplied buffer which is of required size + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip, + struct ipa3_flt_entry *entry, u8 *buf) +{ + struct ipahal_flt_rule_gen_params gen_params; + int res = 0; + + memset(&gen_params, 0, sizeof(gen_params)); + + gen_params.ipt = ip; + if (entry->rt_tbl) + gen_params.rt_tbl_idx = entry->rt_tbl->idx; + else + gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx; + + gen_params.priority = entry->prio; + gen_params.id = entry->rule_id; + gen_params.rule = (const struct ipa_flt_rule *)&entry->rule; + + res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf); + if (res) + IPAERR("failed to generate flt h/w rule\n"); + + return 0; +} + +static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt) +{ + struct ipa3_flt_tbl *tbl; + int i; + + IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt); + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + + tbl = &ipa3_ctx->flt_tbl[i][ip]; + if (tbl->prev_mem[rlt].phys_base) { + IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i); + ipahal_free_dma_mem(&tbl->prev_mem[rlt]); + } + + if (list_empty(&tbl->head_flt_rule_list)) { + if (tbl->curr_mem[rlt].phys_base) { + IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n", + i); + ipahal_free_dma_mem(&tbl->curr_mem[rlt]); + } + } + } +} + +/** + * ipa_prep_flt_tbl_for_cmt() - preparing the flt table for commit + * assign priorities to the rules, calculate their sizes and calculate + * the overall table size + * @ip: the ip address family type + * @tbl: the flt tbl to be prepared + * @pipe_idx: the ep pipe appropriate for the given tbl + * + * Return: 0 on success, negative on failure + */ +static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip, + struct ipa3_flt_tbl *tbl, int pipe_idx) +{ + struct ipa3_flt_entry *entry; + int prio_i; + int max_prio; + u32 hdr_width; + + tbl->sz[IPA_RULE_HASHABLE] = 0; + tbl->sz[IPA_RULE_NON_HASHABLE] = 0; + + max_prio = ipahal_get_rule_max_priority(); + + prio_i = max_prio; + list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + + if (entry->rule.max_prio) { + entry->prio = max_prio; + } else { + if (ipahal_rule_decrease_priority(&prio_i)) { + IPAERR("cannot decrease rule priority - %d\n", + prio_i); + return -EPERM; + } + entry->prio = prio_i; + } + + 
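+ /* pass a NULL buffer so only the rule size (hw_len) is computed */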
if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) { + IPAERR("failed to calculate HW FLT rule size\n"); + return -EPERM; + } + IPADBG_LOW("pipe %d rule_id(handle) %u hw_len %d priority %u\n", + pipe_idx, entry->rule_id, entry->hw_len, entry->prio); + + if (entry->rule.hashable) + tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len; + else + tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len; + } + + if ((tbl->sz[IPA_RULE_HASHABLE] + + tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) { + IPADBG_LOW("flt tbl pipe %d is with zero total size\n", + pipe_idx); + return 0; + } + + hdr_width = ipahal_get_hw_tbl_hdr_width(); + + /* for the header word */ + if (tbl->sz[IPA_RULE_HASHABLE]) + tbl->sz[IPA_RULE_HASHABLE] += hdr_width; + if (tbl->sz[IPA_RULE_NON_HASHABLE]) + tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width; + + IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx, + tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]); + + return 0; +} + +/** + * ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures + * (rules and tables) to HW format and fill it in the given buffers + * @ip: the ip address family type + * @rlt: the type of the rules to translate (hashable or non-hashable) + * @base: the rules body buffer to be filled + * @hdr: the rules header (addresses/offsets) buffer to be filled + * @body_ofst: the offset of the rules body from the rules header at + * ipa sram + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip, + enum ipa_rule_type rlt, u8 *base, u8 *hdr, u32 body_ofst) +{ + u64 offset; + u8 *body_i; + int res; + struct ipa3_flt_entry *entry; + u8 *tbl_mem_buf; + struct ipa_mem_buffer tbl_mem; + struct ipa3_flt_tbl *tbl; + int i; + int hdr_idx = 0; + + body_i = base; + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + tbl = &ipa3_ctx->flt_tbl[i][ip]; + if (tbl->sz[rlt] == 0) { + hdr_idx++; + continue; + } + if (tbl->in_sys[rlt]) { + /* only body (no header) */ + tbl_mem.size = tbl->sz[rlt] - + ipahal_get_hw_tbl_hdr_width(); + if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) { + IPAERR("fail to alloc sys tbl of size %d\n", + tbl_mem.size); + goto err; + } + + if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base, + hdr, hdr_idx, true)) { + IPAERR("fail to wrt sys tbl addr to hdr\n"); + goto hdr_update_fail; + } + + tbl_mem_buf = tbl_mem.base; + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_flt_rule_list, + link) { + if (IPA_FLT_GET_RULE_TYPE(entry) != rlt) + continue; + res = ipa3_generate_flt_hw_rule( + ip, entry, tbl_mem_buf); + if (res) { + IPAERR("failed to gen HW FLT rule\n"); + goto hdr_update_fail; + } + tbl_mem_buf += entry->hw_len; + } + + if (tbl->curr_mem[rlt].phys_base) { + WARN_ON(tbl->prev_mem[rlt].phys_base); + tbl->prev_mem[rlt] = tbl->curr_mem[rlt]; + } + tbl->curr_mem[rlt] = tbl_mem; + } else { + offset = body_i - base + body_ofst; + + /* update the hdr at the right index */ + if (ipahal_fltrt_write_addr_to_hdr(offset, hdr, + hdr_idx, true)) { + IPAERR("fail to wrt lcl tbl ofst to hdr\n"); + goto hdr_update_fail; + } + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_flt_rule_list, + link) { + if (IPA_FLT_GET_RULE_TYPE(entry) != rlt) + continue; + res = ipa3_generate_flt_hw_rule( + ip, entry, body_i); + if (res) { + IPAERR("failed to gen HW FLT rule\n"); + goto err; + } + body_i += entry->hw_len; + } + + /** + * advance body_i to 
next table alignment as local + * tables are order back-to-back + */ + body_i += ipahal_get_lcl_tbl_addr_alignment(); + body_i = (u8 *)((long)body_i & + ~ipahal_get_lcl_tbl_addr_alignment()); + } + hdr_idx++; + } + + return 0; + +hdr_update_fail: + ipahal_free_dma_mem(&tbl_mem); +err: + return -EPERM; +} + +/** + * ipa_generate_flt_hw_tbl_img() - generates the flt hw tbls. + * headers and bodies are being created into buffers that will be filled into + * the local memory (sram) + * @ip: the ip address family type + * @alloc_params: In and Out parameters for the allocations of the buffers + * 4 buffers: hdr and bdy, each hashable and non-hashable + * + * Return: 0 on success, negative on failure + */ +static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip, + struct ipahal_fltrt_alloc_imgs_params *alloc_params) +{ + u32 hash_bdy_start_ofst, nhash_bdy_start_ofst; + int rc = 0; + + if (ip == IPA_IP_v4) { + nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) - + IPA_MEM_PART(v4_flt_nhash_ofst); + hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_hash_ofst) - + IPA_MEM_PART(v4_flt_hash_ofst); + } else { + nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_nhash_ofst) - + IPA_MEM_PART(v6_flt_nhash_ofst); + hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_hash_ofst) - + IPA_MEM_PART(v6_flt_hash_ofst); + } + + if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) { + IPAERR("fail to allocate FLT HW TBL images. IP %d\n", ip); + rc = -ENOMEM; + goto allocate_failed; + } + + if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE, + alloc_params->hash_bdy.base, alloc_params->hash_hdr.base, + hash_bdy_start_ofst)) { + IPAERR("fail to translate hashable flt tbls to hw format\n"); + rc = -EPERM; + goto translate_fail; + } + if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE, + alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base, + nhash_bdy_start_ofst)) { + IPAERR("fail to translate non-hash flt tbls to hw format\n"); + rc = -EPERM; + goto translate_fail; + } + + return rc; + +translate_fail: + if (alloc_params->hash_hdr.size) + ipahal_free_dma_mem(&alloc_params->hash_hdr); + ipahal_free_dma_mem(&alloc_params->nhash_hdr); + if (alloc_params->hash_bdy.size) + ipahal_free_dma_mem(&alloc_params->hash_bdy); + if (alloc_params->nhash_bdy.size) + ipahal_free_dma_mem(&alloc_params->nhash_bdy); +allocate_failed: + return rc; +} + +/** + * ipa_flt_valid_lcl_tbl_size() - validate if the space allocated for flt + * tbl bodies at the sram is enough for the commit + * @ipt: the ip address family type + * @rlt: the rule type (hashable or non-hashable) + * + * Return: true if enough space available or false in other cases + */ +static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt, + enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy) +{ + u16 avail; + + if (!bdy) { + IPAERR("Bad parameters, bdy = NULL\n"); + return false; + } + + if (ipt == IPA_IP_v4) + avail = (rlt == IPA_RULE_HASHABLE) ? + IPA_MEM_PART(apps_v4_flt_hash_size) : + IPA_MEM_PART(apps_v4_flt_nhash_size); + else + avail = (rlt == IPA_RULE_HASHABLE) ? + IPA_MEM_PART(apps_v6_flt_hash_size) : + IPA_MEM_PART(apps_v6_flt_nhash_size); + + if (bdy->size <= avail) + return true; + + IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n", + bdy->size, avail, ipt, rlt); + return false; +} + +/** + * ipa_flt_alloc_cmd_buffers() - alloc descriptors and imm cmds + * payload pointers buffers for headers and bodies of flt structure + * as well as place for flush imm. 
+ * @ipt: the ip address family type + * @desc: [OUT] descriptor buffer + * @cmd: [OUT] imm commands payload pointers buffer + * + * Return: 0 on success, negative on failure + */ +static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip, + struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld) +{ + u16 entries; + + /* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */ + entries = (ipa3_ctx->ep_flt_num) * 2 + 3; + + *desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC); + if (*desc == NULL) { + IPAERR("fail to alloc desc blob ip %d\n", ip); + goto fail_desc_alloc; + } + + *cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC); + if (*cmd_pyld == NULL) { + IPAERR("fail to alloc cmd pyld blob ip %d\n", ip); + goto fail_cmd_alloc; + } + + return 0; + +fail_cmd_alloc: + kfree(*desc); +fail_desc_alloc: + return -ENOMEM; +} + +/** + * ipa_flt_skip_pipe_config() - skip ep flt configuration or not? + * will skip according to pre-configuration or modem pipes + * @pipe: the EP pipe index + * + * Return: true if to skip, false otherwize + */ +static bool ipa_flt_skip_pipe_config(int pipe) +{ + if (ipa_is_modem_pipe(pipe)) { + IPADBG_LOW("skip %d - modem owned pipe\n", pipe); + return true; + } + + if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) { + IPADBG_LOW("skip %d\n", pipe); + return true; + } + + if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD) == pipe + && ipa3_ctx->modem_cfg_emb_pipe_flt)) { + IPADBG_LOW("skip %d\n", pipe); + return true; + } + + return false; +} + +/** + * __ipa_commit_flt_v3() - commit flt tables to the hw + * commit the headers and the bodies if are local with internal cache flushing. + * The headers (and local bodies) will first be created into dma buffers and + * then written via IC to the SRAM + * @ipt: the ip address family type + * + * Return: 0 on success, negative on failure + */ +int __ipa_commit_flt_v3(enum ipa_ip_type ip) +{ + struct ipahal_fltrt_alloc_imgs_params alloc_params; + int rc = 0; + struct ipa3_desc *desc; + struct ipahal_imm_cmd_register_write reg_write_cmd = {0}; + struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0}; + struct ipahal_imm_cmd_pyld **cmd_pyld; + int num_cmd = 0; + int i; + int hdr_idx; + u32 lcl_hash_hdr, lcl_nhash_hdr; + u32 lcl_hash_bdy, lcl_nhash_bdy; + bool lcl_hash, lcl_nhash; + struct ipahal_reg_fltrt_hash_flush flush; + struct ipahal_reg_valmask valmask; + u32 tbl_hdr_width; + struct ipa3_flt_tbl *tbl; + + tbl_hdr_width = ipahal_get_hw_tbl_hdr_width(); + memset(&alloc_params, 0, sizeof(alloc_params)); + alloc_params.ipt = ip; + alloc_params.tbls_num = ipa3_ctx->ep_flt_num; + + if (ip == IPA_IP_v4) { + lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_hash_ofst) + + tbl_hdr_width; /* to skip the bitmap */ + lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_nhash_ofst) + + tbl_hdr_width; /* to skip the bitmap */ + lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_flt_hash_ofst); + lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_flt_nhash_ofst); + lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl; + lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl; + } else { + lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_hash_ofst) + + tbl_hdr_width; /* to skip the bitmap */ + lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_nhash_ofst) + + tbl_hdr_width; /* to skip the bitmap */ + lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_flt_hash_ofst); + lcl_nhash_bdy = 
ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_flt_nhash_ofst); + lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl; + lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl; + } + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + tbl = &ipa3_ctx->flt_tbl[i][ip]; + if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) { + rc = -EPERM; + goto prep_failed; + } + if (!tbl->in_sys[IPA_RULE_HASHABLE] && + tbl->sz[IPA_RULE_HASHABLE]) { + alloc_params.num_lcl_hash_tbls++; + alloc_params.total_sz_lcl_hash_tbls += + tbl->sz[IPA_RULE_HASHABLE]; + alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width; + + } + if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] && + tbl->sz[IPA_RULE_NON_HASHABLE]) { + alloc_params.num_lcl_nhash_tbls++; + alloc_params.total_sz_lcl_nhash_tbls += + tbl->sz[IPA_RULE_NON_HASHABLE]; + alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width; + } + } + + if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) { + IPAERR("fail to generate FLT HW TBL image. IP %d\n", ip); + rc = -EFAULT; + goto prep_failed; + } + + if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE, + &alloc_params.hash_bdy)) { + rc = -EFAULT; + goto fail_size_valid; + } + if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE, + &alloc_params.nhash_bdy)) { + rc = -EFAULT; + goto fail_size_valid; + } + + if (ipa_flt_alloc_cmd_buffers(ip, &desc, &cmd_pyld)) { + rc = -ENOMEM; + goto fail_size_valid; + } + + /* flushing ipa internal hashable flt rules cache */ + memset(&flush, 0, sizeof(flush)); + if (ip == IPA_IP_v4) + flush.v4_flt = true; + else + flush.v6_flt = true; + ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask); + reg_write_cmd.skip_pipeline_clear = false; + reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH); + reg_write_cmd.value = valmask.val; + reg_write_cmd.value_mask = valmask.mask; + cmd_pyld[0] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, ®_write_cmd, false); + if (!cmd_pyld[0]) { + IPAERR("fail construct register_write imm cmd: IP %d\n", ip); + rc = -EFAULT; + goto fail_reg_write_construct; + } + desc[0].opcode = cmd_pyld[0]->opcode; + desc[0].pyld = cmd_pyld[0]->data; + desc[0].len = cmd_pyld[0]->len; + desc[0].type = IPA_IMM_CMD_DESC; + num_cmd++; + + hdr_idx = 0; + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) { + IPADBG_LOW("skip %d - not filtering pipe\n", i); + continue; + } + + if (ipa_flt_skip_pipe_config(i)) { + hdr_idx++; + continue; + } + + IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n", + hdr_idx, i); + + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = tbl_hdr_width; + mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base + + hdr_idx * tbl_hdr_width; + mem_cmd.local_addr = lcl_nhash_hdr + + hdr_idx * tbl_hdr_width; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem cmd: IP = %d\n", + ip); + goto fail_imm_cmd_construct; + } + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].pyld = cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + desc[num_cmd++].type = IPA_IMM_CMD_DESC; + + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = tbl_hdr_width; + mem_cmd.system_addr = alloc_params.hash_hdr.phys_base + + hdr_idx * 
tbl_hdr_width; + mem_cmd.local_addr = lcl_hash_hdr + + hdr_idx * tbl_hdr_width; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem cmd: IP = %d\n", + ip); + goto fail_imm_cmd_construct; + } + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].pyld = cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + desc[num_cmd++].type = IPA_IMM_CMD_DESC; + + hdr_idx++; + } + + if (lcl_nhash) { + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.nhash_bdy.size; + mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base; + mem_cmd.local_addr = lcl_nhash_bdy; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem cmd: IP = %d\n", + ip); + goto fail_imm_cmd_construct; + } + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].pyld = cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + desc[num_cmd++].type = IPA_IMM_CMD_DESC; + } + if (lcl_hash) { + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.hash_bdy.size; + mem_cmd.system_addr = alloc_params.hash_bdy.phys_base; + mem_cmd.local_addr = lcl_hash_bdy; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem cmd: IP = %d\n", + ip); + goto fail_imm_cmd_construct; + } + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].pyld = cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + desc[num_cmd++].type = IPA_IMM_CMD_DESC; + } + + if (ipa3_send_cmd(num_cmd, desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_imm_cmd_construct; + } + + IPADBG_LOW("Hashable HEAD\n"); + IPA_DUMP_BUFF(alloc_params.hash_hdr.base, + alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size); + + IPADBG_LOW("Non-Hashable HEAD\n"); + IPA_DUMP_BUFF(alloc_params.nhash_hdr.base, + alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size); + + if (alloc_params.hash_bdy.size) { + IPADBG_LOW("Hashable BODY\n"); + IPA_DUMP_BUFF(alloc_params.hash_bdy.base, + alloc_params.hash_bdy.phys_base, + alloc_params.hash_bdy.size); + } + + if (alloc_params.nhash_bdy.size) { + IPADBG_LOW("Non-Hashable BODY\n"); + IPA_DUMP_BUFF(alloc_params.nhash_bdy.base, + alloc_params.nhash_bdy.phys_base, + alloc_params.nhash_bdy.size); + } + + __ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE); + __ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE); + +fail_imm_cmd_construct: + for (i = 0 ; i < num_cmd ; i++) + ipahal_destroy_imm_cmd(cmd_pyld[i]); +fail_reg_write_construct: + kfree(desc); + kfree(cmd_pyld); +fail_size_valid: + if (alloc_params.hash_hdr.size) + ipahal_free_dma_mem(&alloc_params.hash_hdr); + ipahal_free_dma_mem(&alloc_params.nhash_hdr); + if (alloc_params.hash_bdy.size) + ipahal_free_dma_mem(&alloc_params.hash_bdy); + if (alloc_params.nhash_bdy.size) + ipahal_free_dma_mem(&alloc_params.nhash_bdy); +prep_failed: + return rc; +} + +static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule, + struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip) +{ + if (rule->action != IPA_PASS_TO_EXCEPTION) { + if (!rule->eq_attrib_type) { + if (!rule->rt_tbl_hdl) { + 
IPAERR("invalid RT tbl\n"); + goto error; + } + + *rt_tbl = ipa3_id_find(rule->rt_tbl_hdl); + if (*rt_tbl == NULL) { + IPAERR("RT tbl not found\n"); + goto error; + } + + if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) { + IPAERR("RT table cookie is invalid\n"); + goto error; + } + } else { + if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ? + IPA_MEM_PART(v4_modem_rt_index_hi) : + IPA_MEM_PART(v6_modem_rt_index_hi))) { + IPAERR("invalid RT tbl\n"); + goto error; + } + } + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + if (rule->pdn_idx) { + if (rule->action == IPA_PASS_TO_EXCEPTION || + rule->action == IPA_PASS_TO_ROUTING) { + IPAERR( + "PDN index should be 0 when action is not pass to NAT\n"); + goto error; + } else { + if (rule->pdn_idx >= IPA_MAX_PDN_NUM) { + IPAERR("PDN index %d is too large\n", + rule->pdn_idx); + goto error; + } + } + } + } + + if (rule->rule_id) { + if (!(rule->rule_id & ipahal_get_rule_id_hi_bit())) { + IPAERR("invalid rule_id provided 0x%x\n" + "rule_id with bit 0x%x are auto generated\n", + rule->rule_id, ipahal_get_rule_id_hi_bit()); + goto error; + } + } + + return 0; + +error: + return -EPERM; +} + +static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry, + const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl, + struct ipa3_flt_tbl *tbl) +{ + int id; + + *entry = kmem_cache_zalloc(ipa3_ctx->flt_rule_cache, GFP_KERNEL); + if (!*entry) + goto error; + INIT_LIST_HEAD(&((*entry)->link)); + (*entry)->rule = *rule; + (*entry)->cookie = IPA_FLT_COOKIE; + (*entry)->rt_tbl = rt_tbl; + (*entry)->tbl = tbl; + if (rule->rule_id) { + id = rule->rule_id; + } else { + id = ipa3_alloc_rule_id(tbl->rule_ids); + if (WARN(id < 0, "failed to allocate rule id\n")) + goto rule_id_fail; + } + (*entry)->rule_id = id; + + return 0; + +rule_id_fail: + kmem_cache_free(ipa3_ctx->flt_rule_cache, *entry); +error: + return -EPERM; +} + +static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl, + struct ipa3_flt_entry *entry, u32 *rule_hdl) +{ + int id; + + tbl->rule_cnt++; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt++; + id = ipa3_id_alloc(entry); + if (WARN(id < 0, "failed to add to tree\n")) + goto ipa_insert_failed; + *rule_hdl = id; + entry->id = id; + IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt); + + return 0; +ipa_insert_failed: + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + tbl->rule_cnt--; + return -EPERM; +} + +static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip, + const struct ipa_flt_rule *rule, u8 add_rear, + u32 *rule_hdl) +{ + struct ipa3_flt_entry *entry; + struct ipa3_rt_tbl *rt_tbl = NULL; + + if (__ipa_validate_flt_rule(rule, &rt_tbl, ip)) + goto error; + + if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl)) + goto error; + + if (add_rear) { + if (tbl->sticky_rear) + list_add_tail(&entry->link, + tbl->head_flt_rule_list.prev); + else + list_add_tail(&entry->link, &tbl->head_flt_rule_list); + } else { + list_add(&entry->link, &tbl->head_flt_rule_list); + } + + if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl)) + goto ipa_insert_failed; + + return 0; +ipa_insert_failed: + list_del(&entry->link); + /* if rule id was allocated from idr, remove it */ + if (!(entry->rule_id & ipahal_get_rule_id_hi_bit())) + idr_remove(entry->tbl->rule_ids, entry->rule_id); + kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); + +error: + return -EPERM; +} + +static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl, + const struct ipa_flt_rule *rule, + u32 *rule_hdl, + enum ipa_ip_type ip, + struct ipa3_flt_entry **add_after_entry) 
+{ + struct ipa3_flt_entry *entry; + struct ipa3_rt_tbl *rt_tbl = NULL; + + if (!*add_after_entry) + goto error; + + if (rule == NULL || rule_hdl == NULL) { + IPAERR_RL("bad parms rule=%pK rule_hdl=%pK\n", rule, + rule_hdl); + goto error; + } + + if (__ipa_validate_flt_rule(rule, &rt_tbl, ip)) + goto error; + + if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl)) + goto error; + + list_add(&entry->link, &((*add_after_entry)->link)); + + if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl)) + goto ipa_insert_failed; + + /* + * prepare for next insertion + */ + *add_after_entry = entry; + + return 0; + +ipa_insert_failed: + list_del(&entry->link); + /* if rule id was allocated from idr, remove it */ + if (!(entry->rule_id & ipahal_get_rule_id_hi_bit())) + idr_remove(entry->tbl->rule_ids, entry->rule_id); + kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); + +error: + *add_after_entry = NULL; + return -EPERM; +} + +static int __ipa_del_flt_rule(u32 rule_hdl) +{ + struct ipa3_flt_entry *entry; + int id; + + entry = ipa3_id_find(rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_FLT_COOKIE) { + IPAERR_RL("bad params\n"); + return -EINVAL; + } + id = entry->id; + + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + IPADBG("del flt rule rule_cnt=%d rule_id=%d\n", + entry->tbl->rule_cnt, entry->rule_id); + entry->cookie = 0; + /* if rule id was allocated from idr, remove it */ + if (!(entry->rule_id & ipahal_get_rule_id_hi_bit())) + idr_remove(entry->tbl->rule_ids, entry->rule_id); + + kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); + + /* remove the handle from the database */ + ipa3_id_remove(id); + + return 0; +} + +static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, + enum ipa_ip_type ip) +{ + struct ipa3_flt_entry *entry; + struct ipa3_rt_tbl *rt_tbl = NULL; + + entry = ipa3_id_find(frule->rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + goto error; + } + + if (entry->cookie != IPA_FLT_COOKIE) { + IPAERR_RL("bad params\n"); + goto error; + } + + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + + if (frule->rule.action != IPA_PASS_TO_EXCEPTION) { + if (!frule->rule.eq_attrib_type) { + if (!frule->rule.rt_tbl_hdl) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + + rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl); + if (rt_tbl == NULL) { + IPAERR_RL("RT tbl not found\n"); + goto error; + } + + if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { + IPAERR_RL("RT table cookie is invalid\n"); + goto error; + } + } else { + if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ? 
+ IPA_MEM_PART(v4_modem_rt_index_hi) : + IPA_MEM_PART(v6_modem_rt_index_hi))) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + } + } + + entry->rule = frule->rule; + entry->rt_tbl = rt_tbl; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt++; + entry->hw_len = 0; + entry->prio = 0; + + return 0; + +error: + return -EPERM; +} + +static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx) +{ + *ipa_ep_idx = ipa3_get_ep_mapping(ep); + if (*ipa_ep_idx < 0) { + IPAERR("ep not valid ep=%d\n", ep); + return -EINVAL; + } + if (ipa3_ctx->ep[*ipa_ep_idx].valid == 0) + IPADBG("ep not connected ep_idx=%d\n", *ipa_ep_idx); + + if (!ipa_is_ep_support_flt(*ipa_ep_idx)) { + IPAERR("ep do not support filtering ep=%d\n", ep); + return -EINVAL; + } + + return 0; +} + +static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, + const struct ipa_flt_rule *rule, u8 add_rear, + u32 *rule_hdl) +{ + struct ipa3_flt_tbl *tbl; + int ipa_ep_idx; + + if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) { + IPAERR_RL("bad parms rule=%pK rule_hdl=%pK ep=%d\n", rule, + rule_hdl, ep); + + return -EINVAL; + } + + if (__ipa_add_flt_get_ep_idx(ep, &ipa_ep_idx)) + return -EINVAL; + + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip]; + IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep); + + return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl); +} + +/** + * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally + * commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) +{ + int i; + int result; + + if (rules == NULL || rules->num_rules == 0 || + rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < rules->num_rules; i++) { + if (!rules->global) + result = __ipa_add_ep_flt_rule(rules->ip, rules->ep, + &rules->rules[i].rule, + rules->rules[i].at_rear, + &rules->rules[i].flt_rule_hdl); + else + result = -1; + + if (result) { + IPAERR_RL("failed to add flt rule %d\n", i); + rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->global) { + IPAERR_RL("no support for global filter rules\n"); + result = -EPERM; + goto bail; + } + + if (rules->commit) + if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_add_flt_rule_after() - Add the specified filtering rules to SW after + * the rule which its handle is given and optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules) +{ + int i; + int result; + struct ipa3_flt_tbl *tbl; + int ipa_ep_idx; + struct ipa3_flt_entry *entry; + + if (rules == NULL || rules->num_rules == 0 || + rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + if (rules->ep >= IPA_CLIENT_MAX) { + IPAERR_RL("bad parms ep=%d\n", rules->ep); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + + if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) { + result = -EINVAL; + goto bail; + } + + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip]; + + entry = ipa3_id_find(rules->add_after_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + result = -EINVAL; + goto bail; + } + + if 
(entry->tbl != tbl) { + IPAERR_RL("given entry does not match the table\n"); + result = -EINVAL; + goto bail; + } + + if (tbl->sticky_rear) + if (&entry->link == tbl->head_flt_rule_list.prev) { + IPAERR_RL("cannot add rule at end of a sticky table"); + result = -EINVAL; + goto bail; + } + + IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n", + rules->ip, rules->ep, rules->add_after_hdl); + + /* + * we add all rules one after the other, if one insertion fails, it cuts + * the chain (all following will receive fail status) following calls to + * __ipa_add_flt_rule_after will fail (entry == NULL) + */ + + for (i = 0; i < rules->num_rules; i++) { + result = __ipa_add_flt_rule_after(tbl, + &rules->rules[i].rule, + &rules->rules[i].flt_rule_hdl, + rules->ip, + &entry); + + if (result) { + IPAERR_RL("failed to add flt rule %d\n", i); + rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) { + IPAERR("failed to commit flt rules\n"); + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and + * optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) { + IPAERR_RL("failed to del flt rule %i\n", i); + hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) + if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_mdfy_flt_rule() - Modify the specified filtering rules in SW and + * optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_rules; i++) { + if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) { + IPAERR_RL("failed to mdfy flt rule %i\n", i); + hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED; + } else { + hdls->rules[i].status = 0; + } + } + + if (hdls->commit) + if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + + +/** + * ipa3_commit_flt() - Commit the current SW filtering table of specified type + * to IPA HW + * @ip: [in] the family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_commit_flt(enum ipa_ip_type ip) +{ + int result; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + + if (ipa3_ctx->ctrl->ipa3_commit_flt(ip)) { + result = -EPERM; + goto bail; + } + result = 0; + +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + 
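+/*
+ * Usage sketch (illustration added by the editor, not part of the original
+ * driver): shows how a kernel client could install a single hashable
+ * exception rule for UDP traffic on an example endpoint and commit it to
+ * HW through ipa3_add_flt_rule() above. The endpoint, the attribute values
+ * and the helper name are hypothetical; the block is kept under #if 0 so
+ * it is never compiled.
+ */
+#if 0
+static int ipa_flt_usage_sketch(void)
+{
+	struct ipa_ioc_add_flt_rule *req;
+	int ret;
+
+	/* room for the ioctl header plus one ipa_flt_rule_add entry */
+	req = kzalloc(sizeof(*req) + sizeof(struct ipa_flt_rule_add),
+		GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->commit = 1;			/* push tables to IPA HW */
+	req->ip = IPA_IP_v4;
+	req->ep = IPA_CLIENT_USB_PROD;		/* example filtering EP */
+	req->num_rules = 1;
+	req->rules[0].at_rear = 1;
+	req->rules[0].rule.action = IPA_PASS_TO_EXCEPTION;
+	req->rules[0].rule.hashable = 1;	/* goes to the hashable tbl */
+	req->rules[0].rule.attrib.attrib_mask = IPA_FLT_PROTOCOL;
+	req->rules[0].rule.attrib.u.v4.protocol = 17;	/* UDP */
+
+	ret = ipa3_add_flt_rule(req);
+	if (!ret && !req->rules[0].status)
+		IPADBG("flt rule added, hdl %u\n",
+			req->rules[0].flt_rule_hdl);
+
+	kfree(req);
+	return ret;
+}
+#endif
+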
+/** + * ipa3_reset_flt() - Reset the current SW filtering table of specified type + * (does not commit to HW) + * @ip: [in] the family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_reset_flt(enum ipa_ip_type ip) +{ + struct ipa3_flt_tbl *tbl; + struct ipa3_flt_entry *entry; + struct ipa3_flt_entry *next; + int i; + int id; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if (!ipa_is_ep_support_flt(i)) + continue; + + tbl = &ipa3_ctx->flt_tbl[i][ip]; + list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, + link) { + if (ipa3_id_find(entry->id) == NULL) { + WARN_ON(1); + mutex_unlock(&ipa3_ctx->lock); + return -EFAULT; + } + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + /* if rule id was allocated from idr, remove it */ + if (!(entry->rule_id & ipahal_get_rule_id_hi_bit())) + idr_remove(entry->tbl->rule_ids, + entry->rule_id); + entry->cookie = 0; + id = entry->id; + kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); + + /* remove the handle from the database */ + ipa3_id_remove(id); + } + } + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx) +{ + struct ipa3_flt_tbl *tbl; + struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx]; + struct ipa_flt_rule rule; + + if (!ipa_is_ep_support_flt(ipa_ep_idx)) { + IPADBG("cannot add flt rules to non filtering pipe num %d\n", + ipa_ep_idx); + return; + } + + memset(&rule, 0, sizeof(rule)); + + mutex_lock(&ipa3_ctx->lock); + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; + rule.action = IPA_PASS_TO_EXCEPTION; + __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true, + &ep->dflt_flt4_rule_hdl); + ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4); + tbl->sticky_rear = true; + + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; + rule.action = IPA_PASS_TO_EXCEPTION; + __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true, + &ep->dflt_flt6_rule_hdl); + ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6); + tbl->sticky_rear = true; + mutex_unlock(&ipa3_ctx->lock); +} + +void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx) +{ + struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx]; + struct ipa3_flt_tbl *tbl; + + mutex_lock(&ipa3_ctx->lock); + if (ep->dflt_flt4_rule_hdl) { + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; + __ipa_del_flt_rule(ep->dflt_flt4_rule_hdl); + ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4); + /* Reset the sticky flag. */ + tbl->sticky_rear = false; + ep->dflt_flt4_rule_hdl = 0; + } + if (ep->dflt_flt6_rule_hdl) { + tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; + __ipa_del_flt_rule(ep->dflt_flt6_rule_hdl); + ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6); + /* Reset the sticky flag. */ + tbl->sticky_rear = false; + ep->dflt_flt6_rule_hdl = 0; + } + mutex_unlock(&ipa3_ctx->lock); +} + +/** + * ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given pipe + * Pipe must be for AP EP (not modem) and support filtering + * updates the the filtering masking values without changing the rt ones. 
+ * + * @pipe_idx: filter pipe index to configure the tuple masking + * @tuple: the tuple members masking + * Returns: 0 on success, negative on failure + * + */ +int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple) +{ + struct ipahal_reg_fltrt_hash_tuple fltrt_tuple; + + if (!tuple) { + IPAERR("bad tuple\n"); + return -EINVAL; + } + + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("bad pipe index!\n"); + return -EINVAL; + } + + if (!ipa_is_ep_support_flt(pipe_idx)) { + IPAERR("pipe %d not filtering pipe\n", pipe_idx); + return -EINVAL; + } + + if (ipa_is_modem_pipe(pipe_idx)) { + IPAERR("modem pipe tuple is not configured by AP\n"); + return -EINVAL; + } + + ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n, + pipe_idx, &fltrt_tuple); + fltrt_tuple.flt = *tuple; + ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n, + pipe_idx, &fltrt_tuple); + + return 0; +} + +/** + * ipa3_flt_read_tbl_from_hw() -Read filtering table from IPA HW + * @pipe_idx: IPA endpoint index + * @ip_type: IPv4 or IPv6 table + * @hashable: hashable or non-hashable table + * @entry: array to fill the table entries + * @num_entry: number of entries in entry array. set by the caller to indicate + * entry array size. Then set by this function as an output parameter to + * indicate the number of entries in the array + * + * This function reads the filtering table from IPA SRAM and prepares an array + * of entries. This function is mainly used for debugging purposes. + * + * If empty table or Modem Apps table, zero entries will be returned. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type, + bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry) +{ + void *ipa_sram_mmio; + u64 hdr_base_ofst; + int tbl_entry_idx; + int i; + int res = 0; + u64 tbl_addr; + bool is_sys; + u8 *rule_addr; + struct ipa_mem_buffer *sys_tbl_mem; + int rule_idx; + struct ipa3_flt_tbl *flt_tbl_ptr; + + IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%pK num_entry=0x%pK\n", + pipe_idx, ip_type, hashable, entry, num_entry); + + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || ip_type >= IPA_IP_MAX || + !entry || !num_entry) { + IPAERR("Invalid params\n"); + return -EFAULT; + } + + if (!ipa_is_ep_support_flt(pipe_idx)) { + IPAERR("pipe %d does not support filtering\n", pipe_idx); + return -EINVAL; + } + + flt_tbl_ptr = &ipa3_ctx->flt_tbl[pipe_idx][ip_type]; + /* map IPA SRAM */ + ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, + ipa3_ctx->smem_restricted_bytes / 4), + ipa3_ctx->smem_sz); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + + memset(entry, 0, sizeof(*entry) * (*num_entry)); + if (hashable) { + if (ip_type == IPA_IP_v4) + hdr_base_ofst = + IPA_MEM_PART(v4_flt_hash_ofst); + else + hdr_base_ofst = + IPA_MEM_PART(v6_flt_hash_ofst); + } else { + if (ip_type == IPA_IP_v4) + hdr_base_ofst = + IPA_MEM_PART(v4_flt_nhash_ofst); + else + hdr_base_ofst = + IPA_MEM_PART(v6_flt_nhash_ofst); + } + + /* calculate the index of the tbl entry */ + tbl_entry_idx = 1; /* skip the bitmap */ + for (i = 0; i < pipe_idx; i++) + if (ipa3_ctx->ep_flt_bitmap & (1 << i)) + tbl_entry_idx++; + + IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n", + hdr_base_ofst, tbl_entry_idx); + + res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst, + tbl_entry_idx, &tbl_addr, &is_sys); + if (res) { + 
IPAERR("failed to read table address from header structure\n"); + goto bail; + } + IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n", + pipe_idx, tbl_addr, is_sys); + if (!tbl_addr) { + IPAERR("invalid flt tbl addr\n"); + res = -EFAULT; + goto bail; + } + + /* for tables resides in DDR access it from the virtual memory */ + if (is_sys) { + sys_tbl_mem = + &flt_tbl_ptr->curr_mem[hashable ? IPA_RULE_HASHABLE : + IPA_RULE_NON_HASHABLE]; + if (sys_tbl_mem->phys_base && + sys_tbl_mem->phys_base != tbl_addr) { + IPAERR("mismatch addr: parsed=%llx sw=%pad\n", + tbl_addr, &sys_tbl_mem->phys_base); + } + if (sys_tbl_mem->phys_base) + rule_addr = sys_tbl_mem->base; + else + rule_addr = NULL; + } else { + rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr; + } + + IPADBG("First rule addr 0x%pK\n", rule_addr); + + if (!rule_addr) { + /* Modem table in system memory or empty table */ + *num_entry = 0; + goto bail; + } + + rule_idx = 0; + while (rule_idx < *num_entry) { + res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]); + if (res) { + IPAERR("failed parsing flt rule\n"); + goto bail; + } + + IPADBG("rule_size=%d\n", entry[rule_idx].rule_size); + if (!entry[rule_idx].rule_size) + break; + + rule_addr += entry[rule_idx].rule_size; + rule_idx++; + } + *num_entry = rule_idx; +bail: + iounmap(ipa_sram_mmio); + return 0; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c new file mode 100644 index 000000000000..b143aedfff0f --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c @@ -0,0 +1,1252 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "ipa_i.h" +#include "ipahal/ipahal.h" + +static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64}; +static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64}; + +#define HDR_TYPE_IS_VALID(type) \ + ((type) >= 0 && (type) < IPA_HDR_L2_MAX) + +#define HDR_PROC_TYPE_IS_VALID(type) \ + ((type) >= 0 && (type) < IPA_HDR_PROC_MAX) + +/** + * ipa3_generate_hdr_hw_tbl() - generates the headers table + * @mem: [out] buffer to put the header table + * + * Returns: 0 on success, negative on failure + */ +static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem) +{ + struct ipa3_hdr_entry *entry; + + mem->size = ipa3_ctx->hdr_tbl.end; + + if (mem->size == 0) { + IPAERR("hdr tbl empty\n"); + return -EPERM; + } + IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end); + + mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + memset(mem->base, 0, mem->size); + list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list, + link) { + if (entry->is_hdr_proc_ctx) + continue; + IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len, + entry->offset_entry->offset); + ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset, + entry->hdr, entry->hdr_len); + } + + return 0; +} + +static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem, + u32 hdr_base_addr) +{ + struct ipa3_hdr_proc_ctx_entry *entry; + int ret; + + list_for_each_entry(entry, + &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list, + link) { + IPADBG_LOW("processing type %d ofst=%d\n", + entry->type, entry->offset_entry->offset); + ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base, + entry->offset_entry->offset, + entry->hdr->hdr_len, + entry->hdr->is_hdr_proc_ctx, + entry->hdr->phys_base, + hdr_base_addr, + entry->hdr->offset_entry, + entry->l2tp_params); + if (ret) + return ret; + } + + return 0; +} + +/** + * ipa3_generate_hdr_proc_ctx_hw_tbl() - + * generates the headers processing context table. + * @mem: [out] buffer to put the processing context table + * @aligned_mem: [out] actual processing context table (with alignment). + * Processing context table needs to be 8 Bytes aligned. + * + * Returns: 0 on success, negative on failure + */ +static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr, + struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem) +{ + u32 hdr_base_addr; + + mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4; + + /* make sure table is aligned */ + mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE; + + IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end); + + mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + aligned_mem->phys_base = + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base); + aligned_mem->base = mem->base + + (aligned_mem->phys_base - mem->phys_base); + aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE; + memset(aligned_mem->base, 0, aligned_mem->size); + hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? 
IPA_MEM_PART(apps_hdr_ofst) : + hdr_sys_addr; + return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr); +} + +/** + * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW + * + * Returns: 0 on success, negative on failure + */ +int __ipa_commit_hdr_v3_0(void) +{ + struct ipa3_desc desc[2]; + struct ipa_mem_buffer hdr_mem; + struct ipa_mem_buffer ctx_mem; + struct ipa_mem_buffer aligned_ctx_mem; + struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0}; + struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0}; + struct ipahal_imm_cmd_register_write reg_write_cmd = {0}; + struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0}; + struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL; + struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL; + int rc = -EFAULT; + u32 proc_ctx_size; + u32 proc_ctx_ofst; + u32 proc_ctx_size_ddr; + + memset(desc, 0, 2 * sizeof(struct ipa3_desc)); + + if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) { + IPAERR("fail to generate HDR HW TBL\n"); + goto end; + } + + if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem, + &aligned_ctx_mem)) { + IPAERR("fail to generate HDR PROC CTX HW TBL\n"); + goto end; + } + + if (ipa3_ctx->hdr_tbl_lcl) { + if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) { + IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size, + IPA_MEM_PART(apps_hdr_size)); + goto end; + } else { + dma_cmd_hdr.is_read = false; /* write operation */ + dma_cmd_hdr.skip_pipeline_clear = false; + dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR; + dma_cmd_hdr.system_addr = hdr_mem.phys_base; + dma_cmd_hdr.size = hdr_mem.size; + dma_cmd_hdr.local_addr = + ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_hdr_ofst); + hdr_cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, + &dma_cmd_hdr, false); + if (!hdr_cmd_pyld) { + IPAERR("fail construct dma_shared_mem cmd\n"); + goto end; + } + desc[0].opcode = hdr_cmd_pyld->opcode; + desc[0].pyld = hdr_cmd_pyld->data; + desc[0].len = hdr_cmd_pyld->len; + } + } else { + if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) { + IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size, + IPA_MEM_PART(apps_hdr_size_ddr)); + goto end; + } else { + hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base; + hdr_cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_HDR_INIT_SYSTEM, + &hdr_init_cmd, false); + if (!hdr_cmd_pyld) { + IPAERR("fail construct hdr_init_system cmd\n"); + goto end; + } + desc[0].opcode = hdr_cmd_pyld->opcode; + desc[0].pyld = hdr_cmd_pyld->data; + desc[0].len = hdr_cmd_pyld->len; + } + } + desc[0].type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size); + + proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size); + proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst); + if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) { + if (aligned_ctx_mem.size > proc_ctx_size) { + IPAERR("tbl too big needed %d avail %d\n", + aligned_ctx_mem.size, + proc_ctx_size); + goto end; + } else { + dma_cmd_ctx.is_read = false; /* Write operation */ + dma_cmd_ctx.skip_pipeline_clear = false; + dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR; + dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base; + dma_cmd_ctx.size = aligned_ctx_mem.size; + dma_cmd_ctx.local_addr = + ipa3_ctx->smem_restricted_bytes + + proc_ctx_ofst; + ctx_cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, + &dma_cmd_ctx, false); + if (!ctx_cmd_pyld) { + IPAERR("fail construct dma_shared_mem cmd\n"); + goto end; + } + desc[1].opcode = ctx_cmd_pyld->opcode; + desc[1].pyld = ctx_cmd_pyld->data; + 
desc[1].len = ctx_cmd_pyld->len; + } + } else { + proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr); + if (aligned_ctx_mem.size > proc_ctx_size_ddr) { + IPAERR("tbl too big, needed %d avail %d\n", + aligned_ctx_mem.size, + proc_ctx_size_ddr); + goto end; + } else { + reg_write_cmd.skip_pipeline_clear = false; + reg_write_cmd.pipeline_clear_options = + IPAHAL_HPS_CLEAR; + reg_write_cmd.offset = + ipahal_get_reg_ofst( + IPA_SYS_PKT_PROC_CNTXT_BASE); + reg_write_cmd.value = aligned_ctx_mem.phys_base; + reg_write_cmd.value_mask = + ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1); + ctx_cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, + ®_write_cmd, false); + if (!ctx_cmd_pyld) { + IPAERR("fail construct register_write cmd\n"); + goto end; + } + desc[1].opcode = ctx_cmd_pyld->opcode; + desc[1].pyld = ctx_cmd_pyld->data; + desc[1].len = ctx_cmd_pyld->len; + } + } + desc[1].type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size); + + if (ipa3_send_cmd(2, desc)) + IPAERR("fail to send immediate command\n"); + else + rc = 0; + + if (ipa3_ctx->hdr_tbl_lcl) { + dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base, + hdr_mem.phys_base); + } else { + if (!rc) { + if (ipa3_ctx->hdr_mem.phys_base) + dma_free_coherent(ipa3_ctx->pdev, + ipa3_ctx->hdr_mem.size, + ipa3_ctx->hdr_mem.base, + ipa3_ctx->hdr_mem.phys_base); + ipa3_ctx->hdr_mem = hdr_mem; + } + } + + if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) { + dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base, + ctx_mem.phys_base); + } else { + if (!rc) { + if (ipa3_ctx->hdr_proc_ctx_mem.phys_base) + dma_free_coherent(ipa3_ctx->pdev, + ipa3_ctx->hdr_proc_ctx_mem.size, + ipa3_ctx->hdr_proc_ctx_mem.base, + ipa3_ctx->hdr_proc_ctx_mem.phys_base); + ipa3_ctx->hdr_proc_ctx_mem = ctx_mem; + } + } + +end: + if (ctx_cmd_pyld) + ipahal_destroy_imm_cmd(ctx_cmd_pyld); + + if (hdr_cmd_pyld) + ipahal_destroy_imm_cmd(hdr_cmd_pyld); + + return rc; +} + +static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, + bool add_ref_hdr) +{ + struct ipa3_hdr_entry *hdr_entry; + struct ipa3_hdr_proc_ctx_entry *entry; + struct ipa3_hdr_proc_ctx_offset_entry *offset; + u32 bin; + struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl; + int id; + int needed_len; + int mem_size; + + IPADBG_LOW("Add processing type %d hdr_hdl %d\n", + proc_ctx->type, proc_ctx->hdr_hdl); + + if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) { + IPAERR_RL("invalid processing type %d\n", proc_ctx->type); + return -EINVAL; + } + + hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl); + if (!hdr_entry) { + IPAERR_RL("hdr_hdl is invalid\n"); + return -EINVAL; + } + if (hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie); + WARN_ON(1); + return -EINVAL; + } + IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n", + hdr_entry->name, hdr_entry->is_hdr_proc_ctx); + + entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL); + if (!entry) { + IPAERR("failed to alloc proc_ctx object\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&entry->link); + + entry->type = proc_ctx->type; + entry->hdr = hdr_entry; + entry->l2tp_params = proc_ctx->l2tp_params; + if (add_ref_hdr) + hdr_entry->ref_cnt++; + entry->cookie = IPA_PROC_HDR_COOKIE; + + needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type); + + if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) { + bin = IPA_HDR_PROC_CTX_BIN0; + } else if (needed_len <= + ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) { + bin 
= IPA_HDR_PROC_CTX_BIN1; + } else { + IPAERR_RL("unexpected needed len %d\n", needed_len); + WARN_ON(1); + goto bad_len; + } + + mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ? + IPA_MEM_PART(apps_hdr_proc_ctx_size) : + IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr); + if (list_empty(&htbl->head_free_offset_list[bin])) { + if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) { + IPAERR_RL("hdr proc ctx table overflow\n"); + goto bad_len; + } + + offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache, + GFP_KERNEL); + if (!offset) { + IPAERR("failed to alloc offset object\n"); + goto bad_len; + } + INIT_LIST_HEAD(&offset->link); + /* + * for a first item grow, set the bin and offset which are set + * in stone + */ + offset->offset = htbl->end; + offset->bin = bin; + htbl->end += ipa_hdr_proc_ctx_bin_sz[bin]; + list_add(&offset->link, + &htbl->head_offset_list[bin]); + } else { + /* get the first free slot */ + offset = + list_first_entry(&htbl->head_free_offset_list[bin], + struct ipa3_hdr_proc_ctx_offset_entry, link); + list_move(&offset->link, &htbl->head_offset_list[bin]); + } + + entry->offset_entry = offset; + list_add(&entry->link, &htbl->head_proc_ctx_entry_list); + htbl->proc_ctx_cnt++; + IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len, + htbl->proc_ctx_cnt, offset->offset); + + id = ipa3_id_alloc(entry); + if (id < 0) { + IPAERR("failed to alloc id\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + entry->id = id; + proc_ctx->proc_ctx_hdl = id; + entry->ref_cnt++; + + return 0; + +ipa_insert_failed: + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + list_del(&entry->link); + htbl->proc_ctx_cnt--; + +bad_len: + if (add_ref_hdr) + hdr_entry->ref_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry); + return -EPERM; +} + + +static int __ipa_add_hdr(struct ipa_hdr_add *hdr) +{ + struct ipa3_hdr_entry *entry; + struct ipa_hdr_offset_entry *offset = NULL; + u32 bin; + struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl; + int id; + int mem_size; + + if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) { + IPAERR_RL("bad param\n"); + goto error; + } + + if (!HDR_TYPE_IS_VALID(hdr->type)) { + IPAERR_RL("invalid hdr type %d\n", hdr->type); + goto error; + } + + entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL); + if (!entry) + goto error; + + INIT_LIST_HEAD(&entry->link); + + memcpy(entry->hdr, hdr->hdr, hdr->hdr_len); + entry->hdr_len = hdr->hdr_len; + strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX); + entry->is_partial = hdr->is_partial; + entry->type = hdr->type; + entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid; + entry->eth2_ofst = hdr->eth2_ofst; + entry->cookie = IPA_HDR_COOKIE; + + if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0]) + bin = IPA_HDR_BIN0; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1]) + bin = IPA_HDR_BIN1; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2]) + bin = IPA_HDR_BIN2; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3]) + bin = IPA_HDR_BIN3; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4]) + bin = IPA_HDR_BIN4; + else { + IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len); + goto bad_hdr_len; + } + + mem_size = (ipa3_ctx->hdr_tbl_lcl) ? 
IPA_MEM_PART(apps_hdr_size) : + IPA_MEM_PART(apps_hdr_size_ddr); + + if (list_empty(&htbl->head_free_offset_list[bin])) { + /* if header does not fit to table, place it in DDR */ + if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) { + entry->is_hdr_proc_ctx = true; + entry->phys_base = dma_map_single(ipa3_ctx->pdev, + entry->hdr, + entry->hdr_len, + DMA_TO_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, + entry->phys_base)) { + IPAERR("dma_map_single failure for entry\n"); + goto fail_dma_mapping; + } + } else { + entry->is_hdr_proc_ctx = false; + offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache, + GFP_KERNEL); + if (!offset) { + IPAERR("failed to alloc hdr offset object\n"); + goto bad_hdr_len; + } + INIT_LIST_HEAD(&offset->link); + /* + * for a first item grow, set the bin and offset which + * are set in stone + */ + offset->offset = htbl->end; + offset->bin = bin; + htbl->end += ipa_hdr_bin_sz[bin]; + list_add(&offset->link, + &htbl->head_offset_list[bin]); + entry->offset_entry = offset; + } + } else { + entry->is_hdr_proc_ctx = false; + /* get the first free slot */ + offset = list_first_entry(&htbl->head_free_offset_list[bin], + struct ipa_hdr_offset_entry, link); + list_move(&offset->link, &htbl->head_offset_list[bin]); + entry->offset_entry = offset; + } + + list_add(&entry->link, &htbl->head_hdr_entry_list); + htbl->hdr_cnt++; + if (entry->is_hdr_proc_ctx) + IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n", + hdr->hdr_len, + htbl->hdr_cnt, + &entry->phys_base); + else + IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n", + hdr->hdr_len, + htbl->hdr_cnt, + entry->offset_entry->offset); + + id = ipa3_id_alloc(entry); + if (id < 0) { + IPAERR("failed to alloc id\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + entry->id = id; + hdr->hdr_hdl = id; + entry->ref_cnt++; + + if (entry->is_hdr_proc_ctx) { + struct ipa_hdr_proc_ctx_add proc_ctx; + + IPADBG("adding processing context for header %s\n", hdr->name); + proc_ctx.type = IPA_HDR_PROC_NONE; + proc_ctx.hdr_hdl = id; + if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) { + IPAERR("failed to add hdr proc ctx\n"); + goto fail_add_proc_ctx; + } + entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl); + } + + return 0; + +fail_add_proc_ctx: + entry->ref_cnt--; + hdr->hdr_hdl = 0; + ipa3_id_remove(id); +ipa_insert_failed: + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa3_ctx->pdev, entry->phys_base, + entry->hdr_len, DMA_TO_DEVICE); + } else { + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + } + htbl->hdr_cnt--; + list_del(&entry->link); + +fail_dma_mapping: + entry->is_hdr_proc_ctx = false; + +bad_hdr_len: + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->hdr_cache, entry); +error: + return -EPERM; +} + +static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, + bool release_hdr, bool by_user) +{ + struct ipa3_hdr_proc_ctx_entry *entry; + struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl; + + entry = ipa3_id_find(proc_ctx_hdl); + if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + IPADBG("del proc ctx cnt=%d ofst=%d\n", + htbl->proc_ctx_cnt, entry->offset_entry->offset); + + if (by_user && entry->user_deleted) { + IPAERR_RL("proc_ctx already deleted by user\n"); + return -EINVAL; + } + + if (by_user) + entry->user_deleted = true; + + if (--entry->ref_cnt) { + IPADBG("proc_ctx_hdl %x ref_cnt %d\n", + proc_ctx_hdl, entry->ref_cnt); + return 0; + } + + if (release_hdr) + 
__ipa3_del_hdr(entry->hdr->id, false); + + /* move the offset entry to appropriate free list */ + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[entry->offset_entry->bin]); + list_del(&entry->link); + htbl->proc_ctx_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry); + + /* remove the handle from the database */ + ipa3_id_remove(proc_ctx_hdl); + + return 0; +} + + +int __ipa3_del_hdr(u32 hdr_hdl, bool by_user) +{ + struct ipa3_hdr_entry *entry; + struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl; + + entry = ipa3_id_find(hdr_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + if (entry->is_hdr_proc_ctx) + IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n", + entry->hdr_len, htbl->hdr_cnt, &entry->phys_base); + else + IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n", + entry->hdr_len, htbl->hdr_cnt, + entry->offset_entry->offset); + + if (by_user && entry->user_deleted) { + IPAERR_RL("proc_ctx already deleted by user\n"); + return -EINVAL; + } + + if (by_user) + entry->user_deleted = true; + + if (--entry->ref_cnt) { + IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt); + return 0; + } + + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa3_ctx->pdev, + entry->phys_base, + entry->hdr_len, + DMA_TO_DEVICE); + __ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false); + } else { + /* move the offset entry to appropriate free list */ + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[entry->offset_entry->bin]); + } + list_del(&entry->link); + htbl->hdr_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->hdr_cache, entry); + + /* remove the handle from the database */ + ipa3_id_remove(hdr_hdl); + + return 0; +} + +/** + * ipa3_add_hdr() - add the specified headers to SW and optionally commit them + * to IPA HW + * @hdrs: [inout] set of headers to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs) +{ + int i; + int result = -EFAULT; + + if (hdrs == NULL || hdrs->num_hdrs == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + IPADBG("adding %d headers to IPA driver internal data struct\n", + hdrs->num_hdrs); + for (i = 0; i < hdrs->num_hdrs; i++) { + if (__ipa_add_hdr(&hdrs->hdr[i])) { + IPAERR_RL("failed to add hdr %d\n", i); + hdrs->hdr[i].status = -1; + } else { + hdrs->hdr[i].status = 0; + } + } + + if (hdrs->commit) { + IPADBG("committing all headers to IPA core"); + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_del_hdr_by_user() - Remove the specified headers + * from SW and optionally commit them to IPA HW + * @hdls: [inout] set of headers to delete + * @by_user: Operation requested by user? 
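+ * Headers that are still referenced are only marked as user-deleted and are freed once the last reference is dropped.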
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user) +{ + int i; + int result = -EFAULT; + + if (hdls == NULL || hdls->num_hdls == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) { + IPAERR_RL("failed to del hdr %i\n", i); + hdls->hdl[i].status = -1; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) { + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_del_hdr() - Remove the specified headers from SW + * and optionally commit them to IPA HW + * @hdls: [inout] set of headers to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls) +{ + return ipa3_del_hdr_by_user(hdls, false); +} + +/** + * ipa3_add_hdr_proc_ctx() - add the specified headers to SW + * and optionally commit them to IPA HW + * @proc_ctxs: [inout] set of processing context headers to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) +{ + int i; + int result = -EFAULT; + + if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + IPADBG("adding %d header processing contextes to IPA driver\n", + proc_ctxs->num_proc_ctxs); + for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) { + if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) { + IPAERR_RL("failed to add hdr pric ctx %d\n", i); + proc_ctxs->proc_ctx[i].status = -1; + } else { + proc_ctxs->proc_ctx[i].status = 0; + } + } + + if (proc_ctxs->commit) { + IPADBG("committing all headers to IPA core"); + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_del_hdr_proc_ctx_by_user() - + * Remove the specified processing context headers from SW and + * optionally commit them to IPA HW. + * @hdls: [inout] set of processing context headers to delete + * @by_user: Operation requested by user? + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls, + bool by_user) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_hdls == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) { + IPAERR_RL("failed to del hdr %i\n", i); + hdls->hdl[i].status = -1; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) { + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_del_hdr_proc_ctx() - + * Remove the specified processing context headers from SW and + * optionally commit them to IPA HW. 
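+ * This is equivalent to calling ipa3_del_hdr_proc_ctx_by_user() with by_user set to false.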
+ * @hdls: [inout] set of processing context headers to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls) +{ + return ipa3_del_hdr_proc_ctx_by_user(hdls, false); +} + +/** + * ipa3_commit_hdr() - commit to IPA HW the current header table in SW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_commit_hdr(void) +{ + int result = -EFAULT; + + /* + * issue a commit on the routing module since routing rules point to + * header table entries + */ + if (ipa3_commit_rt(IPA_IP_v4)) + return -EPERM; + if (ipa3_commit_rt(IPA_IP_v6)) + return -EPERM; + + mutex_lock(&ipa3_ctx->lock); + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_reset_hdr() - reset the current header table in SW (does not commit to + * HW) + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_reset_hdr(void) +{ + struct ipa3_hdr_entry *entry; + struct ipa3_hdr_entry *next; + struct ipa3_hdr_proc_ctx_entry *ctx_entry; + struct ipa3_hdr_proc_ctx_entry *ctx_next; + struct ipa_hdr_offset_entry *off_entry; + struct ipa_hdr_offset_entry *off_next; + struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry; + struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next; + int i; + + /* + * issue a reset on the routing module since routing rules point to + * header table entries + */ + if (ipa3_reset_rt(IPA_IP_v4)) + IPAERR("fail to reset v4 rt\n"); + if (ipa3_reset_rt(IPA_IP_v6)) + IPAERR("fail to reset v4 rt\n"); + + mutex_lock(&ipa3_ctx->lock); + IPADBG("reset hdr\n"); + list_for_each_entry_safe(entry, next, + &ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) { + + /* do not remove the default header */ + if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) { + if (entry->is_hdr_proc_ctx) { + IPAERR("default header is proc ctx\n"); + mutex_unlock(&ipa3_ctx->lock); + WARN_ON(1); + return -EFAULT; + } + continue; + } + + if (ipa3_id_find(entry->id) == NULL) { + mutex_unlock(&ipa3_ctx->lock); + WARN_ON(1); + return -EFAULT; + } + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa3_ctx->pdev, + entry->phys_base, + entry->hdr_len, + DMA_TO_DEVICE); + entry->proc_ctx = NULL; + } + list_del(&entry->link); + entry->ref_cnt = 0; + entry->cookie = 0; + + /* remove the handle from the database */ + ipa3_id_remove(entry->id); + kmem_cache_free(ipa3_ctx->hdr_cache, entry); + + } + for (i = 0; i < IPA_HDR_BIN_MAX; i++) { + list_for_each_entry_safe(off_entry, off_next, + &ipa3_ctx->hdr_tbl.head_offset_list[i], + link) { + + /* + * do not remove the default exception header which is + * at offset 0 + */ + if (off_entry->offset == 0) + continue; + + list_del(&off_entry->link); + kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry); + } + list_for_each_entry_safe(off_entry, off_next, + &ipa3_ctx->hdr_tbl.head_free_offset_list[i], + link) { + list_del(&off_entry->link); + kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry); + } + } + /* there is one header of size 8 */ + ipa3_ctx->hdr_tbl.end = 8; + ipa3_ctx->hdr_tbl.hdr_cnt = 1; + + IPADBG("reset hdr proc ctx\n"); + list_for_each_entry_safe( + ctx_entry, + ctx_next, + &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list, + link) { + + if (ipa3_id_find(ctx_entry->id) == NULL) { + mutex_unlock(&ipa3_ctx->lock); + WARN_ON(1); + return 
-EFAULT; + } + list_del(&ctx_entry->link); + ctx_entry->ref_cnt = 0; + ctx_entry->cookie = 0; + + /* remove the handle from the database */ + ipa3_id_remove(ctx_entry->id); + kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry); + + } + for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { + list_for_each_entry_safe(ctx_off_entry, ctx_off_next, + &ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i], + link) { + + list_del(&ctx_off_entry->link); + kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache, + ctx_off_entry); + } + list_for_each_entry_safe(ctx_off_entry, ctx_off_next, + &ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i], + link) { + list_del(&ctx_off_entry->link); + kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache, + ctx_off_entry); + } + } + ipa3_ctx->hdr_proc_ctx_tbl.end = 0; + ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0; + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name) +{ + struct ipa3_hdr_entry *entry; + + if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) { + IPAERR_RL("Header name too long: %s\n", name); + return NULL; + } + + list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list, + link) { + if (!strcmp(name, entry->name)) + return entry; + } + + return NULL; +} + +/** + * ipa3_get_hdr() - Lookup the specified header resource + * @lookup: [inout] header to lookup and its handle + * + * lookup the specified header resource and return handle if it exists + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + * Caller should call ipa3_put_hdr later if this function succeeds + */ +int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup) +{ + struct ipa3_hdr_entry *entry; + int result = -1; + + if (lookup == NULL) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + mutex_lock(&ipa3_ctx->lock); + entry = __ipa_find_hdr(lookup->name); + if (entry) { + lookup->hdl = entry->id; + result = 0; + } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * __ipa3_release_hdr() - drop reference to header and cause + * deletion if reference count permits + * @hdr_hdl: [in] handle of header to be released + * + * Returns: 0 on success, negative on failure + */ +int __ipa3_release_hdr(u32 hdr_hdl) +{ + int result = 0; + + if (__ipa3_del_hdr(hdr_hdl, false)) { + IPADBG("fail to del hdr %x\n", hdr_hdl); + result = -EFAULT; + goto bail; + } + + /* commit for put */ + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + IPAERR("fail to commit hdr\n"); + result = -EFAULT; + goto bail; + } + +bail: + return result; +} + +/** + * __ipa3_release_hdr_proc_ctx() - drop reference to processing context + * and cause deletion if reference count permits + * @proc_ctx_hdl: [in] handle of processing context to be released + * + * Returns: 0 on success, negative on failure + */ +int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl) +{ + int result = 0; + + if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) { + IPADBG("fail to del hdr %x\n", proc_ctx_hdl); + result = -EFAULT; + goto bail; + } + + /* commit for put */ + if (ipa3_ctx->ctrl->ipa3_commit_hdr()) { + IPAERR("fail to commit hdr\n"); + result = -EFAULT; + goto bail; + } + +bail: + return result; +} + +/** + * ipa3_put_hdr() - Release the specified header handle + * @hdr_hdl: [in] the header handle to release + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_put_hdr(u32 hdr_hdl) +{ + struct ipa3_hdr_entry *entry; + int result = -EFAULT; + + 
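+ /* + * Only validates that the handle still refers to a live header entry; + * no reference is taken or dropped here. + */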
mutex_lock(&ipa3_ctx->lock); + + entry = ipa3_id_find(hdr_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + result = -EINVAL; + goto bail; + } + + if (entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("invalid header entry\n"); + result = -EINVAL; + goto bail; + } + + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +/** + * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of + * it + * @copy: [inout] header to lookup and its copy + * + * lookup the specified header resource and return a copy of it (along with its + * attributes) if it exists, this would be called for partial headers + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy) +{ + struct ipa3_hdr_entry *entry; + int result = -EFAULT; + + if (copy == NULL) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + mutex_lock(&ipa3_ctx->lock); + entry = __ipa_find_hdr(copy->name); + if (entry) { + memcpy(copy->hdr, entry->hdr, entry->hdr_len); + copy->hdr_len = entry->hdr_len; + copy->type = entry->type; + copy->is_partial = entry->is_partial; + copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid; + copy->eth2_ofst = entry->eth2_ofst; + result = 0; + } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h new file mode 100644 index 000000000000..c48faa340fc0 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h @@ -0,0 +1,44 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_HW_DEFS_H +#define _IPA_HW_DEFS_H +#include + +/* This header defines various HW related data types */ + + +#define IPA_A5_MUX_HDR_EXCP_FLAG_IP BIT(7) +#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT BIT(6) +#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT BIT(5) +#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG BIT(4) +#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED BIT(3) +#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL BIT(2) + +/** + * struct ipa3_a5_mux_hdr - A5 MUX header definition + * @interface_id: interface ID + * @src_pipe_index: source pipe index + * @flags: flags + * @metadata: metadata + * + * A5 MUX header is in BE, A5 runs in LE. This struct definition + * allows A5 SW to correctly parse the header + */ +struct ipa3_a5_mux_hdr { + u16 interface_id; + u8 src_pipe_index; + u8 flags; + u32 metadata; +}; + +#endif /* _IPA_HW_DEFS_H */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c new file mode 100644 index 000000000000..65fa40b2681b --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c @@ -0,0 +1,1971 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include "ipa_i.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_hw_stats.h" + +#define IPA_CLIENT_BIT_32(client) \ + ((ipa3_get_ep_mapping(client) >= 0 && \ + ipa3_get_ep_mapping(client) < IPA_STATS_MAX_PIPE_BIT) ? \ + (1 << ipa3_get_ep_mapping(client)) : 0) + +int ipa_hw_stats_init(void) +{ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) + return 0; + + /* initialize stats here */ + ipa3_ctx->hw_stats.enabled = true; + return 0; +} + +int ipa_init_quota_stats(u32 pipe_bitmask) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_register_write quota_base = {0}; + struct ipahal_imm_cmd_pyld *quota_base_pyld; + struct ipahal_imm_cmd_register_write quota_mask = {0}; + struct ipahal_imm_cmd_pyld *quota_mask_pyld; + struct ipa3_desc desc[3] = { {0} }; + dma_addr_t dma_address; + int ret; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + /* reset driver's cache */ + memset(&ipa3_ctx->hw_stats.quota, 0, sizeof(ipa3_ctx->hw_stats.quota)); + ipa3_ctx->hw_stats.quota.init.enabled_bitmask = pipe_bitmask; + IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask); + + pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_QUOTA, + &ipa3_ctx->hw_stats.quota.init, false); + if (!pyld) { + IPAERR("failed to generate pyld\n"); + return -EPERM; + } + + if (pyld->len > IPA_MEM_PART(stats_quota_size)) { + IPAERR("SRAM partition too small: %d needed %d\n", + IPA_MEM_PART(stats_quota_size), pyld->len); + ret = -EPERM; + goto destroy_init_pyld; + } + + dma_address = dma_map_single(ipa3_ctx->pdev, + pyld->data, + pyld->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) { + IPAERR("failed to DMA map\n"); + ret = -EPERM; + goto destroy_init_pyld; + } + + /* setting the registers and init the stats pyld are done atomically */ + quota_mask.skip_pipeline_clear = false; + quota_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + quota_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_MASK_n, + ipa3_ctx->ee); + quota_mask.value = pipe_bitmask; + quota_mask.value_mask = ~0; + quota_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + "a_mask, false); + if (!quota_mask_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto unmap; + } + desc[0].opcode = quota_mask_pyld->opcode; + desc[0].pyld = quota_mask_pyld->data; + desc[0].len = quota_mask_pyld->len; + desc[0].type = IPA_IMM_CMD_DESC; + + quota_base.skip_pipeline_clear = false; + quota_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + quota_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_BASE_n, + ipa3_ctx->ee); + quota_base.value = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_quota_ofst); + quota_base.value_mask = ~0; + quota_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + "a_base, false); + if (!quota_base_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto destroy_quota_mask; + } + desc[1].opcode = quota_base_pyld->opcode; + desc[1].pyld = quota_base_pyld->data; + desc[1].len = quota_base_pyld->len; + desc[1].type = IPA_IMM_CMD_DESC; + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + 
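+ /* this dma_shared_mem command (desc[2]) copies the generated init payload into the quota stats area of IPA SRAM */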
cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + cmd.size = pyld->len; + cmd.system_addr = dma_address; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_quota_ofst); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto destroy_quota_base; + } + desc[2].opcode = cmd_pyld->opcode; + desc[2].pyld = cmd_pyld->data; + desc[2].len = cmd_pyld->len; + desc[2].type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(3, desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + ret = 0; + +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +destroy_quota_base: + ipahal_destroy_imm_cmd(quota_base_pyld); +destroy_quota_mask: + ipahal_destroy_imm_cmd(quota_mask_pyld); +unmap: + dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE); +destroy_init_pyld: + ipahal_destroy_stats_init_pyld(pyld); + return ret; +} + +int ipa_get_quota_stats(struct ipa_quota_stats_all *out) +{ + int i; + int ret; + struct ipahal_stats_get_offset_quota get_offset = { { 0 } }; + struct ipahal_stats_offset offset = { 0 }; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipa_mem_buffer mem; + struct ipa3_desc desc = { 0 }; + struct ipahal_stats_quota_all *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + get_offset.init = ipa3_ctx->hw_stats.quota.init; + ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_QUOTA, &get_offset, + &offset); + if (ret) { + IPAERR("failed to get offset from hal %d\n", ret); + return ret; + } + + IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + + mem.size = offset.size; + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, + mem.size, + &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA memory"); + return ret; + } + + cmd.is_read = true; + cmd.clear_after_read = true; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_quota_ofst) + offset.offset; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto free_dma_mem; + } + desc.opcode = cmd_pyld->opcode; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(1, &desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) { + ret = -ENOMEM; + goto destroy_imm; + } + + ret = ipahal_parse_stats(IPAHAL_HW_STATS_QUOTA, + &ipa3_ctx->hw_stats.quota.init, mem.base, stats); + if (ret) { + IPAERR("failed to parse stats (error %d)\n", ret); + goto free_stats; + } + + /* + * update driver cache. 
+ * the stats were read from hardware with clear_after_read meaning + * hardware stats are 0 now + */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + int ep_idx = ipa3_get_ep_mapping(i); + + if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES) + continue; + + if (ipa3_ctx->ep[ep_idx].client != i) + continue; + + ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_bytes += + stats->stats[ep_idx].num_ipv4_bytes; + ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_pkts += + stats->stats[ep_idx].num_ipv4_pkts; + ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_bytes += + stats->stats[ep_idx].num_ipv6_bytes; + ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_pkts += + stats->stats[ep_idx].num_ipv6_pkts; + } + + /* copy results to out parameter */ + if (out) + *out = ipa3_ctx->hw_stats.quota.stats; + ret = 0; +free_stats: + kfree(stats); +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +free_dma_mem: + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + return ret; + +} + +int ipa_reset_quota_stats(enum ipa_client_type client) +{ + int ret; + struct ipa_quota_stats *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (client >= IPA_CLIENT_MAX) { + IPAERR("invalid client %d\n", client); + return -EINVAL; + } + + /* reading stats will reset them in hardware */ + ret = ipa_get_quota_stats(NULL); + if (ret) { + IPAERR("ipa_get_quota_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + stats = &ipa3_ctx->hw_stats.quota.stats.client[client]; + memset(stats, 0, sizeof(*stats)); + return 0; +} + +int ipa_reset_all_quota_stats(void) +{ + int ret; + struct ipa_quota_stats_all *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + /* reading stats will reset them in hardware */ + ret = ipa_get_quota_stats(NULL); + if (ret) { + IPAERR("ipa_get_quota_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + stats = &ipa3_ctx->hw_stats.quota.stats; + memset(stats, 0, sizeof(*stats)); + return 0; +} + +int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_register_write teth_base = {0}; + struct ipahal_imm_cmd_pyld *teth_base_pyld; + struct ipahal_imm_cmd_register_write teth_mask = { 0 }; + struct ipahal_imm_cmd_pyld *teth_mask_pyld; + struct ipa3_desc desc[3] = { {0} }; + dma_addr_t dma_address; + int ret; + int i; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (!in || !in->prod_mask) { + IPAERR("invalid params\n"); + return -EINVAL; + } + + for (i = 0; i < IPA_STATS_MAX_PIPE_BIT; i++) { + if ((in->prod_mask & (1 << i)) && !in->dst_ep_mask[i]) { + IPAERR("prod %d doesn't have cons\n", i); + return -EINVAL; + } + } + IPADBG_LOW("prod_mask=0x%x\n", in->prod_mask); + + /* reset driver's cache */ + memset(&ipa3_ctx->hw_stats.teth.init, 0, + sizeof(ipa3_ctx->hw_stats.teth.init)); + for (i = 0; i < IPA_CLIENT_MAX; i++) + memset(&ipa3_ctx->hw_stats.teth.prod_stats[i], 0, + sizeof(ipa3_ctx->hw_stats.teth.prod_stats[i])); + ipa3_ctx->hw_stats.teth.init.prod_bitmask = in->prod_mask; + memcpy(ipa3_ctx->hw_stats.teth.init.cons_bitmask, in->dst_ep_mask, + sizeof(ipa3_ctx->hw_stats.teth.init.cons_bitmask)); + + + pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_TETHERING, + &ipa3_ctx->hw_stats.teth.init, false); + if (!pyld) { + IPAERR("failed to generate pyld\n"); + return -EPERM; + } + + if (pyld->len > IPA_MEM_PART(stats_tethering_size)) { + IPAERR("SRAM partition too 
small: %d needed %d\n", + IPA_MEM_PART(stats_tethering_size), pyld->len); + ret = -EPERM; + goto destroy_init_pyld; + } + + dma_address = dma_map_single(ipa3_ctx->pdev, + pyld->data, + pyld->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) { + IPAERR("failed to DMA map\n"); + ret = -EPERM; + goto destroy_init_pyld; + } + + /* setting the registers and init the stats pyld are done atomically */ + teth_mask.skip_pipeline_clear = false; + teth_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + teth_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_MASK_n, + ipa3_ctx->ee); + teth_mask.value = in->prod_mask; + teth_mask.value_mask = ~0; + teth_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + &teth_mask, false); + if (!teth_mask_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto unmap; + } + desc[0].opcode = teth_mask_pyld->opcode; + desc[0].pyld = teth_mask_pyld->data; + desc[0].len = teth_mask_pyld->len; + desc[0].type = IPA_IMM_CMD_DESC; + + teth_base.skip_pipeline_clear = false; + teth_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + teth_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_BASE_n, + ipa3_ctx->ee); + teth_base.value = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_tethering_ofst); + teth_base.value_mask = ~0; + teth_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + &teth_base, false); + if (!teth_base_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto destroy_teth_mask; + } + desc[1].opcode = teth_base_pyld->opcode; + desc[1].pyld = teth_base_pyld->data; + desc[1].len = teth_base_pyld->len; + desc[1].type = IPA_IMM_CMD_DESC; + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + cmd.size = pyld->len; + cmd.system_addr = dma_address; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_tethering_ofst); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto destroy_teth_base; + } + desc[2].opcode = cmd_pyld->opcode; + desc[2].pyld = cmd_pyld->data; + desc[2].len = cmd_pyld->len; + desc[2].type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(3, desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + ret = 0; + +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +destroy_teth_base: + ipahal_destroy_imm_cmd(teth_base_pyld); +destroy_teth_mask: + ipahal_destroy_imm_cmd(teth_mask_pyld); +unmap: + dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE); +destroy_init_pyld: + ipahal_destroy_stats_init_pyld(pyld); + return ret; +} + +int ipa_get_teth_stats(enum ipa_client_type prod, + struct ipa_quota_stats_all *out) +{ + int i, j; + int ret; + struct ipahal_stats_get_offset_tethering get_offset = { { 0 } }; + struct ipahal_stats_offset offset = {0}; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipa_mem_buffer mem; + struct ipa3_desc desc = { 0 }; + struct ipahal_stats_tethering_all *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (!IPA_CLIENT_IS_PROD(prod) || ipa3_get_ep_mapping(prod) == -1) { + IPAERR("invalid prod %d\n", prod); + return -EINVAL; + } + + get_offset.init = ipa3_ctx->hw_stats.teth.init; + ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_TETHERING, 
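+ /* ipahal derives the size (and offset) of the tethering stats block from the prod/cons bitmasks saved in teth.init */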
&get_offset, + &offset); + if (ret) { + IPAERR("failed to get offset from hal %d\n", ret); + return ret; + } + + IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + + mem.size = offset.size; + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, + mem.size, + &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA memory\n"); + return ret; + } + + cmd.is_read = true; + cmd.clear_after_read = true; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_tethering_ofst) + offset.offset; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto free_dma_mem; + } + desc.opcode = cmd_pyld->opcode; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(1, &desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) { + IPADBG("failed to alloc memory\n"); + ret = -ENOMEM; + goto destroy_imm; + } + + ret = ipahal_parse_stats(IPAHAL_HW_STATS_TETHERING, + &ipa3_ctx->hw_stats.teth.init, mem.base, stats); + if (ret) { + IPAERR("failed to parse stats (error %d)\n", ret); + goto free_stats; + } + + /* + * update driver cache. + * the stats were read from hardware with clear_after_read meaning + * hardware stats are 0 now + */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + for (j = 0; j < IPA_CLIENT_MAX; j++) { + struct ipa_hw_stats_teth *sw_stats = + &ipa3_ctx->hw_stats.teth; + int prod_idx = ipa3_get_ep_mapping(i); + int cons_idx = ipa3_get_ep_mapping(j); + + if (prod_idx == -1 || prod_idx >= IPA3_MAX_NUM_PIPES) + continue; + + if (cons_idx == -1 || cons_idx >= IPA3_MAX_NUM_PIPES) + continue; + + if (ipa3_ctx->ep[prod_idx].client != i || + ipa3_ctx->ep[cons_idx].client != j) + continue; + + sw_stats->prod_stats[i].client[j].num_ipv4_bytes += + stats->stats[prod_idx][cons_idx].num_ipv4_bytes; + sw_stats->prod_stats[i].client[j].num_ipv4_pkts += + stats->stats[prod_idx][cons_idx].num_ipv4_pkts; + sw_stats->prod_stats[i].client[j].num_ipv6_bytes += + stats->stats[prod_idx][cons_idx].num_ipv6_bytes; + sw_stats->prod_stats[i].client[j].num_ipv6_pkts += + stats->stats[prod_idx][cons_idx].num_ipv6_pkts; + } + } + + if (!out) { + ret = 0; + goto free_stats; + } + + /* copy results to out parameter */ + *out = ipa3_ctx->hw_stats.teth.prod_stats[prod]; + + ret = 0; +free_stats: + kfree(stats); +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +free_dma_mem: + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + return ret; + +} + +int ipa_reset_teth_stats(enum ipa_client_type prod, enum ipa_client_type cons) +{ + int ret; + struct ipa_quota_stats *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (!IPA_CLIENT_IS_PROD(prod) || !IPA_CLIENT_IS_CONS(cons)) { + IPAERR("invalid prod %d or cons %d\n", prod, cons); + return -EINVAL; + } + + /* reading stats will reset them in hardware */ + ret = ipa_get_teth_stats(prod, NULL); + if (ret) { + IPAERR("ipa_get_teth_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + stats = &ipa3_ctx->hw_stats.teth.prod_stats[prod].client[cons]; + memset(stats, 0, sizeof(*stats)); + return 0; +} + +int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod) 
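+/* + * Clears the driver's cached tethering stats of every consumer fed by @prod; + * the hardware counters are cleared as a side effect of the + * ipa_get_teth_stats() read below. Hypothetical caller (illustrative only): + * ipa_reset_all_cons_teth_stats(IPA_CLIENT_USB_PROD); + */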
+{ + int ret; + int i; + struct ipa_quota_stats *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (!IPA_CLIENT_IS_PROD(prod)) { + IPAERR("invalid prod %d\n", prod); + return -EINVAL; + } + + /* reading stats will reset them in hardware */ + ret = ipa_get_teth_stats(prod, NULL); + if (ret) { + IPAERR("ipa_get_teth_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + stats = &ipa3_ctx->hw_stats.teth.prod_stats[prod].client[i]; + memset(stats, 0, sizeof(*stats)); + } + + return 0; +} + +int ipa_reset_all_teth_stats(void) +{ + int i; + int ret; + struct ipa_quota_stats_all *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + /* reading stats will reset them in hardware */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + if (IPA_CLIENT_IS_PROD(i) && ipa3_get_ep_mapping(i) != -1) { + ret = ipa_get_teth_stats(i, NULL); + if (ret) { + IPAERR("ipa_get_teth_stats failed %d\n", ret); + return ret; + } + /* a single iteration will reset all hardware stats */ + break; + } + } + + /* reset driver's cache */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + stats = &ipa3_ctx->hw_stats.teth.prod_stats[i]; + memset(stats, 0, sizeof(*stats)); + } + + return 0; +} + +int ipa_flt_rt_stats_add_rule_id(enum ipa_ip_type ip, bool filtering, + u16 rule_id) +{ + int rule_idx, rule_bit; + u32 *bmsk_ptr; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip < 0 || ip >= IPA_IP_MAX) { + IPAERR("wrong ip type %d\n", ip); + return -EINVAL; + } + + rule_idx = rule_id / 32; + rule_bit = rule_id % 32; + + if (rule_idx >= IPAHAL_MAX_RULE_ID_32) { + IPAERR("invalid rule_id %d\n", rule_id); + return -EINVAL; + } + + if (ip == IPA_IP_v4 && filtering) + bmsk_ptr = + ipa3_ctx->hw_stats.flt_rt.flt_v4_init.rule_id_bitmask; + else if (ip == IPA_IP_v4) + bmsk_ptr = + ipa3_ctx->hw_stats.flt_rt.rt_v4_init.rule_id_bitmask; + else if (ip == IPA_IP_v6 && filtering) + bmsk_ptr = + ipa3_ctx->hw_stats.flt_rt.flt_v6_init.rule_id_bitmask; + else + bmsk_ptr = + ipa3_ctx->hw_stats.flt_rt.rt_v6_init.rule_id_bitmask; + + bmsk_ptr[rule_idx] |= (1 << rule_bit); + + return 0; +} + +int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering) +{ + struct ipahal_stats_init_pyld *pyld; + int smem_ofst, smem_size, stats_base, start_id_ofst, end_id_ofst; + int start_id, end_id; + struct ipahal_stats_init_flt_rt *init; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_register_write flt_rt_base = {0}; + struct ipahal_imm_cmd_pyld *flt_rt_base_pyld; + struct ipahal_imm_cmd_register_write flt_rt_start_id = {0}; + struct ipahal_imm_cmd_pyld *flt_rt_start_id_pyld; + struct ipahal_imm_cmd_register_write flt_rt_end_id = { 0 }; + struct ipahal_imm_cmd_pyld *flt_rt_end_id_pyld; + struct ipa3_desc desc[4] = { {0} }; + dma_addr_t dma_address; + int ret; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip == IPA_IP_v4 && filtering) { + init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init; + smem_ofst = IPA_MEM_PART(stats_flt_v4_ofst); + smem_size = IPA_MEM_PART(stats_flt_v4_size); + stats_base = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_BASE); + start_id_ofst = + ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_START_ID); + end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_END_ID); + } else if (ip == IPA_IP_v4) { + init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init; + smem_ofst = IPA_MEM_PART(stats_rt_v4_ofst); + smem_size = IPA_MEM_PART(stats_rt_v4_size); + stats_base = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_BASE); + 
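+ /* IPv4 routing counters use the ROUTER_IPV4 base/start/end ID registers and the stats_rt_v4 SRAM partition */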
start_id_ofst = + ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_START_ID); + end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_END_ID); + } else if (ip == IPA_IP_v6 && filtering) { + init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init; + smem_ofst = IPA_MEM_PART(stats_flt_v6_ofst); + smem_size = IPA_MEM_PART(stats_flt_v6_size); + stats_base = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_BASE); + start_id_ofst = + ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_START_ID); + end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_END_ID); + } else { + init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init; + smem_ofst = IPA_MEM_PART(stats_rt_v6_ofst); + smem_size = IPA_MEM_PART(stats_rt_v6_size); + stats_base = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_BASE); + start_id_ofst = + ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_START_ID); + end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_END_ID); + } + + for (start_id = 0; start_id < IPAHAL_MAX_RULE_ID_32; start_id++) { + if (init->rule_id_bitmask[start_id]) + break; + } + + if (start_id == IPAHAL_MAX_RULE_ID_32) { + IPAERR("empty rule ids\n"); + return -EINVAL; + } + + /* every rule_id_bitmask contains 32 rules */ + start_id *= 32; + + for (end_id = IPAHAL_MAX_RULE_ID_32 - 1; end_id >= 0; end_id--) { + if (init->rule_id_bitmask[end_id]) + break; + } + end_id = (end_id + 1) * 32 - 1; + + pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_FNR, init, + false); + if (!pyld) { + IPAERR("failed to generate pyld\n"); + return -EPERM; + } + + if (pyld->len > smem_size) { + IPAERR("SRAM partition too small: %d needed %d\n", + smem_size, pyld->len); + ret = -EPERM; + goto destroy_init_pyld; + } + + dma_address = dma_map_single(ipa3_ctx->pdev, + pyld->data, + pyld->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) { + IPAERR("failed to DMA map\n"); + ret = -EPERM; + goto destroy_init_pyld; + } + + /* setting the registers and init the stats pyld are done atomically */ + flt_rt_start_id.skip_pipeline_clear = false; + flt_rt_start_id.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + flt_rt_start_id.offset = start_id_ofst; + flt_rt_start_id.value = start_id; + flt_rt_start_id.value_mask = 0x3FF; + flt_rt_start_id_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, &flt_rt_start_id, false); + if (!flt_rt_start_id_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto unmap; + } + desc[0].opcode = flt_rt_start_id_pyld->opcode; + desc[0].pyld = flt_rt_start_id_pyld->data; + desc[0].len = flt_rt_start_id_pyld->len; + desc[0].type = IPA_IMM_CMD_DESC; + + flt_rt_end_id.skip_pipeline_clear = false; + flt_rt_end_id.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + flt_rt_end_id.offset = end_id_ofst; + flt_rt_end_id.value = end_id; + flt_rt_end_id.value_mask = 0x3FF; + flt_rt_end_id_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, &flt_rt_end_id, false); + if (!flt_rt_end_id_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto destroy_flt_rt_start_id; + } + desc[1].opcode = flt_rt_end_id_pyld->opcode; + desc[1].pyld = flt_rt_end_id_pyld->data; + desc[1].len = flt_rt_end_id_pyld->len; + desc[1].type = IPA_IMM_CMD_DESC; + + flt_rt_base.skip_pipeline_clear = false; + flt_rt_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + flt_rt_base.offset = stats_base; + flt_rt_base.value = ipa3_ctx->smem_restricted_bytes + + smem_ofst; + flt_rt_base.value_mask = ~0; + flt_rt_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + &flt_rt_base, false); + 
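+ /* desc[2] programs the stats base register; desc[3] below DMAs the generated init payload into the selected SRAM partition */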
if (!flt_rt_base_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto destroy_flt_rt_end_id; + } + desc[2].opcode = flt_rt_base_pyld->opcode; + desc[2].pyld = flt_rt_base_pyld->data; + desc[2].len = flt_rt_base_pyld->len; + desc[2].type = IPA_IMM_CMD_DESC; + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + cmd.size = pyld->len; + cmd.system_addr = dma_address; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + smem_ofst; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto destroy_flt_rt_base; + } + desc[3].opcode = cmd_pyld->opcode; + desc[3].pyld = cmd_pyld->data; + desc[3].len = cmd_pyld->len; + desc[3].type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(4, desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + ret = 0; + +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +destroy_flt_rt_base: + ipahal_destroy_imm_cmd(flt_rt_base_pyld); +destroy_flt_rt_end_id: + ipahal_destroy_imm_cmd(flt_rt_end_id_pyld); +destroy_flt_rt_start_id: + ipahal_destroy_imm_cmd(flt_rt_start_id_pyld); +unmap: + dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE); +destroy_init_pyld: + ipahal_destroy_stats_init_pyld(pyld); + return ret; +} + +int ipa_flt_rt_stats_clear_rule_ids(enum ipa_ip_type ip, bool filtering) +{ + struct ipahal_stats_init_flt_rt *init; + int i; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip < 0 || ip >= IPA_IP_MAX) { + IPAERR("wrong ip type %d\n", ip); + return -EINVAL; + } + + if (ip == IPA_IP_v4 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init; + else if (ip == IPA_IP_v4) + init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init; + else if (ip == IPA_IP_v6 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init; + else + init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init; + + for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) + init->rule_id_bitmask[i] = 0; + + return 0; +} + +static int __ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, + u16 rule_id, struct ipa_flt_rt_stats *out) +{ + int ret; + int smem_ofst; + bool clear = false; + struct ipahal_stats_get_offset_flt_rt *get_offset; + struct ipahal_stats_offset offset = { 0 }; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipa_mem_buffer mem; + struct ipa3_desc desc = { 0 }; + struct ipahal_stats_flt_rt stats; + + if (rule_id >= IPAHAL_MAX_RULE_ID_32 * 32) { + IPAERR("invalid rule_id %d\n", rule_id); + return -EINVAL; + } + + if (out == NULL) + clear = true; + + get_offset = kzalloc(sizeof(*get_offset), GFP_KERNEL); + if (!get_offset) { + IPADBG("no mem\n"); + return -ENOMEM; + } + + if (ip == IPA_IP_v4 && filtering) { + get_offset->init = ipa3_ctx->hw_stats.flt_rt.flt_v4_init; + smem_ofst = IPA_MEM_PART(stats_flt_v4_ofst); + } else if (ip == IPA_IP_v4) { + get_offset->init = ipa3_ctx->hw_stats.flt_rt.rt_v4_init; + smem_ofst = IPA_MEM_PART(stats_rt_v4_ofst); + } else if (ip == IPA_IP_v6 && filtering) { + get_offset->init = ipa3_ctx->hw_stats.flt_rt.flt_v6_init; + smem_ofst = IPA_MEM_PART(stats_flt_v6_ofst); + } else { + get_offset->init = ipa3_ctx->hw_stats.flt_rt.rt_v6_init; + smem_ofst = IPA_MEM_PART(stats_rt_v6_ofst); + } + + get_offset->rule_id = rule_id; + + ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_FNR, get_offset, + &offset); + if (ret) { + 
IPAERR("failed to get offset from hal %d\n", ret); + goto free_offset; + } + + IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + + mem.size = offset.size; + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, + mem.size, + &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA memory\n"); + goto free_offset; + } + + cmd.is_read = true; + cmd.clear_after_read = clear; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + smem_ofst + offset.offset; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto free_dma_mem; + } + desc.opcode = cmd_pyld->opcode; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(1, &desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + ret = ipahal_parse_stats(IPAHAL_HW_STATS_FNR, + &get_offset->init, mem.base, &stats); + if (ret) { + IPAERR("failed to parse stats (error %d)\n", ret); + goto destroy_imm; + } + + if (out) { + out->num_pkts = stats.num_packets; + out->num_pkts_hash = stats.num_packets_hash; + } + + ret = 0; + +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +free_dma_mem: + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); +free_offset: + kfree(get_offset); + return ret; + +} + + +int ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id, + struct ipa_flt_rt_stats *out) +{ + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip < 0 || ip >= IPA_IP_MAX) { + IPAERR("wrong ip type %d\n", ip); + return -EINVAL; + } + + return __ipa_get_flt_rt_stats(ip, filtering, rule_id, out); +} + +int ipa_reset_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id) +{ + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip < 0 || ip >= IPA_IP_MAX) { + IPAERR("wrong ip type %d\n", ip); + return -EINVAL; + } + + return __ipa_get_flt_rt_stats(ip, filtering, rule_id, NULL); +} + +int ipa_reset_all_flt_rt_stats(enum ipa_ip_type ip, bool filtering) +{ + struct ipahal_stats_init_flt_rt *init; + int i; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + if (ip < 0 || ip >= IPA_IP_MAX) { + IPAERR("wrong ip type %d\n", ip); + return -EINVAL; + } + + if (ip == IPA_IP_v4 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init; + else if (ip == IPA_IP_v4) + init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init; + else if (ip == IPA_IP_v6 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init; + else + init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init; + + for (i = 0; i < IPAHAL_MAX_RULE_ID_32 * 32; i++) { + int idx = i / 32; + int bit = i % 32; + + if (init->rule_id_bitmask[idx] & (1 << bit)) + __ipa_get_flt_rt_stats(ip, filtering, i, NULL); + } + + return 0; +} + +int ipa_init_drop_stats(u32 pipe_bitmask) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_imm_cmd_register_write drop_base = {0}; + struct ipahal_imm_cmd_pyld *drop_base_pyld; + struct ipahal_imm_cmd_register_write drop_mask = {0}; + struct ipahal_imm_cmd_pyld *drop_mask_pyld; + struct ipa3_desc desc[3] = { {0} }; + dma_addr_t dma_address; + int ret; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + /* reset driver's cache */ + 
memset(&ipa3_ctx->hw_stats.drop, 0, sizeof(ipa3_ctx->hw_stats.drop)); + ipa3_ctx->hw_stats.drop.init.enabled_bitmask = pipe_bitmask; + IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask); + + pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_DROP, + &ipa3_ctx->hw_stats.drop.init, false); + if (!pyld) { + IPAERR("failed to generate pyld\n"); + return -EPERM; + } + + if (pyld->len > IPA_MEM_PART(stats_drop_size)) { + IPAERR("SRAM partition too small: %d needed %d\n", + IPA_MEM_PART(stats_drop_size), pyld->len); + ret = -EPERM; + goto destroy_init_pyld; + } + + dma_address = dma_map_single(ipa3_ctx->pdev, + pyld->data, + pyld->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) { + IPAERR("failed to DMA map\n"); + ret = -EPERM; + goto destroy_init_pyld; + } + + /* setting the registers and init the stats pyld are done atomically */ + drop_mask.skip_pipeline_clear = false; + drop_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + drop_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_MASK_n, + ipa3_ctx->ee); + drop_mask.value = pipe_bitmask; + drop_mask.value_mask = ~0; + drop_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + &drop_mask, false); + if (!drop_mask_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto unmap; + } + desc[0].opcode = drop_mask_pyld->opcode; + desc[0].pyld = drop_mask_pyld->data; + desc[0].len = drop_mask_pyld->len; + desc[0].type = IPA_IMM_CMD_DESC; + + drop_base.skip_pipeline_clear = false; + drop_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + drop_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_BASE_n, + ipa3_ctx->ee); + drop_base.value = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_drop_ofst); + drop_base.value_mask = ~0; + drop_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + &drop_base, false); + if (!drop_base_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + ret = -ENOMEM; + goto destroy_drop_mask; + } + desc[1].opcode = drop_base_pyld->opcode; + desc[1].pyld = drop_base_pyld->data; + desc[1].len = drop_base_pyld->len; + desc[1].type = IPA_IMM_CMD_DESC; + + cmd.is_read = false; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR; + cmd.size = pyld->len; + cmd.system_addr = dma_address; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_drop_ofst); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto destroy_drop_base; + } + desc[2].opcode = cmd_pyld->opcode; + desc[2].pyld = cmd_pyld->data; + desc[2].len = cmd_pyld->len; + desc[2].type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(3, desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + ret = 0; + +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +destroy_drop_base: + ipahal_destroy_imm_cmd(drop_base_pyld); +destroy_drop_mask: + ipahal_destroy_imm_cmd(drop_mask_pyld); +unmap: + dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE); +destroy_init_pyld: + ipahal_destroy_stats_init_pyld(pyld); + return ret; +} + +int ipa_get_drop_stats(struct ipa_drop_stats_all *out) +{ + int i; + int ret; + struct ipahal_stats_get_offset_drop get_offset = { { 0 } }; + struct ipahal_stats_offset offset = { 0 }; + struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 }; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct 
ipa_mem_buffer mem; + struct ipa3_desc desc = { 0 }; + struct ipahal_stats_drop_all *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + get_offset.init = ipa3_ctx->hw_stats.drop.init; + ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_DROP, &get_offset, + &offset); + if (ret) { + IPAERR("failed to get offset from hal %d\n", ret); + return ret; + } + + IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + + mem.size = offset.size; + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, + mem.size, + &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA memory\n"); + return ret; + } + + cmd.is_read = true; + cmd.clear_after_read = true; + cmd.skip_pipeline_clear = false; + cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + cmd.size = mem.size; + cmd.system_addr = mem.phys_base; + cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(stats_drop_ofst) + offset.offset; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct dma_shared_mem imm cmd\n"); + ret = -ENOMEM; + goto free_dma_mem; + } + desc.opcode = cmd_pyld->opcode; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + + ret = ipa3_send_cmd(1, &desc); + if (ret) { + IPAERR("failed to send immediate command (error %d)\n", ret); + goto destroy_imm; + } + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) { + ret = -ENOMEM; + goto destroy_imm; + } + + ret = ipahal_parse_stats(IPAHAL_HW_STATS_DROP, + &ipa3_ctx->hw_stats.drop.init, mem.base, stats); + if (ret) { + IPAERR("failed to parse stats (error %d)\n", ret); + goto free_stats; + } + + /* + * update driver cache. + * the stats were read from hardware with clear_after_read meaning + * hardware stats are 0 now + */ + for (i = 0; i < IPA_CLIENT_MAX; i++) { + int ep_idx = ipa3_get_ep_mapping(i); + + if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES) + continue; + + if (ipa3_ctx->ep[ep_idx].client != i) + continue; + + ipa3_ctx->hw_stats.drop.stats.client[i].drop_byte_cnt += + stats->stats[ep_idx].drop_byte_cnt; + ipa3_ctx->hw_stats.drop.stats.client[i].drop_packet_cnt += + stats->stats[ep_idx].drop_packet_cnt; + } + + + if (!out) { + ret = 0; + goto free_stats; + } + + /* copy results to out parameter */ + *out = ipa3_ctx->hw_stats.drop.stats; + + ret = 0; +free_stats: + kfree(stats); +destroy_imm: + ipahal_destroy_imm_cmd(cmd_pyld); +free_dma_mem: + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + return ret; + +} + +int ipa_reset_drop_stats(enum ipa_client_type client) +{ + int ret; + struct ipa_drop_stats *stats; + + if (client >= IPA_CLIENT_MAX) { + IPAERR("invalid client %d\n", client); + return -EINVAL; + } + + /* reading stats will reset them in hardware */ + ret = ipa_get_drop_stats(NULL); + if (ret) { + IPAERR("ipa_get_drop_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + stats = &ipa3_ctx->hw_stats.drop.stats.client[client]; + memset(stats, 0, sizeof(*stats)); + return 0; +} + +int ipa_reset_all_drop_stats(void) +{ + int ret; + struct ipa_drop_stats_all *stats; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + /* reading stats will reset them in hardware */ + ret = ipa_get_drop_stats(NULL); + if (ret) { + IPAERR("ipa_get_drop_stats failed %d\n", ret); + return ret; + } + + /* reset driver's cache */ + stats = &ipa3_ctx->hw_stats.drop.stats; + memset(stats, 0, sizeof(*stats)); + return 0; +} + + +#ifndef CONFIG_DEBUG_FS +int ipa_debugfs_init_stats(struct 
dentry *parent) { return 0; } +#else +#define IPA_MAX_MSG_LEN 4096 +static char dbg_buff[IPA_MAX_MSG_LEN]; + +static ssize_t ipa_debugfs_reset_quota_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 client = 0; + int ret; + + mutex_lock(&ipa3_ctx->lock); + if (sizeof(dbg_buff) < count + 1) { + ret = -EFAULT; + goto bail; + } + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) { + ret = -EFAULT; + goto bail; + } + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &client)) { + ret = -EFAULT; + goto bail; + } + + if (client == -1) + ipa_reset_all_quota_stats(); + else + ipa_reset_quota_stats(client); + + ret = count; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +static ssize_t ipa_debugfs_print_quota_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipa_quota_stats_all *out; + int i; + int res; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + mutex_lock(&ipa3_ctx->lock); + res = ipa_get_quota_stats(out); + if (res) { + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + return res; + } + for (i = 0; i < IPA_CLIENT_MAX; i++) { + int ep_idx = ipa3_get_ep_mapping(i); + + if (ep_idx == -1) + continue; + + if (IPA_CLIENT_IS_TEST(i)) + continue; + + if (!(ipa3_ctx->hw_stats.quota.init.enabled_bitmask & + (1 << ep_idx))) + continue; + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "%s:\n", + ipa_clients_strings[i]); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv4_bytes=%llu\n", + out->client[i].num_ipv4_bytes); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv6_bytes=%llu\n", + out->client[i].num_ipv6_bytes); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv4_pkts=%u\n", + out->client[i].num_ipv4_pkts); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv6_pkts=%u\n", + out->client[i].num_ipv6_pkts); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "\n"); + + } + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_debugfs_reset_tethering_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 client = 0; + int ret; + + mutex_lock(&ipa3_ctx->lock); + if (sizeof(dbg_buff) < count + 1) { + ret = -EFAULT; + goto bail; + } + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) { + ret = -EFAULT; + goto bail; + } + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &client)) { + ret = -EFAULT; + goto bail; + } + + if (client == -1) + ipa_reset_all_teth_stats(); + else + ipa_reset_all_cons_teth_stats(client); + + ret = count; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +static ssize_t ipa_debugfs_print_tethering_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipa_quota_stats_all *out; + int i, j; + int res; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < IPA_CLIENT_MAX; i++) { + int ep_idx = ipa3_get_ep_mapping(i); + + if (ep_idx == -1) + continue; + + if (!IPA_CLIENT_IS_PROD(i)) + continue; + + if (IPA_CLIENT_IS_TEST(i)) + continue; + + if (!(ipa3_ctx->hw_stats.teth.init.prod_bitmask & + (1 << ep_idx))) + continue; + + res = ipa_get_teth_stats(i, out); + if 
(res) { + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + return res; + } + + for (j = 0; j < IPA_CLIENT_MAX; j++) { + int cons_idx = ipa3_get_ep_mapping(j); + + if (cons_idx == -1) + continue; + + if (IPA_CLIENT_IS_TEST(j)) + continue; + + if (!(ipa3_ctx->hw_stats.teth.init.cons_bitmask[ep_idx] + & (1 << cons_idx))) + continue; + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "%s->%s:\n", + ipa_clients_strings[i], + ipa_clients_strings[j]); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv4_bytes=%llu\n", + out->client[j].num_ipv4_bytes); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv6_bytes=%llu\n", + out->client[j].num_ipv6_bytes); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv4_pkts=%u\n", + out->client[j].num_ipv4_pkts); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_ipv6_pkts=%u\n", + out->client[j].num_ipv6_pkts); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "\n"); + } + } + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_debugfs_control_flt_rt_stats(enum ipa_ip_type ip, + bool filtering, struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + u16 rule_id = 0; + int ret; + + mutex_lock(&ipa3_ctx->lock); + if (sizeof(dbg_buff) < count + 1) { + ret = -EFAULT; + goto bail; + } + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) { + ret = -EFAULT; + goto bail; + } + + dbg_buff[count] = '\0'; + if (strcmp(dbg_buff, "start\n") == 0) { + ipa_flt_rt_stats_start(ip, filtering); + } else if (strcmp(dbg_buff, "clear\n") == 0) { + ipa_flt_rt_stats_clear_rule_ids(ip, filtering); + } else if (strcmp(dbg_buff, "reset\n") == 0) { + ipa_reset_all_flt_rt_stats(ip, filtering); + } else { + if (kstrtou16(dbg_buff, 0, &rule_id)) { + ret = -EFAULT; + goto bail; + } + ipa_flt_rt_stats_add_rule_id(ip, filtering, rule_id); + } + + ret = count; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +static ssize_t ipa_debugfs_print_flt_rt_stats(enum ipa_ip_type ip, + bool filtering, struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipahal_stats_init_flt_rt *init; + struct ipa_flt_rt_stats out; + int i; + int res; + + if (ip == IPA_IP_v4 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init; + else if (ip == IPA_IP_v4) + init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init; + else if (ip == IPA_IP_v6 && filtering) + init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init; + else + init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init; + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < IPAHAL_MAX_RULE_ID_32 * 32; i++) { + int idx = i / 32; + int bit = i % 32; + + if (init->rule_id_bitmask[idx] & (1 << bit)) { + res = ipa_get_flt_rt_stats(ip, filtering, i, &out); + if (res) { + mutex_unlock(&ipa3_ctx->lock); + return res; + } + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "rule_id: %d\n", i); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_pkts: %d\n", + out.num_pkts); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "num_pkts_hash: %d\n", + out.num_pkts_hash); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "\n"); + } + } + + mutex_unlock(&ipa3_ctx->lock); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t 
ipa_debugfs_reset_drop_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 client = 0; + int ret; + + mutex_lock(&ipa3_ctx->lock); + if (sizeof(dbg_buff) < count + 1) { + ret = -EFAULT; + goto bail; + } + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) { + ret = -EFAULT; + goto bail; + } + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &client)) { + ret = -EFAULT; + goto bail; + } + + if (client == -1) + ipa_reset_all_drop_stats(); + else + ipa_reset_drop_stats(client); + + ret = count; +bail: + mutex_unlock(&ipa3_ctx->lock); + return count; +} + +static ssize_t ipa_debugfs_print_drop_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipa_drop_stats_all *out; + int i; + int res; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + mutex_lock(&ipa3_ctx->lock); + res = ipa_get_drop_stats(out); + if (res) { + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + return res; + } + + for (i = 0; i < IPA_CLIENT_MAX; i++) { + int ep_idx = ipa3_get_ep_mapping(i); + + if (ep_idx == -1) + continue; + + if (!IPA_CLIENT_IS_CONS(i)) + continue; + + if (IPA_CLIENT_IS_TEST(i)) + continue; + + if (!(ipa3_ctx->hw_stats.drop.init.enabled_bitmask & + (1 << ep_idx))) + continue; + + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "%s:\n", + ipa_clients_strings[i]); + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "drop_byte_cnt=%u\n", + out->client[i].drop_byte_cnt); + + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "drop_packet_cnt=%u\n", + out->client[i].drop_packet_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "\n"); + } + mutex_unlock(&ipa3_ctx->lock); + kfree(out); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_debugfs_control_flt_v4_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_control_flt_rt_stats(IPA_IP_v4, true, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_control_flt_v6_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_control_flt_rt_stats(IPA_IP_v6, true, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_control_rt_v4_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_control_flt_rt_stats(IPA_IP_v4, false, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_control_rt_v6_stats(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_control_flt_rt_stats(IPA_IP_v6, false, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_print_flt_v4_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_print_flt_rt_stats(IPA_IP_v4, true, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_print_flt_v6_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_print_flt_rt_stats(IPA_IP_v6, true, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_print_rt_v4_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + return ipa_debugfs_print_flt_rt_stats(IPA_IP_v4, false, file, ubuf, + count, ppos); +} + +static ssize_t ipa_debugfs_print_rt_v6_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + return 
ipa_debugfs_print_flt_rt_stats(IPA_IP_v6, false, file, ubuf, + count, ppos); +} + +static const struct file_operations ipa3_quota_ops = { + .read = ipa_debugfs_print_quota_stats, + .write = ipa_debugfs_reset_quota_stats, +}; + +static const struct file_operations ipa3_tethering_ops = { + .read = ipa_debugfs_print_tethering_stats, + .write = ipa_debugfs_reset_tethering_stats, +}; + +static const struct file_operations ipa3_flt_v4_ops = { + .read = ipa_debugfs_print_flt_v4_stats, + .write = ipa_debugfs_control_flt_v4_stats, +}; + +static const struct file_operations ipa3_flt_v6_ops = { + .read = ipa_debugfs_print_flt_v6_stats, + .write = ipa_debugfs_control_flt_v6_stats, +}; + +static const struct file_operations ipa3_rt_v4_ops = { + .read = ipa_debugfs_print_rt_v4_stats, + .write = ipa_debugfs_control_rt_v4_stats, +}; + +static const struct file_operations ipa3_rt_v6_ops = { + .read = ipa_debugfs_print_rt_v6_stats, + .write = ipa_debugfs_control_rt_v6_stats, +}; + +static const struct file_operations ipa3_drop_ops = { + .read = ipa_debugfs_print_drop_stats, + .write = ipa_debugfs_reset_drop_stats, +}; + + +int ipa_debugfs_init_stats(struct dentry *parent) +{ + const mode_t read_write_mode = 0664; + struct dentry *file; + struct dentry *dent; + + if (!ipa3_ctx->hw_stats.enabled) + return 0; + + dent = debugfs_create_dir("hw_stats", parent); + if (IS_ERR_OR_NULL(dent)) { + IPAERR("fail to create folder in debug_fs\n"); + return -EFAULT; + } + + file = debugfs_create_file("quota", read_write_mode, dent, NULL, + &ipa3_quota_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "quota"); + goto fail; + } + + file = debugfs_create_file("drop", read_write_mode, dent, NULL, + &ipa3_drop_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "drop"); + goto fail; + } + + file = debugfs_create_file("tethering", read_write_mode, dent, NULL, + &ipa3_tethering_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "tethering"); + goto fail; + } + + file = debugfs_create_file("flt_v4", read_write_mode, dent, NULL, + &ipa3_flt_v4_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "flt_v4"); + goto fail; + } + + file = debugfs_create_file("flt_v6", read_write_mode, dent, NULL, + &ipa3_flt_v6_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "flt_v6"); + goto fail; + } + + file = debugfs_create_file("rt_v4", read_write_mode, dent, NULL, + &ipa3_rt_v4_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "rt_v4"); + goto fail; + } + + file = debugfs_create_file("rt_v6", read_write_mode, dent, NULL, + &ipa3_rt_v6_ops); + if (IS_ERR_OR_NULL(file)) { + IPAERR("fail to create file %s\n", "rt_v6"); + goto fail; + } + + return 0; +fail: + debugfs_remove_recursive(dent); + return -EFAULT; +} +#endif diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h new file mode 100644 index 000000000000..866f35f1c87f --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -0,0 +1,2231 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef _IPA3_I_H_ +#define _IPA3_I_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_hw_defs.h" +#include "ipa_qmi_service.h" +#include "../ipa_api.h" +#include "ipahal/ipahal_reg.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" +#include "ipahal/ipahal_hw_stats.h" +#include "../ipa_common_i.h" +#include "ipa_uc_offload_i.h" + +#define DRV_NAME "ipa" +#define NAT_DEV_NAME "ipaNatTable" +#define IPA_COOKIE 0x57831603 +#define IPA_RT_RULE_COOKIE 0x57831604 +#define IPA_RT_TBL_COOKIE 0x57831605 +#define IPA_FLT_COOKIE 0x57831606 +#define IPA_HDR_COOKIE 0x57831607 +#define IPA_PROC_HDR_COOKIE 0x57831608 + +#define MTU_BYTE 1500 + +#define IPA_EP_NOT_ALLOCATED (-1) +#define IPA3_MAX_NUM_PIPES 31 +#define IPA_SYS_DESC_FIFO_SZ 0x800 +#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000 +#define IPA_COMMON_EVENT_RING_SIZE 0x7C00 +#define IPA_LAN_RX_HEADER_LENGTH (2) +#define IPA_QMAP_HEADER_LENGTH (4) +#define IPA_DL_CHECKSUM_LENGTH (8) +#define IPA_NUM_DESC_PER_SW_TX (3) +#define IPA_GENERIC_RX_POOL_SZ 192 +#define IPA_UC_FINISH_MAX 6 +#define IPA_UC_WAIT_MIN_SLEEP 1000 +#define IPA_UC_WAII_MAX_SLEEP 1200 +/* + * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but + * IPA users still use sps_iovec size as FIFO element size. + */ +#define IPA_FIFO_ELEMENT_SIZE 8 + +#define IPA_MAX_STATUS_STAT_NUM 30 + +#define IPA_IPC_LOG_PAGES 50 + +#define IPADBG(fmt, args...) \ + do { \ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa3_ctx) { \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define IPADBG_LOW(fmt, args...) \ + do { \ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa3_ctx) \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAERR(fmt, args...) \ + do { \ + pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa3_ctx) { \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define IPAERR_RL(fmt, args...) 
\ + do { \ + pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args);\ + if (ipa3_ctx) { \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define WLAN_AMPDU_TX_EP 15 +#define WLAN_PROD_TX_EP 19 +#define WLAN1_CONS_RX_EP 14 +#define WLAN2_CONS_RX_EP 16 +#define WLAN3_CONS_RX_EP 17 +#define WLAN4_CONS_RX_EP 18 + +#define IPA_RAM_NAT_OFST 0 +#define IPA_RAM_NAT_SIZE 0 +#define IPA_MEM_CANARY_VAL 0xdeadbeef + +#define IPA_STATS + +#ifdef IPA_STATS +#define IPA_STATS_INC_CNT(val) (++val) +#define IPA_STATS_DEC_CNT(val) (--val) +#define IPA_STATS_EXCP_CNT(__excp, __base) do { \ + if (__excp < 0 || __excp >= IPAHAL_PKT_STATUS_EXCEPTION_MAX) \ + break; \ + ++__base[__excp]; \ + } while (0) +#else +#define IPA_STATS_INC_CNT(x) do { } while (0) +#define IPA_STATS_DEC_CNT(x) +#define IPA_STATS_EXCP_CNT(__excp, __base) do { } while (0) +#endif + +#define IPA_HDR_BIN0 0 +#define IPA_HDR_BIN1 1 +#define IPA_HDR_BIN2 2 +#define IPA_HDR_BIN3 3 +#define IPA_HDR_BIN4 4 +#define IPA_HDR_BIN_MAX 5 + +#define IPA_HDR_PROC_CTX_BIN0 0 +#define IPA_HDR_PROC_CTX_BIN1 1 +#define IPA_HDR_PROC_CTX_BIN_MAX 2 + +#define IPA_RX_POOL_CEIL 32 +#define IPA_RX_SKB_SIZE 1792 + +#define IPA_A5_MUX_HDR_NAME "ipa_excp_hdr" +#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr" +#define IPA_INVALID_L4_PROTOCOL 0xFF + +#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8 +#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \ + (((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \ + ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1)) + +#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX) +#define IPA_MEM_PART(x_) (ipa3_ctx->ctrl->mem_partition.x_) + +#define IPA_GSI_CHANNEL_STOP_MAX_RETRY 10 +#define IPA_GSI_CHANNEL_STOP_PKT_SIZE 1 + +#define IPA_GSI_CHANNEL_EMPTY_MAX_RETRY 15 +#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC (1000) +#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC (2000) + +#define IPA_SLEEP_CLK_RATE_KHZ (32) + +#define IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120 +#define IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN 96 +#define IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50 +#define IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN 40 +#define SMEM_IPA_FILTER_TABLE 497 + +enum { + SMEM_APPS, + SMEM_MODEM, + SMEM_Q6, + SMEM_DSPS, + SMEM_WCNSS, + SMEM_CDSP, + SMEM_RPM, + SMEM_TZ, + SMEM_SPSS, + SMEM_HYP, + NUM_SMEM_SUBSYSTEMS, +}; + +struct ipa3_active_client_htable_entry { + struct hlist_node list; + char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN]; + int count; + enum ipa_active_client_log_type type; +}; + +struct ipa3_active_clients_log_ctx { + spinlock_t lock; + char *log_buffer[IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES]; + int log_head; + int log_tail; + bool log_rdy; + struct hlist_head htable[IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE]; +}; + +struct ipa3_client_names { + enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS]; + int length; +}; + +struct ipa_smmu_cb_ctx { + bool valid; + struct device *dev; + struct dma_iommu_mapping *mapping; + struct iommu_domain *iommu; + unsigned long next_addr; + u32 va_start; + u32 va_size; + u32 va_end; +}; + +/** + * struct ipa3_flt_entry - IPA filtering table entry + * @link: entry's link in global filtering enrties list + * @rule: filter rule + * @cookie: cookie used for validity check + * @tbl: filter table + * @rt_tbl: routing table + * @hw_len: entry's size + * @id: rule handle - globally unique + * @prio: rule 10bit priority which defines the order of the rule + * among 
other rules at the same integrated table + * @rule_id: rule 10bit ID to be returned in packet status + */ +struct ipa3_flt_entry { + struct list_head link; + u32 cookie; + struct ipa_flt_rule rule; + struct ipa3_flt_tbl *tbl; + struct ipa3_rt_tbl *rt_tbl; + u32 hw_len; + int id; + u16 prio; + u16 rule_id; +}; + +/** + * struct ipa3_rt_tbl - IPA routing table + * @link: table's link in global routing tables list + * @head_rt_rule_list: head of routing rules list + * @name: routing table name + * @idx: routing table index + * @rule_cnt: number of rules in routing table + * @ref_cnt: reference counter of routing table + * @set: collection of routing tables + * @cookie: cookie used for validity check + * @in_sys: flag indicating if the table is located in system memory + * @sz: the size of the routing table + * @curr_mem: current routing tables block in sys memory + * @prev_mem: previous routing table block in sys memory + * @id: routing table id + * @rule_ids: common idr structure that holds the rule_id for each rule + */ +struct ipa3_rt_tbl { + struct list_head link; + u32 cookie; + struct list_head head_rt_rule_list; + char name[IPA_RESOURCE_NAME_MAX]; + u32 idx; + u32 rule_cnt; + u32 ref_cnt; + struct ipa3_rt_tbl_set *set; + bool in_sys[IPA_RULE_TYPE_MAX]; + u32 sz[IPA_RULE_TYPE_MAX]; + struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX]; + struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX]; + int id; + struct idr *rule_ids; +}; + +/** + * struct ipa3_hdr_entry - IPA header table entry + * @link: entry's link in global header table entries list + * @hdr: the header + * @hdr_len: header length + * @name: name of header table entry + * @type: l2 header type + * @is_partial: flag indicating if header table entry is partial + * @is_hdr_proc_ctx: false - hdr entry resides in hdr table, + * true - hdr entry resides in DDR and pointed to by proc ctx + * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true, + * else 0 + * @proc_ctx: processing context header + * @offset_entry: entry's offset + * @cookie: cookie used for validity check + * @ref_cnt: reference counter of routing table + * @id: header entry id + * @is_eth2_ofst_valid: is eth2_ofst field valid? + * @eth2_ofst: offset to start of Ethernet-II/802.3 header + * @user_deleted: is the header deleted by the user? 
+ */ +struct ipa3_hdr_entry { + struct list_head link; + u32 cookie; + u8 hdr[IPA_HDR_MAX_SIZE]; + u32 hdr_len; + char name[IPA_RESOURCE_NAME_MAX]; + enum ipa_hdr_l2_type type; + u8 is_partial; + bool is_hdr_proc_ctx; + dma_addr_t phys_base; + struct ipa3_hdr_proc_ctx_entry *proc_ctx; + struct ipa_hdr_offset_entry *offset_entry; + u32 ref_cnt; + int id; + u8 is_eth2_ofst_valid; + u16 eth2_ofst; + bool user_deleted; +}; + +/** + * struct ipa3_hdr_tbl - IPA header table + * @head_hdr_entry_list: header entries list + * @head_offset_list: header offset list + * @head_free_offset_list: header free offset list + * @hdr_cnt: number of headers + * @end: the last header index + */ +struct ipa3_hdr_tbl { + struct list_head head_hdr_entry_list; + struct list_head head_offset_list[IPA_HDR_BIN_MAX]; + struct list_head head_free_offset_list[IPA_HDR_BIN_MAX]; + u32 hdr_cnt; + u32 end; +}; + +/** + * struct ipa3_hdr_offset_entry - IPA header offset entry + * @link: entry's link in global processing context header offset entries list + * @offset: the offset + * @bin: bin + */ +struct ipa3_hdr_proc_ctx_offset_entry { + struct list_head link; + u32 offset; + u32 bin; +}; + +/** + * struct ipa3_hdr_proc_ctx_entry - IPA processing context header table entry + * @link: entry's link in global header table entries list + * @type: header processing context type + * @l2tp_params: L2TP parameters + * @offset_entry: entry's offset + * @hdr: the header + * @cookie: cookie used for validity check + * @ref_cnt: reference counter of routing table + * @id: processing context header entry id + * @user_deleted: is the hdr processing context deleted by the user? + */ +struct ipa3_hdr_proc_ctx_entry { + struct list_head link; + u32 cookie; + enum ipa_hdr_proc_type type; + union ipa_l2tp_hdr_proc_ctx_params l2tp_params; + struct ipa3_hdr_proc_ctx_offset_entry *offset_entry; + struct ipa3_hdr_entry *hdr; + u32 ref_cnt; + int id; + bool user_deleted; +}; + +/** + * struct ipa3_hdr_proc_ctx_tbl - IPA processing context header table + * @head_proc_ctx_entry_list: header entries list + * @head_offset_list: header offset list + * @head_free_offset_list: header free offset list + * @proc_ctx_cnt: number of processing context headers + * @end: the last processing context header index + * @start_offset: offset in words of processing context header table + */ +struct ipa3_hdr_proc_ctx_tbl { + struct list_head head_proc_ctx_entry_list; + struct list_head head_offset_list[IPA_HDR_PROC_CTX_BIN_MAX]; + struct list_head head_free_offset_list[IPA_HDR_PROC_CTX_BIN_MAX]; + u32 proc_ctx_cnt; + u32 end; + u32 start_offset; +}; + +/** + * struct ipa3_flt_tbl - IPA filter table + * @head_flt_rule_list: filter rules list + * @rule_cnt: number of filter rules + * @in_sys: flag indicating if filter table is located in system memory + * @sz: the size of the filter tables + * @end: the last header index + * @curr_mem: current filter tables block in sys memory + * @prev_mem: previous filter table block in sys memory + * @rule_ids: common idr structure that holds the rule_id for each rule + */ +struct ipa3_flt_tbl { + struct list_head head_flt_rule_list; + u32 rule_cnt; + bool in_sys[IPA_RULE_TYPE_MAX]; + u32 sz[IPA_RULE_TYPE_MAX]; + struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX]; + struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX]; + bool sticky_rear; + struct idr *rule_ids; +}; + +/** + * struct ipa3_rt_entry - IPA routing table entry + * @link: entry's link in global routing table entries list + * @rule: routing rule + * @cookie: cookie used for 
validity check + * @tbl: routing table + * @hdr: header table + * @proc_ctx: processing context table + * @hw_len: the length of the table + * @id: rule handle - globaly unique + * @prio: rule 10bit priority which defines the order of the rule + * among other rules at the integrated same table + * @rule_id: rule 10bit ID to be returned in packet status + */ +struct ipa3_rt_entry { + struct list_head link; + u32 cookie; + struct ipa_rt_rule rule; + struct ipa3_rt_tbl *tbl; + struct ipa3_hdr_entry *hdr; + struct ipa3_hdr_proc_ctx_entry *proc_ctx; + u32 hw_len; + int id; + u16 prio; + u16 rule_id; +}; + +/** + * struct ipa3_rt_tbl_set - collection of routing tables + * @head_rt_tbl_list: collection of routing tables + * @tbl_cnt: number of routing tables + * @rule_ids: idr structure that holds the rule_id for each rule + */ +struct ipa3_rt_tbl_set { + struct list_head head_rt_tbl_list; + u32 tbl_cnt; + struct idr rule_ids; +}; + +/** + * struct ipa3_wlan_stats - Wlan stats for each wlan endpoint + * @rx_pkts_rcvd: Packets sent by wlan driver + * @rx_pkts_status_rcvd: Status packets received from ipa hw + * @rx_hd_processed: Data Descriptors processed by IPA Driver + * @rx_hd_reply: Data Descriptors recycled by wlan driver + * @rx_hd_rcvd: Data Descriptors sent by wlan driver + * @rx_pkt_leak: Packet count that are not recycled + * @rx_dp_fail: Packets failed to transfer to IPA HW + * @tx_pkts_rcvd: SKB Buffers received from ipa hw + * @tx_pkts_sent: SKB Buffers sent to wlan driver + * @tx_pkts_dropped: Dropped packets count + */ +struct ipa3_wlan_stats { + u32 rx_pkts_rcvd; + u32 rx_pkts_status_rcvd; + u32 rx_hd_processed; + u32 rx_hd_reply; + u32 rx_hd_rcvd; + u32 rx_pkt_leak; + u32 rx_dp_fail; + u32 tx_pkts_rcvd; + u32 tx_pkts_sent; + u32 tx_pkts_dropped; +}; + +/** + * struct ipa3_wlan_comm_memb - Wlan comm members + * @wlan_spinlock: protects wlan comm buff list and its size + * @ipa_tx_mul_spinlock: protects tx dp mul transfer + * @wlan_comm_total_cnt: wlan common skb buffers allocated count + * @wlan_comm_free_cnt: wlan common skb buffer free count + * @total_tx_pkts_freed: Recycled Buffer count + * @wlan_comm_desc_list: wlan common skb buffer list + */ +struct ipa3_wlan_comm_memb { + spinlock_t wlan_spinlock; + spinlock_t ipa_tx_mul_spinlock; + u32 wlan_comm_total_cnt; + u32 wlan_comm_free_cnt; + u32 total_tx_pkts_freed; + struct list_head wlan_comm_desc_list; + atomic_t active_clnt_cnt; +}; + +struct ipa_gsi_ep_mem_info { + u16 evt_ring_len; + u64 evt_ring_base_addr; + void *evt_ring_base_vaddr; + u16 chan_ring_len; + u64 chan_ring_base_addr; + void *chan_ring_base_vaddr; +}; + +struct ipa3_status_stats { + struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM]; + unsigned int curr; +}; + +/** + * struct ipa3_ep_context - IPA end point context + * @valid: flag indicating id EP context is valid + * @client: EP client type + * @gsi_chan_hdl: EP's GSI channel handle + * @gsi_evt_ring_hdl: EP's GSI channel event ring handle + * @gsi_mem_info: EP's GSI channel rings info + * @chan_scratch: EP's GSI channel scratch info + * @cfg: EP cionfiguration + * @dst_pipe_index: destination pipe index + * @rt_tbl_idx: routing table index + * @priv: user provided information which will forwarded once the user is + * notified for new data avail + * @client_notify: user provided CB for EP events notification, the event is + * data revived. 
+ * @skip_ep_cfg: boolean field that determines if EP should be configured + * by IPA driver + * @keep_ipa_awake: when true, IPA will not be clock gated + * @disconnect_in_progress: Indicates client disconnect in progress. + * @qmi_request_sent: Indicates whether QMI request to enable clear data path + * request is sent or not. + * @napi_enabled: when true, IPA call client callback to start polling + */ +struct ipa3_ep_context { + int valid; + enum ipa_client_type client; + unsigned long gsi_chan_hdl; + unsigned long gsi_evt_ring_hdl; + struct ipa_gsi_ep_mem_info gsi_mem_info; + union __packed gsi_channel_scratch chan_scratch; + bool bytes_xfered_valid; + u16 bytes_xfered; + dma_addr_t phys_base; + struct ipa_ep_cfg cfg; + struct ipa_ep_cfg_holb holb; + struct ipahal_reg_ep_cfg_status status; + u32 dst_pipe_index; + u32 rt_tbl_idx; + void *priv; + void (*client_notify)(void *priv, enum ipa_dp_evt_type evt, + unsigned long data); + atomic_t avail_fifo_desc; + u32 dflt_flt4_rule_hdl; + u32 dflt_flt6_rule_hdl; + bool skip_ep_cfg; + bool keep_ipa_awake; + struct ipa3_wlan_stats wstats; + u32 uc_offload_state; + bool disconnect_in_progress; + u32 qmi_request_sent; + bool napi_enabled; + u32 eot_in_poll_err; + + /* sys MUST be the last element of this struct */ + struct ipa3_sys_context *sys; +}; + +/** + * ipa_usb_xdci_chan_params - xDCI channel related properties + * + * @ipa_ep_cfg: IPA EP configuration + * @client: type of "client" + * @priv: callback cookie + * @notify: callback + * priv - callback cookie evt - type of event data - data relevant + * to event. May not be valid. See event_type enum for valid + * cases. + * @skip_ep_cfg: boolean field that determines if EP should be + * configured by IPA driver + * @keep_ipa_awake: when true, IPA will not be clock gated + * @evt_ring_params: parameters for the channel's event ring + * @evt_scratch: parameters for the channel's event ring scratch + * @chan_params: parameters for the channel + * @chan_scratch: parameters for the channel's scratch + * + */ +struct ipa_request_gsi_channel_params { + struct ipa_ep_cfg ipa_ep_cfg; + enum ipa_client_type client; + void *priv; + ipa_notify_cb notify; + bool skip_ep_cfg; + bool keep_ipa_awake; + struct gsi_evt_ring_props evt_ring_params; + union __packed gsi_evt_scratch evt_scratch; + struct gsi_chan_props chan_params; + union __packed gsi_channel_scratch chan_scratch; +}; + +enum ipa3_sys_pipe_policy { + IPA_POLICY_INTR_MODE, + IPA_POLICY_NOINTR_MODE, + IPA_POLICY_INTR_POLL_MODE, +}; + +struct ipa3_repl_ctx { + struct ipa3_rx_pkt_wrapper **cache; + atomic_t head_idx; + atomic_t tail_idx; + u32 capacity; +}; + +/** + * struct ipa3_sys_context - IPA GPI pipes context + * @head_desc_list: header descriptors list + * @len: the size of the above list + * @spinlock: protects the list and its size + * @ep: IPA EP context + * + * IPA context specific to the GPI pipes a.k.a LAN IN/OUT and WAN + */ +struct ipa3_sys_context { + u32 len; + u32 len_pending_xfer; + atomic_t curr_polling_state; + struct delayed_work switch_to_intr_work; + enum ipa3_sys_pipe_policy policy; + bool use_comm_evt_ring; + int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys); + struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags); + void (*free_skb)(struct sk_buff *skb); + void (*free_rx_wrapper)(struct ipa3_rx_pkt_wrapper *rk_pkt); + u32 rx_buff_sz; + u32 rx_pool_sz; + struct sk_buff *prev_skb; + unsigned int len_rem; + unsigned int len_pad; + unsigned int len_partial; + bool drop_packet; + struct work_struct work; + 
struct delayed_work replenish_rx_work; + struct work_struct repl_work; + void (*repl_hdlr)(struct ipa3_sys_context *sys); + struct ipa3_repl_ctx repl; + + /* ordering is important - mutable fields go above */ + struct ipa3_ep_context *ep; + struct list_head head_desc_list; + struct list_head rcycl_list; + spinlock_t spinlock; + struct hrtimer db_timer; + struct workqueue_struct *wq; + struct workqueue_struct *repl_wq; + struct ipa3_status_stats *status_stat; + /* ordering is important - other immutable fields go below */ +}; + +/** + * enum ipa3_desc_type - IPA decriptors type + * + * IPA decriptors type, IPA supports DD and ICD but no CD + */ +enum ipa3_desc_type { + IPA_DATA_DESC, + IPA_DATA_DESC_SKB, + IPA_DATA_DESC_SKB_PAGED, + IPA_IMM_CMD_DESC, +}; + +/** + * struct ipa3_tx_pkt_wrapper - IPA Tx packet wrapper + * @type: specify if this packet is for the skb or immediate command + * @mem: memory buffer used by this Tx packet + * @work: work struct for current Tx packet + * @link: linked to the wrappers on that pipe + * @callback: IPA client provided callback + * @user1: cookie1 for above callback + * @user2: cookie2 for above callback + * @sys: corresponding IPA sys context + * @cnt: 1 for single transfers, + * >1 and <0xFFFF for first of a "multiple" transfer, + * 0xFFFF for last desc, 0 for rest of "multiple' transfer + * @bounce: va of bounce buffer + * @unmap_dma: in case this is true, the buffer will not be dma unmapped + * + * This struct can wrap both data packet and immediate command packet. + */ +struct ipa3_tx_pkt_wrapper { + enum ipa3_desc_type type; + struct ipa_mem_buffer mem; + struct work_struct work; + struct list_head link; + void (*callback)(void *user1, int user2); + void *user1; + int user2; + struct ipa3_sys_context *sys; + u32 cnt; + void *bounce; + bool no_unmap_dma; +}; + +/** + * struct ipa3_dma_xfer_wrapper - IPADMA transfer descr wrapper + * @phys_addr_src: physical address of the source data to copy + * @phys_addr_dest: physical address to store the copied data + * @len: len in bytes to copy + * @link: linked to the wrappers list on the proper(sync/async) cons pipe + * @xfer_done: completion object for sync_memcpy completion + * @callback: IPADMA client provided completion callback + * @user1: cookie1 for above callback + * + * This struct can wrap both sync and async memcpy transfers descriptors. 
+ */ +struct ipa3_dma_xfer_wrapper { + u64 phys_addr_src; + u64 phys_addr_dest; + u16 len; + struct list_head link; + struct completion xfer_done; + void (*callback)(void *user1); + void *user1; +}; + +/** + * struct ipa3_desc - IPA descriptor + * @type: skb or immediate command or plain old data + * @pyld: points to skb + * @frag: points to paged fragment + * or kmalloc'ed immediate command parameters/plain old data + * @dma_address: dma mapped address of pyld + * @dma_address_valid: valid field for dma_address + * @is_tag_status: flag for IP_PACKET_TAG_STATUS imd cmd + * @len: length of the pyld + * @opcode: for immediate commands + * @callback: IPA client provided completion callback + * @user1: cookie1 for above callback + * @user2: cookie2 for above callback + * @xfer_done: completion object for sync completion + * @skip_db_ring: specifies whether GSI doorbell should not be rang + */ +struct ipa3_desc { + enum ipa3_desc_type type; + void *pyld; + skb_frag_t *frag; + dma_addr_t dma_address; + bool dma_address_valid; + bool is_tag_status; + u16 len; + u16 opcode; + void (*callback)(void *user1, int user2); + void *user1; + int user2; + struct completion xfer_done; + bool skip_db_ring; +}; + +/** + * struct ipa3_rx_pkt_wrapper - IPA Rx packet wrapper + * @skb: skb + * @dma_address: DMA address of this Rx packet + * @link: linked to the Rx packets on that pipe + * @len: how many bytes are copied into skb's flat buffer + */ +struct ipa3_rx_pkt_wrapper { + struct list_head link; + struct ipa_rx_data data; + u32 len; + struct work_struct work; + struct ipa3_sys_context *sys; +}; + +/** + * struct ipa_pdn_entry - IPA PDN config table entry + * @public_ip: the PDN's public ip + * @src_metadata: the PDN's metadata to be replaced for source NAT + * @dst_metadata: the PDN's metadata to be replaced for destination NAT + * @resrvd: reserved field + */ +struct ipa_pdn_entry { + u32 public_ip; + u32 src_metadata; + u32 dst_metadata; + u32 resrvd; +}; +/** + * struct ipa3_nat_mem - IPA NAT memory description + * @class: pointer to the struct class + * @dev: the dev_t of the device + * @cdev: cdev of the device + * @dev_num: device number + * @vaddr: virtual address + * @dma_handle: DMA handle + * @size: NAT memory size + * @is_mapped: flag indicating if NAT memory is mapped + * @is_sys_mem: flag indicating if NAT memory is sys memory + * @is_dev_init: flag indicating if NAT device is initialized + * @lock: NAT memory mutex + * @nat_base_address: nat table virutal address + * @ipv4_rules_addr: base nat table address + * @ipv4_expansion_rules_addr: expansion table address + * @index_table_addr: index table address + * @index_table_expansion_addr: index expansion table address + * @size_base_tables: base table size + * @size_expansion_tables: expansion table size + * @public_ip_addr: ip address of nat table + * @pdn_mem: pdn config table SW cache memory structure + */ +struct ipa3_nat_mem { + struct class *class; + struct device *dev; + struct cdev cdev; + dev_t dev_num; + void *vaddr; + dma_addr_t dma_handle; + size_t size; + bool is_mapped; + bool is_sys_mem; + bool is_dev_init; + bool is_dev; + struct mutex lock; + void *nat_base_address; + char *ipv4_rules_addr; + char *ipv4_expansion_rules_addr; + char *index_table_addr; + char *index_table_expansion_addr; + u32 size_base_tables; + u32 size_expansion_tables; + u32 public_ip_addr; + void *tmp_vaddr; + dma_addr_t tmp_dma_handle; + bool is_tmp_mem; + struct ipa_mem_buffer pdn_mem; +}; + +/** + * enum ipa3_hw_mode - IPA hardware mode + * @IPA_HW_Normal: 
Regular IPA hardware + * @IPA_HW_Virtual: IPA hardware supporting virtual memory allocation + * @IPA_HW_PCIE: IPA hardware supporting memory allocation over PCIE Bridge + */ +enum ipa3_hw_mode { + IPA_HW_MODE_NORMAL = 0, + IPA_HW_MODE_VIRTUAL = 1, + IPA_HW_MODE_PCIE = 2 +}; + +enum ipa3_config_this_ep { + IPA_CONFIGURE_THIS_EP, + IPA_DO_NOT_CONFIGURE_THIS_EP, +}; + +struct ipa3_stats { + u32 tx_sw_pkts; + u32 tx_hw_pkts; + u32 rx_pkts; + u32 rx_excp_pkts[IPAHAL_PKT_STATUS_EXCEPTION_MAX]; + u32 rx_repl_repost; + u32 tx_pkts_compl; + u32 rx_q_len; + u32 msg_w[IPA_EVENT_MAX_NUM]; + u32 msg_r[IPA_EVENT_MAX_NUM]; + u32 stat_compl; + u32 aggr_close; + u32 wan_aggr_close; + u32 wan_rx_empty; + u32 wan_repl_rx_empty; + u32 lan_rx_empty; + u32 lan_repl_rx_empty; + u32 flow_enable; + u32 flow_disable; + u32 tx_non_linear; +}; + +struct ipa3_active_clients { + struct mutex mutex; + atomic_t cnt; +}; + +struct ipa3_wakelock_ref_cnt { + spinlock_t spinlock; + int cnt; +}; + +struct ipa3_tag_completion { + struct completion comp; + atomic_t cnt; +}; + +struct ipa3_controller; + +/** + * struct ipa3_uc_hdlrs - IPA uC callback functions + * @ipa_uc_loaded_hdlr: Function handler when uC is loaded + * @ipa_uc_event_hdlr: Event handler function + * @ipa3_uc_response_hdlr: Response handler function + * @ipa_uc_event_log_info_hdlr: Log event handler function + */ +struct ipa3_uc_hdlrs { + void (*ipa_uc_loaded_hdlr)(void); + + void (*ipa_uc_event_hdlr) + (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio); + + int (*ipa3_uc_response_hdlr) + (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio, + u32 *uc_status); + + void (*ipa_uc_event_log_info_hdlr) + (struct IpaHwEventLogInfoData_t *uc_event_top_mmio); +}; + +/** + * enum ipa3_hw_flags - flags which defines the behavior of HW + * + * @IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE: Halt system in case of assert + * failure. + * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR: Channel error would be reported + * in the event ring only. No event to CPU. 
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP: No need to report event + * IPA_HW_2_CPU_EVENT_MHI_WAKE_UP_REQUEST + * @IPA_HW_FLAG_WORK_OVER_DDR: Perform all transaction to external addresses by + * QMB (avoid memcpy) + * @IPA_HW_FLAG_NO_REPORT_OOB: If set do not report that the device is OOB in + * IN Channel + * @IPA_HW_FLAG_NO_REPORT_DB_MODE: If set, do not report that the device is + * entering a mode where it expects a doorbell to be rung for OUT Channel + * @IPA_HW_FLAG_NO_START_OOB_TIMER + */ +enum ipa3_hw_flags { + IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE = 0x01, + IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR = 0x02, + IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP = 0x04, + IPA_HW_FLAG_WORK_OVER_DDR = 0x08, + IPA_HW_FLAG_NO_REPORT_OOB = 0x10, + IPA_HW_FLAG_NO_REPORT_DB_MODE = 0x20, + IPA_HW_FLAG_NO_START_OOB_TIMER = 0x40 +}; + +/** + * struct ipa3_uc_ctx - IPA uC context + * @uc_inited: Indicates if uC interface has been initialized + * @uc_loaded: Indicates if uC has loaded + * @uc_failed: Indicates if uC has failed / returned an error + * @uc_lock: uC interface lock to allow only one uC interaction at a time + * @uc_spinlock: same as uc_lock but for irq contexts + * @uc_completation: Completion mechanism to wait for uC commands + * @uc_sram_mmio: Pointer to uC mapped memory + * @pending_cmd: The last command sent waiting to be ACKed + * @uc_status: The last status provided by the uC + * @uc_error_type: error type from uC error event + * @uc_error_timestamp: tag timer sampled after uC crashed + */ +struct ipa3_uc_ctx { + bool uc_inited; + bool uc_loaded; + bool uc_failed; + struct mutex uc_lock; + spinlock_t uc_spinlock; + struct completion uc_completion; + struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio; + struct IpaHwEventLogInfoData_t *uc_event_top_mmio; + u32 uc_event_top_ofst; + u32 pending_cmd; + u32 uc_status; + u32 uc_error_type; + u32 uc_error_timestamp; + phys_addr_t rdy_ring_base_pa; + phys_addr_t rdy_ring_rp_pa; + u32 rdy_ring_size; + phys_addr_t rdy_comp_ring_base_pa; + phys_addr_t rdy_comp_ring_wp_pa; + u32 rdy_comp_ring_size; + u32 *rdy_ring_rp_va; + u32 *rdy_comp_ring_wp_va; +}; + +/** + * struct ipa3_uc_wdi_ctx + * @wdi_uc_top_ofst: + * @wdi_uc_top_mmio: + * @wdi_uc_stats_ofst: + * @wdi_uc_stats_mmio: + */ +struct ipa3_uc_wdi_ctx { + /* WDI specific fields */ + u32 wdi_uc_stats_ofst; + struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio; + void *priv; + ipa_uc_ready_cb uc_ready_cb; + /* for AP+STA stats update */ +#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN + ipa_wdi_meter_notifier_cb stats_notify; +#endif +}; + +/** + * struct ipa3_transport_pm - transport power management related members + * @transport_pm_mutex: Mutex to protect the transport_pm functionality. 
+ */ +struct ipa3_transport_pm { + atomic_t dec_clients; + atomic_t eot_activity; + struct mutex transport_pm_mutex; +}; + +/** + * struct ipa3cm_client_info - the client-info indicated from IPACM + * @ipacm_client_enum: the enum to indicate tether-client + * @ipacm_client_uplink: the bool to indicate pipe for uplink + */ +struct ipa3cm_client_info { + enum ipacm_client_enum client_enum; + bool uplink; +}; + +struct ipa3_smp2p_info { + u32 out_base_id; + u32 in_base_id; + bool ipa_clk_on; + bool res_sent; +}; + +/** + * struct ipa3_ready_cb_info - A list of all the registrations + * for an indication of IPA driver readiness + * + * @link: linked list link + * @ready_cb: callback + * @user_data: User data + * + */ +struct ipa3_ready_cb_info { + struct list_head link; + ipa_ready_cb ready_cb; + void *user_data; +}; + +struct ipa_tz_unlock_reg_info { + u64 reg_addr; + u32 size; +}; + +struct ipa_dma_task_info { + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_pyld *cmd_pyld; +}; + +struct ipa_quota_stats { + u64 num_ipv4_bytes; + u64 num_ipv6_bytes; + u32 num_ipv4_pkts; + u32 num_ipv6_pkts; +}; + +struct ipa_quota_stats_all { + struct ipa_quota_stats client[IPA_CLIENT_MAX]; +}; + +struct ipa_drop_stats { + u32 drop_packet_cnt; + u32 drop_byte_cnt; +}; + +struct ipa_drop_stats_all { + struct ipa_drop_stats client[IPA_CLIENT_MAX]; +}; + +struct ipa_hw_stats_quota { + struct ipahal_stats_init_quota init; + struct ipa_quota_stats_all stats; +}; + +struct ipa_hw_stats_teth { + struct ipahal_stats_init_tethering init; + struct ipa_quota_stats_all prod_stats[IPA_CLIENT_MAX]; +}; + +struct ipa_hw_stats_flt_rt { + struct ipahal_stats_init_flt_rt flt_v4_init; + struct ipahal_stats_init_flt_rt flt_v6_init; + struct ipahal_stats_init_flt_rt rt_v4_init; + struct ipahal_stats_init_flt_rt rt_v6_init; +}; + +struct ipa_hw_stats_drop { + struct ipahal_stats_init_drop init; + struct ipa_drop_stats_all stats; +}; + +struct ipa_hw_stats { + bool enabled; + struct ipa_hw_stats_quota quota; + struct ipa_hw_stats_teth teth; + struct ipa_hw_stats_flt_rt flt_rt; + struct ipa_hw_stats_drop drop; +}; + +/** + * struct ipa3_context - IPA context + * @class: pointer to the struct class + * @dev_num: device number + * @dev: the dev_t of the device + * @cdev: cdev of the device + * @ep: list of all end points + * @skip_ep_cfg_shadow: state to update filter table correctly across + power-save + * @ep_flt_bitmap: End-points supporting filtering bitmap + * @ep_flt_num: End-points supporting filtering number + * @resume_on_connect: resume ep on ipa connect + * @flt_tbl: list of all IPA filter tables + * @flt_rule_ids: idr structure that holds the rule_id for each rule + * @mode: IPA operating mode + * @mmio: iomem + * @ipa_wrapper_base: IPA wrapper base address + * @hdr_tbl: IPA header table + * @hdr_proc_ctx_tbl: IPA processing context table + * @rt_tbl_set: list of routing tables each of which is a list of rules + * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped + * @flt_rule_cache: filter rule cache + * @rt_rule_cache: routing rule cache + * @hdr_cache: header cache + * @hdr_offset_cache: header offset cache + * @hdr_proc_ctx_cache: processing context cache + * @hdr_proc_ctx_offset_cache: processing context offset cache + * @rt_tbl_cache: routing table cache + * @tx_pkt_wrapper_cache: Tx packets cache + * @rx_pkt_wrapper_cache: Rx packets cache + * @rt_idx_bitmap: routing table index bitmap + * @lock: this does NOT protect the linked lists within ipa3_sys_context + * @smem_sz: shared memory size 
available for SW use starting + * from non-restricted bytes + * @smem_restricted_bytes: the bytes that SW should not use in the shared mem + * @nat_mem: NAT memory + * @excp_hdr_hdl: exception header handle + * @dflt_v4_rt_rule_hdl: default v4 routing rule handle + * @dflt_v6_rt_rule_hdl: default v6 routing rule handle + * @aggregation_type: aggregation type used on USB client endpoint + * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint + * @aggregation_time_limit: aggregation time limit used on USB client endpoint + * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system + * @hdr_proc_ctx_tbl_lcl: where proc_ctx tbl resides true-local, false-system + * @hdr_mem: header memory + * @hdr_proc_ctx_mem: processing context memory + * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system + * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system + * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system + * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system + * @power_mgmt_wq: workqueue for power management + * @transport_power_mgmt_wq: workqueue transport related power management + * @tag_process_before_gating: indicates whether to start tag process before + * gating IPA clocks + * @transport_pm: transport power management related information + * @disconnect_lock: protects LAN_CONS packet receive notification CB + * @ipa3_active_clients: structure for reference counting connected IPA clients + * @ipa_hw_type: type of IPA HW type (e.g. IPA 1.0, IPA 1.1 etc') + * @ipa3_hw_mode: mode of IPA HW mode (e.g. Normal, Virtual or over PCIe) + * @use_ipa_teth_bridge: use tethering bridge driver + * @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules + * @logbuf: ipc log buffer for high priority messages + * @logbuf_low: ipc log buffer for low priority messages + * @ipa_wdi2: using wdi-2.0 + * @use_64_bit_dma_mask: using 64bits dma mask + * @ipa_bus_hdl: msm driver handle for the data path bus + * @ctrl: holds the core specific operations based on + * core version (vtable like) + * @pkt_init_imm_opcode: opcode for IP_PACKET_INIT imm cmd + * @enable_clock_scaling: clock scaling is enabled ? + * @curr_ipa_clk_rate: IPA current clock rate + * @wcstats: wlan common buffer stats + * @uc_ctx: uC interface context + * @uc_wdi_ctx: WDI specific fields for uC interface + * @ipa_num_pipes: The number of pipes used by IPA HW + * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided + * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA + * @apply_rg10_wa: Indicates whether to use register group 10 workaround + * @gsi_ch20_wa: Indicates whether to apply GSI physical channel 20 workaround + * @w_lock: Indicates the wakeup source. + * @wakelock_ref_cnt: Indicates the number of times wakelock is acquired + * @ipa_initialization_complete: Indicates that IPA is fully initialized + * @ipa_ready_cb_list: A list of all the clients who require a CB when IPA + * driver is ready/initialized. + * @init_completion_obj: Completion object to be used in case IPA driver hasn't + * finished initializing. 
Example of use - IOCTLs to /dev/ipa + * IPA context - holds all relevant info about IPA driver and its state + */ +struct ipa3_context { + struct class *class; + dev_t dev_num; + struct device *dev; + struct cdev cdev; + struct ipa3_ep_context ep[IPA3_MAX_NUM_PIPES]; + bool skip_ep_cfg_shadow[IPA3_MAX_NUM_PIPES]; + u32 ep_flt_bitmap; + u32 ep_flt_num; + bool resume_on_connect[IPA_CLIENT_MAX]; + struct ipa3_flt_tbl flt_tbl[IPA3_MAX_NUM_PIPES][IPA_IP_MAX]; + struct idr flt_rule_ids[IPA_IP_MAX]; + void __iomem *mmio; + u32 ipa_wrapper_base; + u32 ipa_wrapper_size; + struct ipa3_hdr_tbl hdr_tbl; + struct ipa3_hdr_proc_ctx_tbl hdr_proc_ctx_tbl; + struct ipa3_rt_tbl_set rt_tbl_set[IPA_IP_MAX]; + struct ipa3_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX]; + struct kmem_cache *flt_rule_cache; + struct kmem_cache *rt_rule_cache; + struct kmem_cache *hdr_cache; + struct kmem_cache *hdr_offset_cache; + struct kmem_cache *hdr_proc_ctx_cache; + struct kmem_cache *hdr_proc_ctx_offset_cache; + struct kmem_cache *rt_tbl_cache; + struct kmem_cache *tx_pkt_wrapper_cache; + struct kmem_cache *rx_pkt_wrapper_cache; + unsigned long rt_idx_bitmap[IPA_IP_MAX]; + struct mutex lock; + u16 smem_sz; + u16 smem_restricted_bytes; + u16 smem_reqd_sz; + struct ipa3_nat_mem nat_mem; + u32 excp_hdr_hdl; + u32 dflt_v4_rt_rule_hdl; + u32 dflt_v6_rt_rule_hdl; + uint aggregation_type; + uint aggregation_byte_limit; + uint aggregation_time_limit; + bool hdr_tbl_lcl; + bool hdr_proc_ctx_tbl_lcl; + struct ipa_mem_buffer hdr_mem; + struct ipa_mem_buffer hdr_proc_ctx_mem; + bool ip4_rt_tbl_hash_lcl; + bool ip4_rt_tbl_nhash_lcl; + bool ip6_rt_tbl_hash_lcl; + bool ip6_rt_tbl_nhash_lcl; + bool ip4_flt_tbl_hash_lcl; + bool ip4_flt_tbl_nhash_lcl; + bool ip6_flt_tbl_hash_lcl; + bool ip6_flt_tbl_nhash_lcl; + struct ipa3_active_clients ipa3_active_clients; + struct ipa3_active_clients_log_ctx ipa3_active_clients_logging; + struct workqueue_struct *power_mgmt_wq; + struct workqueue_struct *transport_power_mgmt_wq; + bool tag_process_before_gating; + struct ipa3_transport_pm transport_pm; + unsigned long gsi_evt_comm_hdl; + u32 gsi_evt_comm_ring_rem; + u32 clnt_hdl_cmd; + u32 clnt_hdl_data_in; + u32 clnt_hdl_data_out; + spinlock_t disconnect_lock; + u8 a5_pipe_index; + struct list_head intf_list; + struct list_head msg_list; + struct list_head pull_msg_list; + struct mutex msg_lock; + wait_queue_head_t msg_waitq; + enum ipa_hw_type ipa_hw_type; + enum ipa3_hw_mode ipa3_hw_mode; + bool ipa_config_is_mhi; + bool use_ipa_teth_bridge; + bool modem_cfg_emb_pipe_flt; + bool ipa_wdi2; + bool use_64_bit_dma_mask; + /* featurize if memory footprint becomes a concern */ + struct ipa3_stats stats; + void *smem_pipe_mem; + void *logbuf; + void *logbuf_low; + u32 ipa_bus_hdl; + struct ipa3_controller *ctrl; + struct idr ipa_idr; + struct device *pdev; + struct device *uc_pdev; + spinlock_t idr_lock; + u32 enable_clock_scaling; + u32 curr_ipa_clk_rate; + bool q6_proxy_clk_vote_valid; + u32 ipa_num_pipes; + dma_addr_t pkt_init_imm[IPA3_MAX_NUM_PIPES]; + u32 pkt_init_imm_opcode; + + struct ipa3_wlan_comm_memb wc_memb; + + struct ipa3_uc_ctx uc_ctx; + + struct ipa3_uc_wdi_ctx uc_wdi_ctx; + struct ipa3_uc_ntn_ctx uc_ntn_ctx; + u32 wan_rx_ring_size; + u32 lan_rx_ring_size; + bool skip_uc_pipe_reset; + unsigned long gsi_dev_hdl; + u32 ee; + bool apply_rg10_wa; + bool gsi_ch20_wa; + bool smmu_present; + bool smmu_s1_bypass; + u32 wdi_map_cnt; + struct wakeup_source w_lock; + struct ipa3_wakelock_ref_cnt wakelock_ref_cnt; + /* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */ + 
bool ipa_client_apps_wan_cons_agg_gro; + /* M-release support to know client pipes */ + struct ipa3cm_client_info ipacm_client[IPA3_MAX_NUM_PIPES]; + bool tethered_flow_control; + bool ipa_initialization_complete; + struct list_head ipa_ready_cb_list; + struct completion init_completion_obj; + struct completion uc_loaded_completion_obj; + struct ipa3_smp2p_info smp2p_info; + u32 ipa_tz_unlock_reg_num; + struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg; + struct ipa_dma_task_info dma_task_info; + struct ipa_hw_stats hw_stats; +}; + +struct ipa3_plat_drv_res { + bool use_ipa_teth_bridge; + u32 ipa_mem_base; + u32 ipa_mem_size; + u32 transport_mem_base; + u32 transport_mem_size; + u32 ipa_irq; + u32 transport_irq; + u32 ipa_pipe_mem_start_ofst; + u32 ipa_pipe_mem_size; + enum ipa_hw_type ipa_hw_type; + enum ipa3_hw_mode ipa3_hw_mode; + u32 ee; + bool modem_cfg_emb_pipe_flt; + bool ipa_wdi2; + bool use_64_bit_dma_mask; + bool use_bw_vote; + u32 wan_rx_ring_size; + u32 lan_rx_ring_size; + bool skip_uc_pipe_reset; + bool apply_rg10_wa; + bool gsi_ch20_wa; + bool tethered_flow_control; + u32 ipa_tz_unlock_reg_num; + struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg; +}; + +/** + * struct ipa3_mem_partition - represents IPA RAM Map as read from DTS + * Order and type of members should not be changed without a suitable change + * to DTS file or the code that reads it. + * + * IPA SRAM memory layout: + * +-------------------------+ + * | UC MEM | + * +-------------------------+ + * | UC INFO | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V4 FLT HDR HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V4 FLT HDR NON-HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V6 FLT HDR HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V6 FLT HDR NON-HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V4 RT HDR HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V4 RT HDR NON-HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V6 RT HDR HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V6 RT HDR NON-HASHABLE | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | MODEM HDR | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | MODEM PROC CTX | + * +-------------------------+ + * | APPS PROC CTX | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | PDN CONFIG | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | QUOTA STATS | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | TETH STATS | + * 
+-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V4 FLT STATS | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V6 FLT STATS | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V4 RT STATS | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | V6 RT STATS | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | DROP STATS | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | MODEM MEM | + * +-------------------------+ + * | CANARY | + * +-------------------------+ + * | UC EVENT RING | From IPA 3.5 + * +-------------------------+ + */ +struct ipa3_mem_partition { + u32 ofst_start; + u32 nat_ofst; + u32 nat_size; + u32 v4_flt_hash_ofst; + u32 v4_flt_hash_size; + u32 v4_flt_hash_size_ddr; + u32 v4_flt_nhash_ofst; + u32 v4_flt_nhash_size; + u32 v4_flt_nhash_size_ddr; + u32 v6_flt_hash_ofst; + u32 v6_flt_hash_size; + u32 v6_flt_hash_size_ddr; + u32 v6_flt_nhash_ofst; + u32 v6_flt_nhash_size; + u32 v6_flt_nhash_size_ddr; + u32 v4_rt_num_index; + u32 v4_modem_rt_index_lo; + u32 v4_modem_rt_index_hi; + u32 v4_apps_rt_index_lo; + u32 v4_apps_rt_index_hi; + u32 v4_rt_hash_ofst; + u32 v4_rt_hash_size; + u32 v4_rt_hash_size_ddr; + u32 v4_rt_nhash_ofst; + u32 v4_rt_nhash_size; + u32 v4_rt_nhash_size_ddr; + u32 v6_rt_num_index; + u32 v6_modem_rt_index_lo; + u32 v6_modem_rt_index_hi; + u32 v6_apps_rt_index_lo; + u32 v6_apps_rt_index_hi; + u32 v6_rt_hash_ofst; + u32 v6_rt_hash_size; + u32 v6_rt_hash_size_ddr; + u32 v6_rt_nhash_ofst; + u32 v6_rt_nhash_size; + u32 v6_rt_nhash_size_ddr; + u32 modem_hdr_ofst; + u32 modem_hdr_size; + u32 apps_hdr_ofst; + u32 apps_hdr_size; + u32 apps_hdr_size_ddr; + u32 modem_hdr_proc_ctx_ofst; + u32 modem_hdr_proc_ctx_size; + u32 apps_hdr_proc_ctx_ofst; + u32 apps_hdr_proc_ctx_size; + u32 apps_hdr_proc_ctx_size_ddr; + u32 modem_comp_decomp_ofst; + u32 modem_comp_decomp_size; + u32 modem_ofst; + u32 modem_size; + u32 apps_v4_flt_hash_ofst; + u32 apps_v4_flt_hash_size; + u32 apps_v4_flt_nhash_ofst; + u32 apps_v4_flt_nhash_size; + u32 apps_v6_flt_hash_ofst; + u32 apps_v6_flt_hash_size; + u32 apps_v6_flt_nhash_ofst; + u32 apps_v6_flt_nhash_size; + u32 uc_info_ofst; + u32 uc_info_size; + u32 end_ofst; + u32 apps_v4_rt_hash_ofst; + u32 apps_v4_rt_hash_size; + u32 apps_v4_rt_nhash_ofst; + u32 apps_v4_rt_nhash_size; + u32 apps_v6_rt_hash_ofst; + u32 apps_v6_rt_hash_size; + u32 apps_v6_rt_nhash_ofst; + u32 apps_v6_rt_nhash_size; + u32 uc_event_ring_ofst; + u32 uc_event_ring_size; + u32 pdn_config_ofst; + u32 pdn_config_size; + u32 stats_quota_ofst; + u32 stats_quota_size; + u32 stats_tethering_ofst; + u32 stats_tethering_size; + u32 stats_flt_v4_ofst; + u32 stats_flt_v4_size; + u32 stats_flt_v6_ofst; + u32 stats_flt_v6_size; + u32 stats_rt_v4_ofst; + u32 stats_rt_v4_size; + u32 stats_rt_v6_ofst; + u32 stats_rt_v6_size; + u32 stats_drop_ofst; + u32 stats_drop_size; +}; + +struct ipa3_controller { + struct ipa3_mem_partition mem_partition; + u32 ipa_clk_rate_turbo; + u32 ipa_clk_rate_nominal; + u32 ipa_clk_rate_svs; + u32 clock_scaling_bw_threshold_turbo; + u32 clock_scaling_bw_threshold_nominal; + u32 
ipa_reg_base_ofst; + u32 max_holb_tmr_val; + void (*ipa_sram_read_settings)(void); + int (*ipa_init_sram)(void); + int (*ipa_init_hdr)(void); + int (*ipa_init_rt4)(void); + int (*ipa_init_rt6)(void); + int (*ipa_init_flt4)(void); + int (*ipa_init_flt6)(void); + int (*ipa3_read_ep_reg)(char *buff, int max_len, int pipe); + int (*ipa3_commit_flt)(enum ipa_ip_type ip); + int (*ipa3_commit_rt)(enum ipa_ip_type ip); + int (*ipa3_commit_hdr)(void); + void (*ipa3_enable_clks)(void); + void (*ipa3_disable_clks)(void); + struct msm_bus_scale_pdata *msm_bus_data_ptr; +}; + +extern struct ipa3_context *ipa3_ctx; + +/* public APIs */ +/* Generic GSI channels functions */ +int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params, + struct ipa_req_chan_out_params *out_params); + +int ipa3_release_gsi_channel(u32 clnt_hdl); + +int ipa3_start_gsi_channel(u32 clnt_hdl); + +int ipa3_stop_gsi_channel(u32 clnt_hdl); + +int ipa3_reset_gsi_channel(u32 clnt_hdl); + +int ipa3_reset_gsi_event_ring(u32 clnt_hdl); + +/* Specific xDCI channels functions */ +int ipa3_set_usb_max_packet_size( + enum ipa_usb_max_usb_packet_size usb_max_packet_size); + +int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid); + +int ipa3_xdci_connect(u32 clnt_hdl); + +int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id); + +int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + bool should_force_clear, u32 qmi_req_id, bool is_dpl); + +int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl); + +/* + * Remove ep delay + */ +int ipa3_clear_endpoint_delay(u32 clnt_hdl); + +/* + * Configuration + */ +int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg); + +int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg); + +int ipa3_cfg_ep_conn_track(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ep_conn_track); + +int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg); + +int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg); + +int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg); + +int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg); + +int ipa3_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ipa_ep_cfg); + +int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg); + +int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg); + +int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg); + +int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); + +/* + * Header removal / addition + */ +int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs); + +int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls); + +int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user); + +int ipa3_commit_hdr(void); + +int ipa3_reset_hdr(void); + +int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup); + +int ipa3_put_hdr(u32 hdr_hdl); + +int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy); + +/* + * Header Processing Context + */ +int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs); + +int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); + +int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls, + bool by_user); + +/* + * Routing + */ 
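+/*
+ * The prototypes below manage the IPv4/IPv6 routing tables: rules can be
+ * added (optionally after an existing rule), modified, deleted, committed
+ * to HW and reset, and table handles can be looked up, queried by index
+ * and released.
+ *
+ * Illustrative sketch only, not part of this snapshot: a kernel client
+ * could install a single committed IPv4 rule roughly as below. The
+ * ipa_ioc_add_rt_rule layout assumed here (commit/ip/rt_tbl_name/num_rules
+ * followed by a rules[] array whose entries embed an ipa_rt_rule with a
+ * dst client) and the table name "example.tbl" are assumptions, not taken
+ * from this file:
+ *
+ *	struct ipa_ioc_add_rt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(req->rules[0]), GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->commit = 1;
+ *	req->ip = IPA_IP_v4;
+ *	strlcpy(req->rt_tbl_name, "example.tbl", IPA_RESOURCE_NAME_MAX);
+ *	req->num_rules = 1;
+ *	req->rules[0].rule.dst = IPA_CLIENT_APPS_LAN_CONS;
+ *	if (ipa3_add_rt_rule(req))
+ *		IPAERR("failed to add rt rule\n");
+ *	kfree(req);
+ */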
+int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); + +int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules); + +int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); + +int ipa3_commit_rt(enum ipa_ip_type ip); + +int ipa3_reset_rt(enum ipa_ip_type ip); + +int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); + +int ipa3_put_rt_tbl(u32 rt_tbl_hdl); + +int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in); + +int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); + +/* + * Filtering + */ +int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); + +int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules); + +int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); + +int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); + +int ipa3_commit_flt(enum ipa_ip_type ip); + +int ipa3_reset_flt(enum ipa_ip_type ip); + +/* + * NAT + */ +int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem); + +int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init); + +int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma); + +int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del); + +int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn); + +/* + * Messaging + */ +int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback); +int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback); +int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta); + +/* + * Interface + */ +int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx); +int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext); +int ipa3_deregister_intf(const char *name); + +/* + * Aggregation + */ +int ipa3_set_aggr_mode(enum ipa_aggr_mode mode); + +int ipa3_set_qcncm_ndp_sig(char sig[3]); + +int ipa3_set_single_ndp_per_mbim(bool enable); + +/* + * Data path + */ +int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata); + +/* + * To transfer multiple data packets + * While passing the data descriptor list, the anchor node + * should be of type struct ipa_tx_data_desc not list_head + */ +int ipa3_tx_dp_mul(enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc); + +void ipa3_free_skb(struct ipa_rx_data *data); + +/* + * System pipes + */ +int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl); + +int ipa3_teardown_sys_pipe(u32 clnt_hdl); + +int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_transport_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status); + +int ipa3_sys_teardown(u32 clnt_hdl); + +int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl); + +int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out); +int ipa3_disconnect_wdi_pipe(u32 clnt_hdl); +int ipa3_enable_wdi_pipe(u32 clnt_hdl); +int ipa3_disable_wdi_pipe(u32 clnt_hdl); +int ipa3_resume_wdi_pipe(u32 clnt_hdl); +int ipa3_suspend_wdi_pipe(u32 clnt_hdl); +int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats); +u16 ipa3_get_smem_restr_bytes(void); +int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes); +int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp); +int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl); +int ipa3_ntn_uc_reg_rdyCB(void 
(*ipauc_ready_cb)(void *), void *priv); +void ipa3_ntn_uc_dereg_rdyCB(void); + +/* + * To retrieve doorbell physical address of + * wlan pipes + */ +int ipa3_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out); + +/* + * To register uC ready callback if uC not ready + * and also check uC readiness + * if uC not ready only, register callback + */ +int ipa3_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param); +/* + * To de-register uC ready callback + */ +int ipa3_uc_dereg_rdyCB(void); + +/* + * Tethering bridge (Rmnet / MBIM) + */ +int ipa3_teth_bridge_init(struct teth_bridge_init_params *params); + +int ipa3_teth_bridge_disconnect(enum ipa_client_type client); + +int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params); + +/* + * Tethering client info + */ +void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink); + +enum ipacm_client_enum ipa3_get_client(int pipe_idx); + +bool ipa3_get_client_uplink(int pipe_idx); + +int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats); + +int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota); +/* + * IPADMA + */ +int ipa3_dma_init(void); + +int ipa3_dma_enable(void); + +int ipa3_dma_disable(void); + +int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len); + +int ipa3_dma_async_memcpy(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param); + +int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); + +void ipa3_dma_destroy(void); + +/* + * MHI + */ + +int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params); + +int ipa3_connect_mhi_pipe( + struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl); + +int ipa3_disconnect_mhi_pipe(u32 clnt_hdl); + +bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client); + +int ipa3_mhi_reset_channel_internal(enum ipa_client_type client); + +int ipa3_mhi_start_channel_internal(enum ipa_client_type client); + +bool ipa3_has_open_aggr_frame(enum ipa_client_type client); + +int ipa3_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index); + +int ipa3_mhi_destroy_channel(enum ipa_client_type client); + +/* + * mux id + */ +int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in); + +/* + * interrupts + */ +int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data); + +int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt); + +/* + * Miscellaneous + */ +int ipa3_get_ep_mapping(enum ipa_client_type client); + +bool ipa3_is_ready(void); + +void ipa3_proxy_clk_vote(void); +void ipa3_proxy_clk_unvote(void); + +bool ipa3_is_client_handle_valid(u32 clnt_hdl); + +enum ipa_client_type ipa3_get_client_mapping(int pipe_idx); + +void ipa_init_ep_flt_bitmap(void); + +bool ipa_is_ep_support_flt(int pipe_idx); + +enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx); + +bool ipa3_get_modem_cfg_emb_pipe_flt(void); + +u8 ipa3_get_qmb_master_sel(enum ipa_client_type client); + +/* internal functions */ + +int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, + struct ipa_api_controller *api_ctrl); + +bool ipa_is_modem_pipe(int pipe_idx); + +int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc, + bool in_atomic); +int ipa3_send(struct ipa3_sys_context *sys, + u32 num_desc, + struct ipa3_desc *desc, + bool in_atomic); +int ipa3_get_ep_mapping(enum ipa_client_type client); +int ipa_get_ep_group(enum 
ipa_client_type client); + +int ipa3_generate_hw_rule(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + u8 **buf, + u16 *en_rule); +int ipa3_init_hw(void); +struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name); +int ipa3_set_single_ndp_per_mbim(bool enable); +void ipa3_debugfs_init(void); +void ipa3_debugfs_remove(void); + +void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size); +#ifdef IPA_DEBUG +#define IPA_DUMP_BUFF(base, phy_base, size) \ + ipa3_dump_buff_internal(base, phy_base, size) +#else +#define IPA_DUMP_BUFF(base, phy_base, size) +#endif +int ipa3_init_mem_partition(struct device_node *dev_node); +int ipa3_controller_static_bind(struct ipa3_controller *controller, + enum ipa_hw_type ipa_hw_type); +int ipa3_cfg_route(struct ipahal_reg_route *route); +int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout); +int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr); +int ipa3_cfg_filter(u32 disable); +int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary); +struct ipa3_context *ipa3_get_ctx(void); +void ipa3_enable_clks(void); +void ipa3_disable_clks(void); +void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id); +int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info + *id); +void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id); +void ipa3_dec_client_disable_clks_no_block( + struct ipa_active_client_logging_info *id); +void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id, + bool int_ctx); +void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id, + bool int_ctx); +int ipa3_active_clients_log_print_buffer(char *buf, int size); +int ipa3_active_clients_log_print_table(char *buf, int size); +void ipa3_active_clients_log_clear(void); +int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev); +int __ipa3_del_rt_rule(u32 rule_hdl); +int __ipa3_del_hdr(u32 hdr_hdl, bool by_user); +int __ipa3_release_hdr(u32 hdr_hdl); +int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl); +int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe); +int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe); +void _ipa_enable_clks_v3_0(void); +void _ipa_disable_clks_v3_0(void); +struct device *ipa3_get_dma_dev(void); +void ipa3_suspend_active_aggr_wa(u32 clnt_hdl); +void ipa3_suspend_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data); + +ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos); +int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count); +int ipa3_query_intf(struct ipa_ioc_query_intf *lookup); +int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx); +int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx); +int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext); + +void wwan_cleanup(void); + +int ipa3_teth_bridge_driver_init(void); +void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data); + +int _ipa_init_sram_v3(void); +int _ipa_init_hdr_v3_0(void); +int _ipa_init_rt4_v3(void); +int _ipa_init_rt6_v3(void); +int _ipa_init_flt4_v3(void); +int _ipa_init_flt6_v3(void); + +int __ipa_commit_flt_v3(enum ipa_ip_type ip); +int __ipa_commit_rt_v3(enum ipa_ip_type ip); + +int __ipa_commit_hdr_v3_0(void); +void ipa3_skb_recycle(struct sk_buff *skb); +void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx); +void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx); + +int 
ipa3_enable_data_path(u32 clnt_hdl); +int ipa3_disable_data_path(u32 clnt_hdl); +int ipa3_alloc_rule_id(struct idr *rule_ids); +int ipa3_id_alloc(void *ptr); +void *ipa3_id_find(u32 id); +void ipa3_id_remove(u32 id); +int ipa3_enable_force_clear(u32 request_id, bool throttle_source, + u32 source_pipe_bitmask); +int ipa3_disable_force_clear(u32 request_id); + +int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps); + +int ipa3_cfg_ep_status(u32 clnt_hdl, + const struct ipahal_reg_ep_cfg_status *ipa_ep_cfg); + +int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name name); +int ipa3_suspend_resource_sync(enum ipa_rm_resource_name name); +int ipa3_resume_resource(enum ipa_rm_resource_name name); +bool ipa3_should_pipe_be_suspended(enum ipa_client_type client); +int ipa3_tag_aggr_force_close(int pipe_num); + +void ipa3_active_clients_unlock(void); +int ipa3_wdi_init(void); +int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id); +int ipa3_tag_process(struct ipa3_desc *desc, int num_descs, + unsigned long timeout); + +void ipa3_q6_pre_shutdown_cleanup(void); +void ipa3_q6_post_shutdown_cleanup(void); +int ipa3_init_q6_smem(void); + +int ipa3_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req); +int ipa3_mhi_query_ch_info(enum ipa_client_type client, + struct gsi_chan_info *ch_info); + +int ipa3_uc_interface_init(void); +int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client); +int ipa3_uc_state_check(void); +int ipa3_uc_loaded_check(void); +void ipa3_uc_load_notify(void); +int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status, + bool polling_mode, unsigned long timeout_jiffies); +void ipa3_uc_register_handlers(enum ipa3_hw_features feature, + struct ipa3_uc_hdlrs *hdlrs); +int ipa3_create_nat_device(void); +int ipa3_uc_notify_clk_state(bool enabled); +void ipa3_dma_async_memcpy_notify_cb(void *priv, + enum ipa_dp_evt_type evt, unsigned long data); + +int ipa3_uc_update_hw_flags(u32 flags); + +int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void)); +void ipa3_uc_mhi_cleanup(void); +int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd); +int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr, + u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx, + u32 first_evt_idx); +int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle, + int contexArrayIndex, int channelDirection); +int ipa3_uc_mhi_reset_channel(int channelHandle); +int ipa3_uc_mhi_suspend_channel(int channelHandle); +int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected); +int ipa3_uc_mhi_stop_event_update_channel(int channelHandle); +int ipa3_uc_mhi_print_stats(char *dbg_buff, int size); +int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); +void ipa3_tag_destroy_imm(void *user1, int user2); +const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info + (enum ipa_client_type client); +void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val); + +/* Hardware stats */ + +#define IPA_STATS_MAX_PIPE_BIT 32 + +struct ipa_teth_stats_endpoints { + u32 prod_mask; + u32 dst_ep_mask[IPA_STATS_MAX_PIPE_BIT]; +}; + +struct ipa_flt_rt_stats { + u32 num_pkts; + u32 num_pkts_hash; +}; + +int ipa_hw_stats_init(void); + +int ipa_debugfs_init_stats(struct dentry *parent); + +int ipa_init_quota_stats(u32 pipe_bitmask); + +int ipa_get_quota_stats(struct ipa_quota_stats_all *out); + +int ipa_reset_quota_stats(enum ipa_client_type client); + +int 
ipa_reset_all_quota_stats(void); + +int ipa_init_drop_stats(u32 pipe_bitmask); + +int ipa_get_drop_stats(struct ipa_drop_stats_all *out); + +int ipa_reset_drop_stats(enum ipa_client_type client); + +int ipa_reset_all_drop_stats(void); + +int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in); + +int ipa_get_teth_stats(enum ipa_client_type prod, + struct ipa_quota_stats_all *out); + +int ipa_reset_teth_stats(enum ipa_client_type prod, enum ipa_client_type cons); + +int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod); + +int ipa_reset_all_teth_stats(void); + +int ipa_flt_rt_stats_add_rule_id(enum ipa_ip_type ip, bool filtering, + u16 rule_id); + +int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering); + +int ipa_flt_rt_stats_clear_rule_ids(enum ipa_ip_type ip, bool filtering); + +int ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id, + struct ipa_flt_rt_stats *out); + +int ipa_reset_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id); + +int ipa_reset_all_flt_rt_stats(enum ipa_ip_type ip, bool filtering); + +u32 ipa3_get_num_pipes(void); +struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void); +struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void); +struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void); +struct iommu_domain *ipa3_get_smmu_domain(void); +struct iommu_domain *ipa3_get_uc_smmu_domain(void); +struct iommu_domain *ipa3_get_wlan_smmu_domain(void); +int ipa3_iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); +int ipa3_ap_suspend(struct device *dev); +int ipa3_ap_resume(struct device *dev); +int ipa3_init_interrupts(void); +struct iommu_domain *ipa3_get_smmu_domain(void); +int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); +int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); +int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple); +int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple); +void ipa3_set_resorce_groups_min_max_limits(void); +void ipa3_suspend_apps_pipes(bool suspend); +int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, + enum ipa_ip_type ip_type, + bool hashable, + struct ipahal_flt_rule_entry entry[], + int *num_entry); +int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, + enum ipa_ip_type ip_type, + bool hashable, + struct ipahal_rt_rule_entry entry[], + int *num_entry); +int ipa3_restore_suspend_handler(void); +int ipa3_inject_dma_task_for_gsi(void); +int ipa3_uc_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr); +void ipa3_inc_acquire_wakelock(void); +void ipa3_dec_release_wakelock(void); +int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base); +int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data); +const char *ipa_hw_error_str(enum ipa3_hw_errors err_type); +int ipa_gsi_ch20_wa(void); +int ipa3_rx_poll(u32 clnt_hdl, int budget); +void ipa3_recycle_wan_skb(struct sk_buff *skb); +int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map); +int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr, + u32 size, bool map); +void ipa3_reset_freeze_vote(void); +int ipa3_ntn_init(void); +int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats); +struct dentry *ipa_debugfs_get_root(void); +bool ipa3_is_msm_device(void); +struct device *ipa3_get_pdev(void); +void ipa3_enable_dcd(void); +void ipa3_disable_prefetch(enum ipa_client_type client); +int ipa3_alloc_common_event_ring(void); +int 
ipa3_allocate_dma_task_for_gsi(void); +void ipa3_free_dma_task_for_gsi(void); +#endif /* _IPA3_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c new file mode 100644 index 000000000000..b3a61c9fc985 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c @@ -0,0 +1,569 @@ +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include "ipa_i.h" + +#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq" +#define DIS_SUSPEND_INTERRUPT_TIMEOUT 5 +#define IPA_IRQ_NUM_MAX 32 + +struct ipa3_interrupt_info { + ipa_irq_handler_t handler; + enum ipa_irq_type interrupt; + void *private_data; + bool deferred_flag; +}; + +struct ipa3_interrupt_work_wrap { + struct work_struct interrupt_work; + ipa_irq_handler_t handler; + enum ipa_irq_type interrupt; + void *private_data; + void *interrupt_data; +}; + +static struct ipa3_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX]; +static struct workqueue_struct *ipa_interrupt_wq; +static u32 ipa_ee; + +static void ipa3_tx_suspend_interrupt_wa(void); +static void ipa3_enable_tx_suspend_wa(struct work_struct *work); +static DECLARE_DELAYED_WORK(dwork_en_suspend_int, + ipa3_enable_tx_suspend_wa); +static spinlock_t suspend_wa_lock; +static void ipa3_process_interrupts(bool isr_context); + +static int ipa3_irq_mapping[IPA_IRQ_MAX] = { + [IPA_UC_TX_CMD_Q_NOT_FULL_IRQ] = -1, + [IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ] = -1, + [IPA_BAD_SNOC_ACCESS_IRQ] = 0, + [IPA_EOT_COAL_IRQ] = -1, + [IPA_UC_IRQ_0] = 2, + [IPA_UC_IRQ_1] = 3, + [IPA_UC_IRQ_2] = 4, + [IPA_UC_IRQ_3] = 5, + [IPA_UC_IN_Q_NOT_EMPTY_IRQ] = 6, + [IPA_UC_RX_CMD_Q_NOT_FULL_IRQ] = 7, + [IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ] = 8, + [IPA_RX_ERR_IRQ] = 9, + [IPA_DEAGGR_ERR_IRQ] = 10, + [IPA_TX_ERR_IRQ] = 11, + [IPA_STEP_MODE_IRQ] = 12, + [IPA_PROC_ERR_IRQ] = 13, + [IPA_TX_SUSPEND_IRQ] = 14, + [IPA_TX_HOLB_DROP_IRQ] = 15, + [IPA_GSI_IDLE_IRQ] = 16, +}; + +static void ipa3_interrupt_defer(struct work_struct *work); +static DECLARE_WORK(ipa3_interrupt_defer_work, ipa3_interrupt_defer); + +static void ipa3_deferred_interrupt_work(struct work_struct *work) +{ + struct ipa3_interrupt_work_wrap *work_data = + container_of(work, + struct ipa3_interrupt_work_wrap, + interrupt_work); + IPADBG("call handler from workq...\n"); + work_data->handler(work_data->interrupt, work_data->private_data, + work_data->interrupt_data); + kfree(work_data->interrupt_data); + kfree(work_data); +} + +static bool ipa3_is_valid_ep(u32 ep_suspend_data) +{ + u32 bmsk = 1; + u32 i = 0; + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + if ((ep_suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) + return true; + bmsk = bmsk << 1; + } + return false; +} + +static int ipa3_handle_interrupt(int irq_num, bool isr_context) +{ + struct ipa3_interrupt_info interrupt_info; + struct ipa3_interrupt_work_wrap *work_data; + u32 suspend_data; + void *interrupt_data = NULL; + struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL; + int res; + + interrupt_info = ipa_interrupt_to_cb[irq_num]; 
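+
+	/*
+	 * Dispatch: TX_SUSPEND and uC interrupts get special pre-processing
+	 * in the switch below; after that, handlers registered with
+	 * deferred_flag, or any handler reached from ISR context, are run
+	 * from the workqueue so that clients are not called in atomic
+	 * context.
+	 */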
+ if (interrupt_info.handler == NULL) { + IPAERR("A callback function wasn't set for interrupt num %d\n", + irq_num); + return -EINVAL; + } + + switch (interrupt_info.interrupt) { + case IPA_TX_SUSPEND_IRQ: + IPADBG_LOW("processing TX_SUSPEND interrupt work-around\n"); + ipa3_tx_suspend_interrupt_wa(); + suspend_data = ipahal_read_reg_n(IPA_IRQ_SUSPEND_INFO_EE_n, + ipa_ee); + IPADBG_LOW("get interrupt %d\n", suspend_data); + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) { + /* Clearing L2 interrupts status */ + ipahal_write_reg_n(IPA_SUSPEND_IRQ_CLR_EE_n, + ipa_ee, suspend_data); + } + if (!ipa3_is_valid_ep(suspend_data)) + return 0; + + suspend_interrupt_data = + kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC); + if (!suspend_interrupt_data) { + IPAERR("failed allocating suspend_interrupt_data\n"); + return -ENOMEM; + } + suspend_interrupt_data->endpoints = suspend_data; + interrupt_data = suspend_interrupt_data; + break; + case IPA_UC_IRQ_0: + if (ipa3_ctx->apply_rg10_wa) { + /* + * Early detect of uC crash. If RG10 workaround is + * enable uC crash will not be detected as before + * processing uC event the interrupt is cleared using + * uC register write which times out as it crashed + * already. + */ + if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_ERROR) + ipa3_ctx->uc_ctx.uc_failed = true; + } + break; + default: + break; + } + + /* Force defer processing if in ISR context. */ + if (interrupt_info.deferred_flag || isr_context) { + work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap), + GFP_ATOMIC); + if (!work_data) { + IPAERR("failed allocating ipa3_interrupt_work_wrap\n"); + res = -ENOMEM; + goto fail_alloc_work; + } + INIT_WORK(&work_data->interrupt_work, + ipa3_deferred_interrupt_work); + work_data->handler = interrupt_info.handler; + work_data->interrupt = interrupt_info.interrupt; + work_data->private_data = interrupt_info.private_data; + work_data->interrupt_data = interrupt_data; + queue_work(ipa_interrupt_wq, &work_data->interrupt_work); + + } else { + interrupt_info.handler(interrupt_info.interrupt, + interrupt_info.private_data, + interrupt_data); + kfree(interrupt_data); + } + + return 0; + +fail_alloc_work: + kfree(interrupt_data); + return res; +} + +static void ipa3_enable_tx_suspend_wa(struct work_struct *work) +{ + u32 en; + u32 suspend_bmask; + int irq_num; + + IPADBG_LOW("Enter\n"); + + irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ]; + + if (irq_num == -1) { + WARN_ON(1); + return; + } + + /* make sure ipa hw is clocked on*/ + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + suspend_bmask = 1 << irq_num; + /*enable TX_SUSPEND_IRQ*/ + en |= suspend_bmask; + IPADBG("enable TX_SUSPEND_IRQ, IPA_IRQ_EN_EE reg, write val = %u\n" + , en); + ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, en); + ipa3_process_interrupts(false); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + IPADBG_LOW("Exit\n"); +} + +static void ipa3_tx_suspend_interrupt_wa(void) +{ + u32 val; + u32 suspend_bmask; + int irq_num; + + IPADBG_LOW("Enter\n"); + irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ]; + + if (irq_num == -1) { + WARN_ON(1); + return; + } + + /*disable TX_SUSPEND_IRQ*/ + val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + suspend_bmask = 1 << irq_num; + val &= ~suspend_bmask; + IPADBG("Disabling TX_SUSPEND_IRQ, write val: %u to IPA_IRQ_EN_EE reg\n", + val); + ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val); + + IPADBG_LOW(" processing suspend interrupt work-around, delayed work\n"); + queue_delayed_work(ipa_interrupt_wq, 
&dwork_en_suspend_int, + msecs_to_jiffies(DIS_SUSPEND_INTERRUPT_TIMEOUT)); + + IPADBG_LOW("Exit\n"); +} + +static inline bool is_uc_irq(int irq_num) +{ + if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 && + ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3) + return true; + else + return false; +} + +static void ipa3_process_interrupts(bool isr_context) +{ + u32 reg; + u32 bmsk; + u32 i = 0; + u32 en; + unsigned long flags; + bool uc_irq; + + IPADBG_LOW("Enter\n"); + + spin_lock_irqsave(&suspend_wa_lock, flags); + en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee); + while (en & reg) { + bmsk = 1; + for (i = 0; i < IPA_IRQ_NUM_MAX; i++) { + if (en & reg & bmsk) { + uc_irq = is_uc_irq(i); + + /* + * Clear uC interrupt before processing to avoid + * clearing unhandled interrupts + */ + if (uc_irq) + ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n, + ipa_ee, bmsk); + + /* + * handle the interrupt with spin_lock + * unlocked to avoid calling client in atomic + * context. mutual exclusion still preserved + * as the read/clr is done with spin_lock + * locked. + */ + spin_unlock_irqrestore(&suspend_wa_lock, flags); + ipa3_handle_interrupt(i, isr_context); + spin_lock_irqsave(&suspend_wa_lock, flags); + + /* + * Clear non uC interrupt after processing + * to avoid clearing interrupt data + */ + if (!uc_irq) + ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n, + ipa_ee, bmsk); + } + bmsk = bmsk << 1; + } + /* + * In case uC failed interrupt cannot be cleared. + * Device will crash as part of handling uC event handler. + */ + if (ipa3_ctx->apply_rg10_wa && ipa3_ctx->uc_ctx.uc_failed) + break; + + reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee); + /* since the suspend interrupt HW bug we must + * read again the EN register, otherwise the while is endless + */ + en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + } + + spin_unlock_irqrestore(&suspend_wa_lock, flags); + IPADBG_LOW("Exit\n"); +} + +static void ipa3_interrupt_defer(struct work_struct *work) +{ + IPADBG("processing interrupts in wq\n"); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipa3_process_interrupts(false); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG("Done\n"); +} + +static irqreturn_t ipa3_isr(int irq, void *ctxt) +{ + struct ipa_active_client_logging_info log_info; + + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); + IPADBG_LOW("Enter\n"); + /* defer interrupt handling in case IPA is not clocked on */ + if (ipa3_inc_client_enable_clks_no_block(&log_info)) { + IPADBG("defer interrupt processing\n"); + queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work); + return IRQ_HANDLED; + } + + ipa3_process_interrupts(true); + IPADBG_LOW("Exit\n"); + + ipa3_dec_client_disable_clks(&log_info); + return IRQ_HANDLED; +} +/** + * ipa3_add_interrupt_handler() - Adds handler to an interrupt type + * @interrupt: Interrupt type + * @handler: The handler to be added + * @deferred_flag: whether the handler processing should be deferred in + * a workqueue + * @private_data: the client's private data + * + * Adds handler to an interrupt type and enable the specific bit + * in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled + */ +int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data) +{ + u32 val; + u32 bmsk; + int irq_num; + int client_idx, ep_idx; + + IPADBG("interrupt_enum(%d)\n", interrupt); + if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ || + interrupt >= IPA_IRQ_MAX) { + IPAERR("invalid interrupt number 
%d\n", interrupt); + return -EINVAL; + } + + irq_num = ipa3_irq_mapping[interrupt]; + if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) { + IPAERR("interrupt %d not supported\n", interrupt); + WARN_ON(1); + return -EFAULT; + } + IPADBG("ipa_interrupt_to_cb irq_num(%d)\n", irq_num); + + ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag; + ipa_interrupt_to_cb[irq_num].handler = handler; + ipa_interrupt_to_cb[irq_num].private_data = private_data; + ipa_interrupt_to_cb[irq_num].interrupt = interrupt; + + val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + IPADBG("read IPA_IRQ_EN_EE_n register. reg = %d\n", val); + bmsk = 1 << irq_num; + val |= bmsk; + ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val); + IPADBG("wrote IPA_IRQ_EN_EE_n register. reg = %d\n", val); + + /* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt*/ + if ((interrupt == IPA_TX_SUSPEND_IRQ) && + (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) { + val = ~0; + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) + if (IPA_CLIENT_IS_Q6_CONS(client_idx) || + IPA_CLIENT_IS_Q6_PROD(client_idx)) { + ep_idx = ipa3_get_ep_mapping(client_idx); + IPADBG("modem ep_idx(%d) client_idx = %d\n", + ep_idx, client_idx); + if (ep_idx == -1) + IPADBG("Invalid IPA client\n"); + else + val &= ~(1 << ep_idx); + } + + ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, val); + IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", val); + } + return 0; +} + +/** + * ipa3_remove_interrupt_handler() - Removes handler to an interrupt type + * @interrupt: Interrupt type + * + * Removes the handler and disable the specific bit in IRQ_EN register + */ +int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt) +{ + u32 val; + u32 bmsk; + int irq_num; + + if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ || + interrupt >= IPA_IRQ_MAX) { + IPAERR("invalid interrupt number %d\n", interrupt); + return -EINVAL; + } + + irq_num = ipa3_irq_mapping[interrupt]; + if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) { + IPAERR("interrupt %d not supported\n", interrupt); + WARN_ON(1); + return -EFAULT; + } + + kfree(ipa_interrupt_to_cb[irq_num].private_data); + ipa_interrupt_to_cb[irq_num].deferred_flag = false; + ipa_interrupt_to_cb[irq_num].handler = NULL; + ipa_interrupt_to_cb[irq_num].private_data = NULL; + ipa_interrupt_to_cb[irq_num].interrupt = -1; + + /* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */ + if ((interrupt == IPA_TX_SUSPEND_IRQ) && + (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) { + ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0); + IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0); + } + + val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee); + bmsk = 1 << irq_num; + val &= ~bmsk; + ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val); + + return 0; +} + +/** + * ipa3_interrupts_init() - Initialize the IPA interrupts framework + * @ipa_irq: The interrupt number to allocate + * @ee: Execution environment + * @ipa_dev: The basic device structure representing the IPA driver + * + * - Initialize the ipa_interrupt_to_cb array + * - Clear interrupts status + * - Register the ipa interrupt handler - ipa3_isr + * - Enable apps processor wakeup by IPA interrupts + */ +int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev) +{ + int idx; + int res = 0; + + ipa_ee = ee; + for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) { + ipa_interrupt_to_cb[idx].deferred_flag = false; + ipa_interrupt_to_cb[idx].handler = NULL; + ipa_interrupt_to_cb[idx].private_data = NULL; + ipa_interrupt_to_cb[idx].interrupt = -1; + } + + ipa_interrupt_wq = 
create_singlethread_workqueue( + INTERRUPT_WORKQUEUE_NAME); + if (!ipa_interrupt_wq) { + IPAERR("workqueue creation failed\n"); + return -ENOMEM; + } + + res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr, + IRQF_TRIGGER_RISING, "ipa", ipa_dev); + if (res) { + IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq); + return -ENODEV; + } + IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq); + + res = enable_irq_wake(ipa_irq); + if (res) + IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n", + ipa_irq, res); + else + IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq); + + spin_lock_init(&suspend_wa_lock); + return 0; +} + +/** + * ipa3_suspend_active_aggr_wa() - Emulate suspend IRQ + * @clnt_hndl: suspended client handle, IRQ is emulated for this pipe + * + * Emulate suspend IRQ to unsuspend client which was suspended with an open + * aggregation frame in order to bypass HW bug of IRQ not generated when + * endpoint is suspended during an open aggregation. + */ +void ipa3_suspend_active_aggr_wa(u32 clnt_hdl) +{ + struct ipa3_interrupt_info interrupt_info; + struct ipa3_interrupt_work_wrap *work_data; + struct ipa_tx_suspend_irq_data *suspend_interrupt_data; + int irq_num; + int aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + + if (aggr_active_bitmap & (1 << clnt_hdl)) { + /* force close aggregation */ + ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl)); + + /* simulate suspend IRQ */ + irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ]; + interrupt_info = ipa_interrupt_to_cb[irq_num]; + if (interrupt_info.handler == NULL) { + IPAERR("no CB function for IPA_TX_SUSPEND_IRQ\n"); + return; + } + suspend_interrupt_data = kzalloc( + sizeof(*suspend_interrupt_data), + GFP_ATOMIC); + if (!suspend_interrupt_data) { + IPAERR("failed allocating suspend_interrupt_data\n"); + return; + } + suspend_interrupt_data->endpoints = 1 << clnt_hdl; + + work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap), + GFP_ATOMIC); + if (!work_data) { + IPAERR("failed allocating ipa3_interrupt_work_wrap\n"); + goto fail_alloc_work; + } + INIT_WORK(&work_data->interrupt_work, + ipa3_deferred_interrupt_work); + work_data->handler = interrupt_info.handler; + work_data->interrupt = IPA_TX_SUSPEND_IRQ; + work_data->private_data = interrupt_info.private_data; + work_data->interrupt_data = (void *)suspend_interrupt_data; + queue_work(ipa_interrupt_wq, &work_data->interrupt_work); + return; +fail_alloc_work: + kfree(suspend_interrupt_data); + } +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c new file mode 100644 index 000000000000..4e9299db3530 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c @@ -0,0 +1,649 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include "ipa_i.h" + +struct ipa3_intf { + char name[IPA_RESOURCE_NAME_MAX]; + struct list_head link; + u32 num_tx_props; + u32 num_rx_props; + u32 num_ext_props; + struct ipa_ioc_tx_intf_prop *tx; + struct ipa_ioc_rx_intf_prop *rx; + struct ipa_ioc_ext_intf_prop *ext; + enum ipa_client_type excp_pipe; +}; + +struct ipa3_push_msg { + struct ipa_msg_meta meta; + ipa_msg_free_fn callback; + void *buff; + struct list_head link; +}; + +struct ipa3_pull_msg { + struct ipa_msg_meta meta; + ipa_msg_pull_fn callback; + struct list_head link; +}; + +/** + * ipa3_register_intf() - register "logical" interface + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * + * Register an interface and its tx and rx properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx) +{ + return ipa3_register_intf_ext(name, tx, rx, NULL); +} + +/** + * ipa3_register_intf_ext() - register "logical" interface which has only + * extended properties + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * @ext: [in] EXT properties of the interface + * + * Register an interface and its tx, rx and ext properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext) +{ + struct ipa3_intf *intf; + u32 len; + + if (name == NULL || (tx == NULL && rx == NULL && ext == NULL)) { + IPAERR("invalid params name=%pK tx=%pK rx=%pK ext=%pK\n", name, + tx, rx, ext); + return -EINVAL; + } + + if (tx && tx->num_props > IPA_NUM_PROPS_MAX) { + IPAERR("invalid tx num_props=%d max=%d\n", tx->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + if (rx && rx->num_props > IPA_NUM_PROPS_MAX) { + IPAERR("invalid rx num_props=%d max=%d\n", rx->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + if (ext && ext->num_props > IPA_NUM_PROPS_MAX) { + IPAERR("invalid ext num_props=%d max=%d\n", ext->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + len = sizeof(struct ipa3_intf); + intf = kzalloc(len, GFP_KERNEL); + if (intf == NULL) + return -ENOMEM; + + strlcpy(intf->name, name, IPA_RESOURCE_NAME_MAX); + + if (tx) { + intf->num_tx_props = tx->num_props; + len = tx->num_props * sizeof(struct ipa_ioc_tx_intf_prop); + intf->tx = kzalloc(len, GFP_KERNEL); + if (intf->tx == NULL) { + kfree(intf); + return -ENOMEM; + } + memcpy(intf->tx, tx->prop, len); + } + + if (rx) { + intf->num_rx_props = rx->num_props; + len = rx->num_props * sizeof(struct ipa_ioc_rx_intf_prop); + intf->rx = kzalloc(len, GFP_KERNEL); + if (intf->rx == NULL) { + kfree(intf->tx); + kfree(intf); + return -ENOMEM; + } + memcpy(intf->rx, rx->prop, len); + } + + if (ext) { + intf->num_ext_props = ext->num_props; + len = ext->num_props * sizeof(struct ipa_ioc_ext_intf_prop); + intf->ext = kzalloc(len, GFP_KERNEL); + if (intf->ext == NULL) { + kfree(intf->rx); + kfree(intf->tx); + kfree(intf); + return -ENOMEM; + } + memcpy(intf->ext, ext->prop, len); + } + + if (ext && ext->excp_pipe_valid) + intf->excp_pipe = ext->excp_pipe; + else 
+ intf->excp_pipe = IPA_CLIENT_APPS_LAN_CONS; + + mutex_lock(&ipa3_ctx->lock); + list_add_tail(&intf->link, &ipa3_ctx->intf_list); + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +/** + * ipa3_deregister_intf() - de-register previously registered logical interface + * @name: [in] interface name + * + * De-register a previously registered interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_deregister_intf(const char *name) +{ + struct ipa3_intf *entry; + struct ipa3_intf *next; + int result = -EINVAL; + + if ((name == NULL) || + (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX)) { + IPAERR("invalid param name=%s\n", name); + return result; + } + + mutex_lock(&ipa3_ctx->lock); + list_for_each_entry_safe(entry, next, &ipa3_ctx->intf_list, link) { + if (!strcmp(entry->name, name)) { + list_del(&entry->link); + kfree(entry->ext); + kfree(entry->rx); + kfree(entry->tx); + kfree(entry); + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_query_intf() - query logical interface properties + * @lookup: [inout] interface name and number of properties + * + * Obtain the handle and number of tx and rx properties for the named + * interface, used as part of querying the tx and rx properties for + * configuration of various rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_query_intf(struct ipa_ioc_query_intf *lookup) +{ + struct ipa3_intf *entry; + int result = -EINVAL; + + if (lookup == NULL) { + IPAERR("invalid param lookup=%pK\n", lookup); + return result; + } + + if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) == + IPA_RESOURCE_NAME_MAX) { + IPAERR_RL("Interface name too long. (%s)\n", lookup->name); + return result; + } + + mutex_lock(&ipa3_ctx->lock); + list_for_each_entry(entry, &ipa3_ctx->intf_list, link) { + if (!strcmp(entry->name, lookup->name)) { + lookup->num_tx_props = entry->num_tx_props; + lookup->num_rx_props = entry->num_rx_props; + lookup->num_ext_props = entry->num_ext_props; + lookup->excp_pipe = entry->excp_pipe; + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_query_intf_tx_props() - query TX props of an interface + * @tx: [inout] interface tx attributes + * + * Obtain the tx properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx) +{ + struct ipa3_intf *entry; + int result = -EINVAL; + + if (tx == NULL) { + IPAERR("null args: tx\n"); + return result; + } + + if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) { + IPAERR_RL("Interface name too long. 
(%s)\n", tx->name); + return result; + } + + mutex_lock(&ipa3_ctx->lock); + list_for_each_entry(entry, &ipa3_ctx->intf_list, link) { + if (!strcmp(entry->name, tx->name)) { + /* add the entry check */ + if (entry->num_tx_props != tx->num_tx_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_tx_props, + tx->num_tx_props); + mutex_unlock(&ipa3_ctx->lock); + return result; + } + memcpy(tx->tx, entry->tx, entry->num_tx_props * + sizeof(struct ipa_ioc_tx_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_query_intf_rx_props() - qeury RX props of an interface + * @rx: [inout] interface rx attributes + * + * Obtain the rx properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx) +{ + struct ipa3_intf *entry; + int result = -EINVAL; + + if (rx == NULL) { + IPAERR("null args: rx\n"); + return result; + } + + if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) { + IPAERR_RL("Interface name too long. (%s)\n", rx->name); + return result; + } + + mutex_lock(&ipa3_ctx->lock); + list_for_each_entry(entry, &ipa3_ctx->intf_list, link) { + if (!strcmp(entry->name, rx->name)) { + /* add the entry check */ + if (entry->num_rx_props != rx->num_rx_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_rx_props, + rx->num_rx_props); + mutex_unlock(&ipa3_ctx->lock); + return result; + } + memcpy(rx->rx, entry->rx, entry->num_rx_props * + sizeof(struct ipa_ioc_rx_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_query_intf_ext_props() - qeury EXT props of an interface + * @ext: [inout] interface ext attributes + * + * Obtain the ext properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext) +{ + struct ipa3_intf *entry; + int result = -EINVAL; + + if (ext == NULL) { + IPAERR("invalid param ext=%pK\n", ext); + return result; + } + + mutex_lock(&ipa3_ctx->lock); + list_for_each_entry(entry, &ipa3_ctx->intf_list, link) { + if (!strcmp(entry->name, ext->name)) { + /* add the entry check */ + if (entry->num_ext_props != ext->num_ext_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_ext_props, + ext->num_ext_props); + mutex_unlock(&ipa3_ctx->lock); + return result; + } + memcpy(ext->ext, entry->ext, entry->num_ext_props * + sizeof(struct ipa_ioc_ext_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->lock); + return result; +} + +static void ipa3_send_msg_free(void *buff, u32 len, u32 type) +{ + kfree(buff); +} + +/** + * ipa3_send_msg() - Send "message" from kernel client to IPA driver + * @meta: [in] message meta-data + * @buff: [in] the payload for message + * @callback: [in] free callback + * + * Client supplies the message meta-data and payload which IPA driver buffers + * till read by user-space. After read from user space IPA driver invokes the + * callback supplied to free the message payload. Client must not touch/free + * the message payload after calling this API. 
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback) +{ + struct ipa3_push_msg *msg; + void *data = NULL; + + if (meta == NULL || (buff == NULL && callback != NULL) || + (buff != NULL && callback == NULL)) { + IPAERR_RL("invalid param meta=%pK buff=%pK, callback=%pK\n", + meta, buff, callback); + return -EINVAL; + } + + if (meta->msg_type >= IPA_EVENT_MAX_NUM) { + IPAERR_RL("unsupported message type %d\n", meta->msg_type); + return -EINVAL; + } + + msg = kzalloc(sizeof(struct ipa3_push_msg), GFP_KERNEL); + if (msg == NULL) + return -ENOMEM; + + msg->meta = *meta; + if (meta->msg_len > 0 && buff) { + data = kmalloc(meta->msg_len, GFP_KERNEL); + if (data == NULL) { + kfree(msg); + return -ENOMEM; + } + memcpy(data, buff, meta->msg_len); + msg->buff = data; + msg->callback = ipa3_send_msg_free; + } + + mutex_lock(&ipa3_ctx->msg_lock); + list_add_tail(&msg->link, &ipa3_ctx->msg_list); + mutex_unlock(&ipa3_ctx->msg_lock); + IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]); + + wake_up(&ipa3_ctx->msg_waitq); + if (buff) + callback(buff, meta->msg_len, meta->msg_type); + + return 0; +} + +/** + * ipa3_register_pull_msg() - register pull message type + * @meta: [in] message meta-data + * @callback: [in] pull callback + * + * Register message callback by kernel client with IPA driver for IPA driver to + * pull message on-demand. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback) +{ + struct ipa3_pull_msg *msg; + + if (meta == NULL || callback == NULL) { + IPAERR("invalid param meta=%pK callback=%pK\n", meta, callback); + return -EINVAL; + } + + msg = kzalloc(sizeof(struct ipa3_pull_msg), GFP_KERNEL); + if (msg == NULL) + return -ENOMEM; + + msg->meta = *meta; + msg->callback = callback; + + mutex_lock(&ipa3_ctx->msg_lock); + list_add_tail(&msg->link, &ipa3_ctx->pull_msg_list); + mutex_unlock(&ipa3_ctx->msg_lock); + + return 0; +} + +/** + * ipa3_deregister_pull_msg() - De-register pull message type + * @meta: [in] message meta-data + * + * De-register "message" by kernel client from IPA driver + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta) +{ + struct ipa3_pull_msg *entry; + struct ipa3_pull_msg *next; + int result = -EINVAL; + + if (meta == NULL) { + IPAERR("null arg: meta\n"); + return result; + } + + mutex_lock(&ipa3_ctx->msg_lock); + list_for_each_entry_safe(entry, next, &ipa3_ctx->pull_msg_list, link) { + if (entry->meta.msg_len == meta->msg_len && + entry->meta.msg_type == meta->msg_type) { + list_del(&entry->link); + kfree(entry); + result = 0; + break; + } + } + mutex_unlock(&ipa3_ctx->msg_lock); + return result; +} + +/** + * ipa3_read() - read message from IPA device + * @filp: [in] file pointer + * @buf: [out] buffer to read into + * @count: [in] size of above buffer + * @f_pos: [inout] file position + * + * User-space should continually read from /dev/ipa; reads will block when there + * are no messages to read. Upon return, user-space should read the ipa_msg_meta + * from the start of the buffer to know what type of message was read and its + * length in the remainder of the buffer. 
Buffer supplied must be big enough to + * hold the message meta-data and the largest defined message type + * + * Returns: how many bytes copied to buffer + * + * Note: Should not be called from atomic context + */ +ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos) +{ + char __user *start; + struct ipa3_push_msg *msg = NULL; + int ret; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + int locked; + + start = buf; + + add_wait_queue(&ipa3_ctx->msg_waitq, &wait); + while (1) { + mutex_lock(&ipa3_ctx->msg_lock); + locked = 1; + + if (!list_empty(&ipa3_ctx->msg_list)) { + msg = list_first_entry(&ipa3_ctx->msg_list, + struct ipa3_push_msg, link); + list_del(&msg->link); + } + + IPADBG_LOW("msg=%pK\n", msg); + + if (msg) { + locked = 0; + mutex_unlock(&ipa3_ctx->msg_lock); + if (copy_to_user(buf, &msg->meta, + sizeof(struct ipa_msg_meta))) { + ret = -EFAULT; + kfree(msg); + msg = NULL; + break; + } + buf += sizeof(struct ipa_msg_meta); + count -= sizeof(struct ipa_msg_meta); + if (msg->buff) { + if (copy_to_user(buf, msg->buff, + msg->meta.msg_len)) { + ret = -EFAULT; + kfree(msg); + msg = NULL; + break; + } + buf += msg->meta.msg_len; + count -= msg->meta.msg_len; + msg->callback(msg->buff, msg->meta.msg_len, + msg->meta.msg_type); + } + IPA_STATS_INC_CNT( + ipa3_ctx->stats.msg_r[msg->meta.msg_type]); + kfree(msg); + } + + ret = -EAGAIN; + if (filp->f_flags & O_NONBLOCK) + break; + + ret = -EINTR; + if (signal_pending(current)) + break; + + if (start != buf) + break; + + locked = 0; + mutex_unlock(&ipa3_ctx->msg_lock); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } + + remove_wait_queue(&ipa3_ctx->msg_waitq, &wait); + if (start != buf && ret != -EFAULT) + ret = buf - start; + + if (locked) + mutex_unlock(&ipa3_ctx->msg_lock); + + return ret; +} + +/** + * ipa3_pull_msg() - pull the specified message from client + * @meta: [in] message meta-data + * @buf: [out] buffer to read into + * @count: [in] size of above buffer + * + * Populate the supplied buffer with the pull message which is fetched + * from client, the message must have previously been registered with + * the IPA driver + * + * Returns: how many bytes copied to buffer + * + * Note: Should not be called from atomic context + */ +int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count) +{ + struct ipa3_pull_msg *entry; + int result = -EINVAL; + + if (meta == NULL || buff == NULL || !count) { + IPAERR_RL("invalid param name=%pK buff=%pK count=%zu\n", + meta, buff, count); + return result; + } + + mutex_lock(&ipa3_ctx->msg_lock); + list_for_each_entry(entry, &ipa3_ctx->pull_msg_list, link) { + if (entry->meta.msg_len == meta->msg_len && + entry->meta.msg_type == meta->msg_type) { + result = entry->callback(buff, count, meta->msg_type); + break; + } + } + mutex_unlock(&ipa3_ctx->msg_lock); + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c new file mode 100644 index 000000000000..b19ef8b35817 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c @@ -0,0 +1,657 @@ +/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ipa_common_i.h" +#include "ipa_i.h" +#include "ipa_qmi_service.h" + +#define IPA_MHI_DRV_NAME "ipa_mhi" + + +#define IPA_MHI_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_MHI_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +#define IPA_MHI_ERR(fmt, args...) \ + do { \ + pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +#define IPA_MHI_FUNC_ENTRY() \ + IPA_MHI_DBG_LOW("ENTRY\n") +#define IPA_MHI_FUNC_EXIT() \ + IPA_MHI_DBG_LOW("EXIT\n") + +#define IPA_MHI_MAX_UL_CHANNELS 1 +#define IPA_MHI_MAX_DL_CHANNELS 1 + +/* bit #40 in address should be asserted for MHI transfers over pcie */ +#define IPA_MHI_HOST_ADDR_COND(addr) \ + ((params->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr)) + +enum ipa3_mhi_polling_mode { + IPA_MHI_POLLING_MODE_DB_MODE, + IPA_MHI_POLLING_MODE_POLL_MODE, +}; + +bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client) +{ + int res; + int ipa_ep_idx; + struct ipa3_ep_context *ep; + + IPA_MHI_FUNC_ENTRY(); + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPA_MHI_ERR("Invalid client.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + IPA_MHI_DBG_LOW("Stopping GSI channel %ld\n", ep->gsi_chan_hdl); + res = gsi_stop_channel(ep->gsi_chan_hdl); + if (res != 0 && + res != -GSI_STATUS_AGAIN && + res != -GSI_STATUS_TIMED_OUT) { + IPA_MHI_ERR("GSI stop channel failed %d\n", + res); + WARN_ON(1); + return false; + } + + if (res == 0) { + IPA_MHI_DBG_LOW("GSI channel %ld STOP\n", + ep->gsi_chan_hdl); + return true; + } + + return false; +} + +static int ipa3_mhi_reset_gsi_channel(enum ipa_client_type client) +{ + int res; + int clnt_hdl; + + IPA_MHI_FUNC_ENTRY(); + + clnt_hdl = ipa3_get_ep_mapping(client); + if (clnt_hdl < 0) + return -EFAULT; + + res = ipa3_reset_gsi_channel(clnt_hdl); + if (res) { + IPA_MHI_ERR("ipa3_reset_gsi_channel failed %d\n", res); + return -EFAULT; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +int ipa3_mhi_reset_channel_internal(enum ipa_client_type client) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + res = ipa3_mhi_reset_gsi_channel(client); + if (res) { + IPAERR("ipa3_mhi_reset_gsi_channel failed\n"); + ipa_assert(); + return res; + } + + res = ipa3_disable_data_path(ipa3_get_ep_mapping(client)); + if (res) { + IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res); + return res; + } + IPA_MHI_FUNC_EXIT(); + + return 0; +} + +int ipa3_mhi_start_channel_internal(enum ipa_client_type client) +{ + int res; + int ipa_ep_idx; + + IPA_MHI_FUNC_ENTRY(); + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx < 0) 
{ + IPA_MHI_ERR("Invalid client %d\n", client); + return -EINVAL; + } + res = ipa3_enable_data_path(ipa_ep_idx); + if (res) { + IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res); + return res; + } + IPA_MHI_FUNC_EXIT(); + + return 0; +} + +static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client, + struct ipa_mhi_ch_ctx *ch_ctx_host, int ring_size) +{ + switch (ch_ctx_host->pollcfg) { + case 0: + /*set default polling configuration according to MHI spec*/ + if (IPA_CLIENT_IS_PROD(client)) + return 7; + else + return (ring_size/2)/8; + break; + default: + return ch_ctx_host->pollcfg; + } +} + +static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, + int ipa_ep_idx, struct start_gsi_channel *params) +{ + int res = 0; + struct gsi_evt_ring_props ev_props; + struct ipa_mhi_msi_info *msi; + struct gsi_chan_props ch_props; + union __packed gsi_channel_scratch ch_scratch; + struct ipa3_ep_context *ep; + const struct ipa_gsi_ep_config *ep_cfg; + + IPA_MHI_FUNC_ENTRY(); + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + msi = params->msi; + ep_cfg = ipa3_get_gsi_ep_info(client); + if (!ep_cfg) { + IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n"); + return -EPERM; + } + + /* allocate event ring only for the first time pipe is connected */ + if (params->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) { + memset(&ev_props, 0, sizeof(ev_props)); + ev_props.intf = GSI_EVT_CHTYPE_MHI_EV; + ev_props.intr = GSI_INTR_MSI; + ev_props.re_size = GSI_EVT_RING_RE_SIZE_16B; + ev_props.ring_len = params->ev_ctx_host->rlen; + ev_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND( + params->ev_ctx_host->rbase); + ev_props.int_modt = params->ev_ctx_host->intmodt * + IPA_SLEEP_CLK_RATE_KHZ; + ev_props.int_modc = params->ev_ctx_host->intmodc; + ev_props.intvec = ((msi->data & ~msi->mask) | + (params->ev_ctx_host->msivec & msi->mask)); + ev_props.msi_addr = IPA_MHI_HOST_ADDR_COND( + (((u64)msi->addr_hi << 32) | msi->addr_low)); + ev_props.rp_update_addr = IPA_MHI_HOST_ADDR_COND( + params->event_context_addr + + offsetof(struct ipa_mhi_ev_ctx, rp)); + ev_props.exclusive = true; + ev_props.err_cb = params->ev_err_cb; + ev_props.user_data = params->channel; + ev_props.evchid_valid = true; + ev_props.evchid = params->evchid; + IPA_MHI_DBG("allocating event ring ep:%u evchid:%u\n", + ipa_ep_idx, ev_props.evchid); + res = gsi_alloc_evt_ring(&ev_props, ipa3_ctx->gsi_dev_hdl, + &ep->gsi_evt_ring_hdl); + if (res) { + IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res); + goto fail_alloc_evt; + } + IPA_MHI_DBG("client %d, caching event ring hdl %lu\n", + client, + ep->gsi_evt_ring_hdl); + *params->cached_gsi_evt_ring_hdl = + ep->gsi_evt_ring_hdl; + + } else { + IPA_MHI_DBG("event ring already exists: evt_ring_hdl=%lu\n", + *params->cached_gsi_evt_ring_hdl); + ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl; + } + + if (params->ev_ctx_host->wp == params->ev_ctx_host->rbase) { + IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n", + params->ev_ctx_host->wp); + goto fail_alloc_ch; + } + + IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n", + ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp); + res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl, + params->ev_ctx_host->wp); + if (res) { + IPA_MHI_ERR("fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n", + res, ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp); + goto fail_alloc_ch; + } + + memset(&ch_props, 0, sizeof(ch_props)); + ch_props.prot = GSI_CHAN_PROT_MHI; + ch_props.dir = IPA_CLIENT_IS_PROD(client) ? 
+ GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI; + ch_props.ch_id = ep_cfg->ipa_gsi_chan_num; + ch_props.evt_ring_hdl = *params->cached_gsi_evt_ring_hdl; + ch_props.re_size = GSI_CHAN_RE_SIZE_16B; + ch_props.ring_len = params->ch_ctx_host->rlen; + ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND( + params->ch_ctx_host->rbase); + ch_props.use_db_eng = GSI_CHAN_DB_MODE; + ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG; + ch_props.low_weight = 1; + ch_props.err_cb = params->ch_err_cb; + ch_props.chan_user_data = params->channel; + res = gsi_alloc_channel(&ch_props, ipa3_ctx->gsi_dev_hdl, + &ep->gsi_chan_hdl); + if (res) { + IPA_MHI_ERR("gsi_alloc_channel failed %d\n", + res); + goto fail_alloc_ch; + } + + memset(&ch_scratch, 0, sizeof(ch_scratch)); + ch_scratch.mhi.mhi_host_wp_addr = IPA_MHI_HOST_ADDR_COND( + params->channel_context_addr + + offsetof(struct ipa_mhi_ch_ctx, wp)); + ch_scratch.mhi.assert_bit40 = params->assert_bit40; + ch_scratch.mhi.max_outstanding_tre = + ep_cfg->ipa_if_tlv * ch_props.re_size; + ch_scratch.mhi.outstanding_threshold = + min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size; + ch_scratch.mhi.oob_mod_threshold = 4; + if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT || + params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) { + ch_scratch.mhi.burst_mode_enabled = true; + ch_scratch.mhi.polling_configuration = + ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host, + (ch_props.ring_len / ch_props.re_size)); + ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE; + } else { + ch_scratch.mhi.burst_mode_enabled = false; + } + res = gsi_write_channel_scratch(ep->gsi_chan_hdl, + ch_scratch); + if (res) { + IPA_MHI_ERR("gsi_write_channel_scratch failed %d\n", + res); + goto fail_ch_scratch; + } + + *params->mhi = ch_scratch.mhi; + + IPA_MHI_DBG("Starting channel\n"); + res = gsi_start_channel(ep->gsi_chan_hdl); + if (res) { + IPA_MHI_ERR("gsi_start_channel failed %d\n", res); + goto fail_ch_start; + } + + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_ch_start: +fail_ch_scratch: + gsi_dealloc_channel(ep->gsi_chan_hdl); +fail_alloc_ch: + gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); + ep->gsi_evt_ring_hdl = ~0; +fail_alloc_evt: + return res; +} + +int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params) +{ + int res; + struct gsi_device_scratch gsi_scratch; + const struct ipa_gsi_ep_config *gsi_ep_info; + + IPA_MHI_FUNC_ENTRY(); + + if (!params) { + IPA_MHI_ERR("null args\n"); + return -EINVAL; + } + + /* Initialize IPA MHI engine */ + gsi_ep_info = ipa3_get_gsi_ep_info(IPA_CLIENT_MHI_PROD); + if (!gsi_ep_info) { + IPAERR("MHI PROD has no ep allocated\n"); + ipa_assert(); + } + memset(&gsi_scratch, 0, sizeof(gsi_scratch)); + gsi_scratch.mhi_base_chan_idx_valid = true; + gsi_scratch.mhi_base_chan_idx = gsi_ep_info->ipa_gsi_chan_num + + params->gsi.first_ch_idx; + res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl, + &gsi_scratch); + if (res) { + IPA_MHI_ERR("failed to write device scratch %d\n", res); + goto fail_init_engine; + } + + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_init_engine: + return res; +} + +/** + * ipa3_connect_mhi_pipe() - Connect pipe to IPA and start corresponding + * MHI channel + * @in: connect parameters + * @clnt_hdl: [out] client handle for this pipe + * + * This function is called by IPA MHI client driver on MHI channel start. + * This function is called after MHI engine was started. 
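+ *
+ * An illustrative caller sketch (variable names, the initialisation style
+ * and the subset of fields shown are editorial assumptions; the remaining
+ * start.gsi fields - msi, ring contexts, callbacks - are omitted):
+ *
+ *	struct ipa_sys_connect_params sys_in = {
+ *		.client = IPA_CLIENT_MHI_PROD,
+ *	};
+ *	struct ipa_mhi_connect_params_internal in = { .sys = &sys_in };
+ *	u32 clnt_hdl;
+ *
+ *	in.start.gsi.state = IPA_HW_MHI_CHANNEL_STATE_INVALID;
+ *	in.start.gsi.assert_bit40 = true;
+ *	if (ipa3_connect_mhi_pipe(&in, &clnt_hdl))
+ *		goto fail;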
+ * + * Return codes: 0 : success + * negative : error + */ +int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl) +{ + struct ipa3_ep_context *ep; + int ipa_ep_idx; + int res; + enum ipa_client_type client; + + IPA_MHI_FUNC_ENTRY(); + + if (!in || !clnt_hdl) { + IPA_MHI_ERR("NULL args\n"); + return -EINVAL; + } + + client = in->sys->client; + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPA_MHI_ERR("Invalid client.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (ep->valid == 1) { + IPA_MHI_ERR("EP already allocated.\n"); + return -EPERM; + } + + memset(ep, 0, offsetof(struct ipa3_ep_context, sys)); + ep->valid = 1; + ep->skip_ep_cfg = in->sys->skip_ep_cfg; + ep->client = client; + ep->client_notify = in->sys->notify; + ep->priv = in->sys->priv; + ep->keep_ipa_awake = in->sys->keep_ipa_awake; + + res = ipa_mhi_start_gsi_channel(client, + ipa_ep_idx, &in->start.gsi); + if (res) { + IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n", + res); + goto fail_start_channel; + } + + res = ipa3_enable_data_path(ipa_ep_idx); + if (res) { + IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res, + ipa_ep_idx); + goto fail_ep_cfg; + } + + if (!ep->skip_ep_cfg) { + if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_ep_cfg; + } + if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_ep_cfg; + } + IPA_MHI_DBG("ep configuration successful\n"); + } else { + IPA_MHI_DBG("skipping ep configuration\n"); + } + + *clnt_hdl = ipa_ep_idx; + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client)) + ipa3_install_dflt_flt_rules(ipa_ep_idx); + + ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + IPA_MHI_DBG("client %d (ep: %d) connected\n", client, + ipa_ep_idx); + + IPA_MHI_FUNC_EXIT(); + + return 0; + +fail_ep_cfg: + ipa3_disable_data_path(ipa_ep_idx); +fail_start_channel: + memset(ep, 0, offsetof(struct ipa3_ep_context, sys)); + return -EPERM; +} + +/** + * ipa3_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding + * MHI channel + * @clnt_hdl: client handle for this pipe + * + * This function is called by IPA MHI client driver on MHI channel reset. + * This function is called after MHI channel was started. 
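+ * The GSI channel is expected to no longer be running at this point (see
+ * ipa3_mhi_stop_gsi_channel() earlier in this file), since the channel is
+ * deallocated here; an illustrative client-side teardown order would be:
+ *
+ *	ipa3_mhi_stop_gsi_channel(client);
+ *	ipa3_mhi_reset_channel_internal(client);
+ *	ipa3_disconnect_mhi_pipe(clnt_hdl);
+ *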
+ * This function is doing the following: + * - Send command to uC/GSI to reset corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa3_disconnect_mhi_pipe(u32 clnt_hdl) +{ + struct ipa3_ep_context *ep; + int res; + + IPA_MHI_FUNC_ENTRY(); + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) { + IPAERR("invalid handle %d\n", clnt_hdl); + return -EINVAL; + } + + if (ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("pipe was not connected %d\n", clnt_hdl); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + res = gsi_dealloc_channel(ep->gsi_chan_hdl); + if (res) { + IPAERR("gsi_dealloc_channel failed %d\n", res); + goto fail_reset_channel; + } + + ep->valid = 0; + ipa3_delete_dflt_flt_rules(clnt_hdl); + + IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl); + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_reset_channel: + return res; +} + +int ipa3_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index) +{ + int res; + int ipa_ep_idx; + struct ipa3_ep_context *ep; + + IPA_MHI_FUNC_ENTRY(); + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx < 0) { + IPA_MHI_ERR("Invalid client %d\n", client); + return -EINVAL; + } + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (brstmode_enabled && !LPTransitionRejected) { + /* + * set polling mode bit to DB mode before + * resuming the channel + */ + res = gsi_write_channel_scratch( + ep->gsi_chan_hdl, ch_scratch); + if (res) { + IPA_MHI_ERR("write ch scratch fail %d\n" + , res); + return res; + } + } + + res = gsi_start_channel(ep->gsi_chan_hdl); + if (res) { + IPA_MHI_ERR("failed to resume channel error %d\n", res); + return res; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +int ipa3_mhi_query_ch_info(enum ipa_client_type client, + struct gsi_chan_info *ch_info) +{ + int ipa_ep_idx; + int res; + struct ipa3_ep_context *ep; + + IPA_MHI_FUNC_ENTRY(); + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx < 0) { + IPA_MHI_ERR("Invalid client %d\n", client); + return -EINVAL; + } + ep = &ipa3_ctx->ep[ipa_ep_idx]; + res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info); + if (res) { + IPA_MHI_ERR("gsi_query_channel_info failed\n"); + return res; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +bool ipa3_has_open_aggr_frame(enum ipa_client_type client) +{ + u32 aggr_state_active; + int ipa_ep_idx; + + aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + IPA_MHI_DBG_LOW("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active); + + ipa_ep_idx = ipa_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + ipa_assert(); + return false; + } + + if ((1 << ipa_ep_idx) & aggr_state_active) + return true; + + return false; +} + +int ipa3_mhi_destroy_channel(enum ipa_client_type client) +{ + int res; + int ipa_ep_idx; + struct ipa3_ep_context *ep; + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx < 0) { + IPA_MHI_ERR("Invalid client %d\n", client); + return -EINVAL; + } + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n", + ep->gsi_evt_ring_hdl, ipa_ep_idx); + + res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl); + if (res) { + IPAERR(" failed to reset evt ring %lu, err %d\n" + , ep->gsi_evt_ring_hdl, res); + goto fail; + } + + IPA_MHI_DBG("dealloc event ring (hdl: %lu, ep: %d)\n", + ep->gsi_evt_ring_hdl, ipa_ep_idx); + + res = gsi_dealloc_evt_ring( + ep->gsi_evt_ring_hdl); + if (res) { + IPAERR("dealloc evt ring %lu failed, err 
%d\n" + , ep->gsi_evt_ring_hdl, res); + goto fail; + } + + return 0; +fail: + return res; +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA MHI driver"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c new file mode 100644 index 000000000000..a57aff128872 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c @@ -0,0 +1,1053 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipahal/ipahal.h" + +#define IPA_NAT_PHYS_MEM_OFFSET 0 +#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE + +#define IPA_NAT_TEMP_MEM_SIZE 128 + +enum nat_table_type { + IPA_NAT_BASE_TBL = 0, + IPA_NAT_EXPN_TBL = 1, + IPA_NAT_INDX_TBL = 2, + IPA_NAT_INDEX_EXPN_TBL = 3, +}; + +#define NAT_TABLE_ENTRY_SIZE_BYTE 32 +#define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4 + +static int ipa3_nat_vma_fault_remap(struct vm_fault *vmf) +{ + vmf->page = NULL; + + IPADBG("\n"); + return VM_FAULT_SIGBUS; +} + +/* VMA related file operations functions */ +const static struct vm_operations_struct ipa3_nat_remap_vm_ops = { + .fault = ipa3_nat_vma_fault_remap, +}; + +static int ipa3_nat_open(struct inode *inode, struct file *filp) +{ + struct ipa3_nat_mem *nat_ctx; + + IPADBG("\n"); + nat_ctx = container_of(inode->i_cdev, struct ipa3_nat_mem, cdev); + filp->private_data = nat_ctx; + IPADBG("return\n"); + + return 0; +} + +static int ipa3_nat_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long vsize = vma->vm_end - vma->vm_start; + struct ipa3_nat_mem *nat_ctx = + (struct ipa3_nat_mem *)filp->private_data; + unsigned long phys_addr; + int result; + + mutex_lock(&nat_ctx->lock); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (nat_ctx->is_sys_mem) { + IPADBG("Mapping system memory\n"); + if (nat_ctx->is_mapped) { + IPAERR("mapping already exists, only 1 supported\n"); + result = -EINVAL; + goto bail; + } + IPADBG("map sz=0x%zx\n", nat_ctx->size); + result = + dma_mmap_coherent( + ipa3_ctx->pdev, vma, + nat_ctx->vaddr, nat_ctx->dma_handle, + nat_ctx->size); + + if (result) { + IPAERR("unable to map memory. 
Err:%d\n", result); + goto bail; + } + ipa3_ctx->nat_mem.nat_base_address = nat_ctx->vaddr; + } else { + IPADBG("Mapping shared(local) memory\n"); + IPADBG("map sz=0x%lx\n", vsize); + + if ((IPA_NAT_PHYS_MEM_SIZE == 0) || + (vsize > IPA_NAT_PHYS_MEM_SIZE)) { + result = -EINVAL; + goto bail; + } + phys_addr = ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, + IPA_NAT_PHYS_MEM_OFFSET); + + if (remap_pfn_range( + vma, vma->vm_start, + phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) { + IPAERR("remap failed\n"); + result = -EAGAIN; + goto bail; + } + ipa3_ctx->nat_mem.nat_base_address = (void *)vma->vm_start; + } + nat_ctx->is_mapped = true; + vma->vm_ops = &ipa3_nat_remap_vm_ops; + IPADBG("return\n"); + result = 0; +bail: + mutex_unlock(&nat_ctx->lock); + return result; +} + +static const struct file_operations ipa3_nat_fops = { + .owner = THIS_MODULE, + .open = ipa3_nat_open, + .mmap = ipa3_nat_mmap +}; + +/** + * ipa3_allocate_temp_nat_memory() - Allocates temp nat memory + * + * Called during nat table delete + */ +void ipa3_allocate_temp_nat_memory(void) +{ + struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem); + int gfp_flags = GFP_KERNEL | __GFP_ZERO; + + nat_ctx->tmp_vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, IPA_NAT_TEMP_MEM_SIZE, + &nat_ctx->tmp_dma_handle, gfp_flags); + + if (nat_ctx->tmp_vaddr == NULL) { + IPAERR("Temp Memory alloc failed\n"); + nat_ctx->is_tmp_mem = false; + return; + } + + nat_ctx->is_tmp_mem = true; + IPADBG("IPA NAT allocated temp memory successfully\n"); +} + +/** + * ipa3_create_nat_device() - Create the NAT device + * + * Called during ipa init to create nat device + * + * Returns: 0 on success, negative on failure + */ +int ipa3_create_nat_device(void) +{ + struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem); + int result; + + IPADBG("\n"); + + mutex_lock(&nat_ctx->lock); + nat_ctx->class = class_create(THIS_MODULE, NAT_DEV_NAME); + if (IS_ERR(nat_ctx->class)) { + IPAERR("unable to create the class\n"); + result = -ENODEV; + goto vaddr_alloc_fail; + } + result = alloc_chrdev_region(&nat_ctx->dev_num, + 0, + 1, + NAT_DEV_NAME); + if (result) { + IPAERR("alloc_chrdev_region err.\n"); + result = -ENODEV; + goto alloc_chrdev_region_fail; + } + + nat_ctx->dev = + device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx, + "%s", NAT_DEV_NAME); + + if (IS_ERR(nat_ctx->dev)) { + IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev)); + result = -ENODEV; + goto device_create_fail; + } + + cdev_init(&nat_ctx->cdev, &ipa3_nat_fops); + nat_ctx->cdev.owner = THIS_MODULE; + nat_ctx->cdev.ops = &ipa3_nat_fops; + + result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1); + if (result) { + IPAERR("cdev_add err=%d\n", -result); + goto cdev_add_fail; + } + IPADBG("ipa nat dev added successful. 
major:%d minor:%d\n", + MAJOR(nat_ctx->dev_num), + MINOR(nat_ctx->dev_num)); + + nat_ctx->is_dev = true; + ipa3_allocate_temp_nat_memory(); + IPADBG("IPA NAT device created successfully\n"); + result = 0; + goto bail; + +cdev_add_fail: + device_destroy(nat_ctx->class, nat_ctx->dev_num); +device_create_fail: + unregister_chrdev_region(nat_ctx->dev_num, 1); +alloc_chrdev_region_fail: + class_destroy(nat_ctx->class); +vaddr_alloc_fail: + if (nat_ctx->vaddr) { + IPADBG("Releasing system memory\n"); + dma_free_coherent( + ipa3_ctx->pdev, nat_ctx->size, + nat_ctx->vaddr, nat_ctx->dma_handle); + nat_ctx->vaddr = NULL; + nat_ctx->dma_handle = 0; + nat_ctx->size = 0; + } + +bail: + mutex_unlock(&nat_ctx->lock); + + return result; +} + +/** + * ipa3_allocate_nat_device() - Allocates memory for the NAT device + * @mem: [in/out] memory parameters + * + * Called by NAT client driver to allocate memory for the NAT entries. Based on + * the request size either shared or system memory will be used. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem) +{ + struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem); + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO; + int result; + + IPADBG("passed memory size %zu\n", mem->size); + + mutex_lock(&nat_ctx->lock); + if (strcmp(mem->dev_name, NAT_DEV_NAME)) { + IPAERR_RL("Nat device name mismatch\n"); + IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name); + result = -EPERM; + goto bail; + } + + if (nat_ctx->is_dev != true) { + IPAERR("Nat device not created successfully during boot up\n"); + result = -EPERM; + goto bail; + } + + if (nat_ctx->is_dev_init == true) { + IPAERR("Device already init\n"); + result = 0; + goto bail; + } + + if (mem->size <= 0 || + nat_ctx->is_dev_init == true) { + IPAERR_RL("Invalid Parameters or device is already init\n"); + result = -EPERM; + goto bail; + } + + if (mem->size > IPA_NAT_PHYS_MEM_SIZE) { + IPADBG("Allocating system memory\n"); + nat_ctx->is_sys_mem = true; + nat_ctx->vaddr = + dma_alloc_coherent(ipa3_ctx->pdev, mem->size, + &nat_ctx->dma_handle, gfp_flags); + if (nat_ctx->vaddr == NULL) { + IPAERR("memory alloc failed\n"); + result = -ENOMEM; + goto bail; + } + nat_ctx->size = mem->size; + } else { + IPADBG("using shared(local) memory\n"); + nat_ctx->is_sys_mem = false; + } + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + struct ipa_pdn_entry *pdn_entries; + struct ipa_mem_buffer *pdn_mem = &ipa3_ctx->nat_mem.pdn_mem; + + pdn_mem->size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM; + if (IPA_MEM_PART(pdn_config_size) < pdn_mem->size) { + IPAERR( + "number of PDN entries exceeds SRAM available space\n"); + result = -ENOMEM; + goto fail_alloc_pdn; + } + + pdn_mem->base = dma_alloc_coherent(ipa3_ctx->pdev, + pdn_mem->size, + &pdn_mem->phys_base, + gfp_flags); + if (!pdn_mem->base) { + IPAERR("fail to allocate PDN memory\n"); + result = -ENOMEM; + goto fail_alloc_pdn; + } + pdn_entries = pdn_mem->base; + memset(pdn_entries, 0, pdn_mem->size); + IPADBG("IPA NAT dev allocated PDN memory successfully\n"); + } + + nat_ctx->is_dev_init = true; + IPADBG("IPA NAT dev init successfully\n"); + mutex_unlock(&nat_ctx->lock); + + return 0; + +fail_alloc_pdn: + if (nat_ctx->vaddr) { + dma_free_coherent(ipa3_ctx->pdev, mem->size, nat_ctx->vaddr, + nat_ctx->dma_handle); + nat_ctx->vaddr = NULL; + } +bail: + mutex_unlock(&nat_ctx->lock); + + return result; +} + +/* IOCTL function handlers */ +/** + * ipa3_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW + * @init: 
[in] initialization command attributes + * + * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) +{ +#define TBL_ENTRY_SIZE 32 +#define INDX_TBL_ENTRY_SIZE 4 + + struct ipa3_desc desc[3]; + struct ipahal_imm_cmd_ip_v4_nat_init cmd; + int num_cmd = 0; + int i = 0; + struct ipahal_imm_cmd_pyld *cmd_pyld[3]; + struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 }; + int result = 0; + u32 offset = 0; + size_t tmp; + + IPADBG("\n"); + if (init->table_entries == 0) { + IPADBG("Table entries is zero\n"); + return -EPERM; + } + + /* check for integer overflow */ + if (init->ipv4_rules_offset > + UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) { + IPAERR_RL("Detected overflow\n"); + return -EPERM; + } + /* Check Table Entry offset is not + * beyond allocated size + */ + tmp = init->ipv4_rules_offset + + (TBL_ENTRY_SIZE * (init->table_entries + 1)); + if (tmp > ipa3_ctx->nat_mem.size) { + IPAERR_RL("Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->ipv4_rules_offset, (init->table_entries + 1), + tmp, ipa3_ctx->nat_mem.size); + return -EPERM; + } + + /* check for integer overflow */ + if (init->expn_rules_offset > + (UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries))) { + IPAERR_RL("Detected overflow\n"); + return -EPERM; + } + /* Check Expn Table Entry offset is not + * beyond allocated size + */ + tmp = init->expn_rules_offset + + (TBL_ENTRY_SIZE * init->expn_table_entries); + if (tmp > ipa3_ctx->nat_mem.size) { + IPAERR_RL("Expn Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->expn_rules_offset, init->expn_table_entries, + tmp, ipa3_ctx->nat_mem.size); + return -EPERM; + } + + /* check for integer overflow */ + if (init->index_offset > + UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) { + IPAERR_RL("Detected overflow\n"); + return -EPERM; + } + /* Check Indx Table Entry offset is not + * beyond allocated size + */ + tmp = init->index_offset + + (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1)); + if (tmp > ipa3_ctx->nat_mem.size) { + IPAERR_RL("Indx Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->index_offset, (init->table_entries + 1), + tmp, ipa3_ctx->nat_mem.size); + return -EPERM; + } + + /* check for integer overflow */ + if (init->index_expn_offset > + UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) { + IPAERR_RL("Detected overflow\n"); + return -EPERM; + } + /* Check Expn Table entry offset is not + * beyond allocated size + */ + tmp = init->index_expn_offset + + (INDX_TBL_ENTRY_SIZE * init->expn_table_entries); + if (tmp > ipa3_ctx->nat_mem.size) { + IPAERR_RL("Indx Expn Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->index_expn_offset, init->expn_table_entries, + tmp, ipa3_ctx->nat_mem.size); + return -EPERM; + } + + memset(&desc, 0, sizeof(desc)); + /* NO-OP IC for ensuring that IPA pipeline is empty */ + cmd_pyld[num_cmd] = + ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("failed to construct NOP imm cmd\n"); + result = -ENOMEM; + goto bail; + } + + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].type = IPA_IMM_CMD_DESC; + desc[num_cmd].callback = NULL; + desc[num_cmd].user1 = NULL; + desc[num_cmd].user2 = 0; + desc[num_cmd].pyld = 
cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + num_cmd++; + + if (ipa3_ctx->nat_mem.vaddr) { + IPADBG("using system memory for nat table\n"); + cmd.ipv4_rules_addr_shared = false; + cmd.ipv4_expansion_rules_addr_shared = false; + cmd.index_table_addr_shared = false; + cmd.index_table_expansion_addr_shared = false; + + offset = UINT_MAX - ipa3_ctx->nat_mem.dma_handle; + + if ((init->ipv4_rules_offset > offset) || + (init->expn_rules_offset > offset) || + (init->index_offset > offset) || + (init->index_expn_offset > offset)) { + IPAERR_RL("Failed due to integer overflow\n"); + IPAERR_RL("nat.mem.dma_handle: 0x%pa\n", + &ipa3_ctx->nat_mem.dma_handle); + IPAERR_RL("ipv4_rules_offset: 0x%x\n", + init->ipv4_rules_offset); + IPAERR_RL("expn_rules_offset: 0x%x\n", + init->expn_rules_offset); + IPAERR_RL("index_offset: 0x%x\n", + init->index_offset); + IPAERR_RL("index_expn_offset: 0x%x\n", + init->index_expn_offset); + result = -EPERM; + goto destroy_imm_cmd; + } + cmd.ipv4_rules_addr = + ipa3_ctx->nat_mem.dma_handle + init->ipv4_rules_offset; + IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset); + + cmd.ipv4_expansion_rules_addr = + ipa3_ctx->nat_mem.dma_handle + init->expn_rules_offset; + IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset); + + cmd.index_table_addr = + ipa3_ctx->nat_mem.dma_handle + init->index_offset; + IPADBG("index_offset:0x%x\n", init->index_offset); + + cmd.index_table_expansion_addr = + ipa3_ctx->nat_mem.dma_handle + init->index_expn_offset; + IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset); + } else { + IPADBG("using shared(local) memory for nat table\n"); + cmd.ipv4_rules_addr_shared = true; + cmd.ipv4_expansion_rules_addr_shared = true; + cmd.index_table_addr_shared = true; + cmd.index_table_expansion_addr_shared = true; + + cmd.ipv4_rules_addr = init->ipv4_rules_offset + + IPA_RAM_NAT_OFST; + + cmd.ipv4_expansion_rules_addr = init->expn_rules_offset + + IPA_RAM_NAT_OFST; + + cmd.index_table_addr = init->index_offset + + IPA_RAM_NAT_OFST; + + cmd.index_table_expansion_addr = init->index_expn_offset + + IPA_RAM_NAT_OFST; + } + cmd.table_index = init->tbl_index; + IPADBG("Table index:0x%x\n", cmd.table_index); + cmd.size_base_tables = init->table_entries; + IPADBG("Base Table size:0x%x\n", cmd.size_base_tables); + cmd.size_expansion_tables = init->expn_table_entries; + IPADBG("Expansion Table size:0x%x\n", cmd.size_expansion_tables); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + /* + * public ip field changed to store the PDN config base + * address in IPAv4 + */ + cmd.public_ip_addr = IPA_MEM_PART(pdn_config_ofst); + IPADBG("pdn config base:0x%x\n", cmd.public_ip_addr); + } else { + cmd.public_ip_addr = init->ip_addr; + IPADBG("Public ip address:0x%x\n", cmd.public_ip_addr); + } + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n"); + result = -EPERM; + goto destroy_imm_cmd; + } + + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].type = IPA_IMM_CMD_DESC; + desc[num_cmd].callback = NULL; + desc[num_cmd].user1 = NULL; + desc[num_cmd].user2 = 0; + desc[num_cmd].pyld = cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + num_cmd++; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + struct ipa_pdn_entry *pdn_entries; + + /* store ip in pdn entries cache array */ + pdn_entries = ipa3_ctx->nat_mem.pdn_mem.base; + pdn_entries[0].public_ip = init->ip_addr; + 
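+		/*
+		 * Entry 0 caches the public IP supplied with the NAT init
+		 * command; its metadata words are cleared just below.  The
+		 * remaining PDN entries were zeroed when the table was
+		 * allocated and are only rewritten later through
+		 * ipa3_nat_mdfy_pdn(), roughly (index and address below are
+		 * placeholders):
+		 *
+		 *	struct ipa_ioc_nat_pdn_entry e = {
+		 *		.pdn_index = 1,
+		 *		.public_ip = ip,
+		 *	};
+		 *	ipa3_nat_mdfy_pdn(&e);
+		 */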
pdn_entries[0].dst_metadata = 0; + pdn_entries[0].src_metadata = 0; + pdn_entries[0].resrvd = 0; + + IPADBG("Public ip address:0x%x\n", init->ip_addr); + + /* Copy the PDN config table to SRAM */ + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM; + mem_cmd.system_addr = ipa3_ctx->nat_mem.pdn_mem.phys_base; + mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(pdn_config_ofst); + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR( + "fail construct dma_shared_mem cmd: for pdn table"); + result = -ENOMEM; + goto destroy_imm_cmd; + } + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].type = IPA_IMM_CMD_DESC; + desc[num_cmd].callback = NULL; + desc[num_cmd].user1 = NULL; + desc[num_cmd].user2 = 0; + desc[num_cmd].pyld = cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + num_cmd++; + IPADBG("added PDN table copy cmd\n"); + } + + IPADBG("posting v4 init command\n"); + if (ipa3_send_cmd(num_cmd, desc)) { + IPAERR("Fail to send immediate command\n"); + result = -EPERM; + goto destroy_imm_cmd; + } + + ipa3_ctx->nat_mem.public_ip_addr = init->ip_addr; + IPADBG("Table ip address:0x%x", ipa3_ctx->nat_mem.public_ip_addr); + + ipa3_ctx->nat_mem.ipv4_rules_addr = + (char *)ipa3_ctx->nat_mem.nat_base_address + init->ipv4_rules_offset; + IPADBG("ipv4_rules_addr: 0x%pK\n", + ipa3_ctx->nat_mem.ipv4_rules_addr); + + ipa3_ctx->nat_mem.ipv4_expansion_rules_addr = + (char *)ipa3_ctx->nat_mem.nat_base_address + init->expn_rules_offset; + IPADBG("ipv4_expansion_rules_addr: 0x%pK\n", + ipa3_ctx->nat_mem.ipv4_expansion_rules_addr); + + ipa3_ctx->nat_mem.index_table_addr = + (char *)ipa3_ctx->nat_mem.nat_base_address + + init->index_offset; + IPADBG("index_table_addr: 0x%pK\n", + ipa3_ctx->nat_mem.index_table_addr); + + ipa3_ctx->nat_mem.index_table_expansion_addr = + (char *)ipa3_ctx->nat_mem.nat_base_address + init->index_expn_offset; + IPADBG("index_table_expansion_addr: 0x%pK\n", + ipa3_ctx->nat_mem.index_table_expansion_addr); + + IPADBG("size_base_tables: %d\n", init->table_entries); + ipa3_ctx->nat_mem.size_base_tables = init->table_entries; + + IPADBG("size_expansion_tables: %d\n", init->expn_table_entries); + ipa3_ctx->nat_mem.size_expansion_tables = init->expn_table_entries; + + IPADBG("return\n"); +destroy_imm_cmd: + for (i = 0; i < num_cmd; i++) + ipahal_destroy_imm_cmd(cmd_pyld[i]); +bail: + return result; +} + +/** + * ipa3_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM + * @mdfy_pdn: [in] PDN info to be written to SRAM + * + * Called by NAT client driver to modify an entry in the PDN config table + * + * Returns: 0 on success, negative on failure + */ +int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn) +{ + struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 }; + struct ipa3_desc desc; + struct ipahal_imm_cmd_pyld *cmd_pyld; + int result = 0; + struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem); + struct ipa_pdn_entry *pdn_entries = nat_ctx->pdn_mem.base; + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + IPAERR("IPA HW does not support multi PDN\n"); + return -EPERM; + } + if (!nat_ctx->is_dev_init) { + IPAERR("attempt to modify a PDN entry before dev init\n"); + return -EPERM; + } + + if (mdfy_pdn->pdn_index > (IPA_MAX_PDN_NUM - 1)) { + IPAERR("pdn index out of range %d\n", mdfy_pdn->pdn_index); + return 
-EPERM; + } + + mutex_lock(&nat_ctx->lock); + + /* store ip in pdn entries cache array */ + pdn_entries[mdfy_pdn->pdn_index].public_ip = + mdfy_pdn->public_ip; + pdn_entries[mdfy_pdn->pdn_index].dst_metadata = + mdfy_pdn->dst_metadata; + pdn_entries[mdfy_pdn->pdn_index].src_metadata = + mdfy_pdn->src_metadata; + + IPADBG("Modify PDN in index %d: ", mdfy_pdn->pdn_index); + IPADBG("Public ip address:0x%x, ", mdfy_pdn->public_ip); + IPADBG("dst metadata:0x%x, ", mdfy_pdn->dst_metadata); + IPADBG("src metadata:0x%x\n", mdfy_pdn->src_metadata); + + memset(&desc, 0, sizeof(desc)); + + /* Copy the PDN config table to SRAM */ + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM; + mem_cmd.system_addr = nat_ctx->pdn_mem.phys_base; + mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(pdn_config_ofst); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld) { + IPAERR( + "fail construct dma_shared_mem cmd: for pdn table\n"); + result = -ENOMEM; + goto bail; + } + desc.opcode = cmd_pyld->opcode; + desc.type = IPA_IMM_CMD_DESC; + desc.callback = NULL; + desc.user1 = NULL; + desc.user2 = 0; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + + IPADBG("sending PDN table copy cmd\n"); + if (ipa3_send_cmd(1, &desc)) { + IPAERR("Fail to send immediate command\n"); + result = -EPERM; + } + + ipahal_destroy_imm_cmd(cmd_pyld); +bail: + mutex_unlock(&nat_ctx->lock); + return result; +} +/** + * ipa3_nat_dma_cmd() - Post NAT_DMA command to IPA HW + * @dma: [in] initialization command attributes + * + * Called by NAT client driver to post NAT_DMA command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) +{ +#define NUM_OF_DESC 2 + + struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL; + struct ipahal_imm_cmd_nat_dma cmd; + enum ipahal_imm_cmd_name cmd_name = IPA_IMM_CMD_NAT_DMA; + struct ipahal_imm_cmd_pyld *cmd_pyld = NULL; + struct ipa3_desc *desc = NULL; + u16 size = 0, cnt = 0; + int ret = 0; + + IPADBG("\n"); + if (dma->entries <= 0) { + IPAERR_RL("Invalid number of commands %d\n", + dma->entries); + ret = -EPERM; + goto bail; + } + + for (cnt = 0; cnt < dma->entries; cnt++) { + if (dma->dma[cnt].table_index >= 1) { + IPAERR_RL("Invalid table index %d\n", + dma->dma[cnt].table_index); + ret = -EPERM; + goto bail; + } + + switch (dma->dma[cnt].base_addr) { + case IPA_NAT_BASE_TBL: + if (dma->dma[cnt].offset >= + (ipa3_ctx->nat_mem.size_base_tables + 1) * + NAT_TABLE_ENTRY_SIZE_BYTE) { + IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + case IPA_NAT_EXPN_TBL: + if (dma->dma[cnt].offset >= + ipa3_ctx->nat_mem.size_expansion_tables * + NAT_TABLE_ENTRY_SIZE_BYTE) { + IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + case IPA_NAT_INDX_TBL: + if (dma->dma[cnt].offset >= + (ipa3_ctx->nat_mem.size_base_tables + 1) * + NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) { + IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + case IPA_NAT_INDEX_EXPN_TBL: + if (dma->dma[cnt].offset >= + ipa3_ctx->nat_mem.size_expansion_tables * + NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) { + IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + default: + IPAERR_RL("Invalid base_addr 
%d\n", + dma->dma[cnt].base_addr); + ret = -EPERM; + goto bail; + } + } + + size = sizeof(struct ipa3_desc) * NUM_OF_DESC; + desc = kzalloc(size, GFP_KERNEL); + if (desc == NULL) { + ret = -ENOMEM; + goto bail; + } + + /* NO-OP IC for ensuring that IPA pipeline is empty */ + nop_cmd_pyld = + ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false); + if (!nop_cmd_pyld) { + IPAERR("Failed to construct NOP imm cmd\n"); + ret = -ENOMEM; + goto bail; + } + desc[0].type = IPA_IMM_CMD_DESC; + desc[0].opcode = nop_cmd_pyld->opcode; + desc[0].callback = NULL; + desc[0].user1 = NULL; + desc[0].user2 = 0; + desc[0].pyld = nop_cmd_pyld->data; + desc[0].len = nop_cmd_pyld->len; + + /* NAT_DMA was renamed to TABLE_DMA starting from IPAv4 */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + cmd_name = IPA_IMM_CMD_TABLE_DMA; + + for (cnt = 0; cnt < dma->entries; cnt++) { + cmd.table_index = dma->dma[cnt].table_index; + cmd.base_addr = dma->dma[cnt].base_addr; + cmd.offset = dma->dma[cnt].offset; + cmd.data = dma->dma[cnt].data; + cmd_pyld = ipahal_construct_imm_cmd(cmd_name, &cmd, false); + if (!cmd_pyld) { + IPAERR_RL("Fail to construct nat_dma imm cmd\n"); + continue; + } + desc[1].type = IPA_IMM_CMD_DESC; + desc[1].opcode = cmd_pyld->opcode; + desc[1].callback = NULL; + desc[1].user1 = NULL; + desc[1].user2 = 0; + desc[1].pyld = cmd_pyld->data; + desc[1].len = cmd_pyld->len; + + ret = ipa3_send_cmd(NUM_OF_DESC, desc); + if (ret == -EPERM) + IPAERR("Fail to send immediate command %d\n", cnt); + ipahal_destroy_imm_cmd(cmd_pyld); + } + +bail: + if (desc != NULL) + kfree(desc); + + if (nop_cmd_pyld != NULL) + ipahal_destroy_imm_cmd(nop_cmd_pyld); + + return ret; +} + +/** + * ipa3_nat_free_mem_and_device() - free the NAT memory and remove the device + * @nat_ctx: [in] the IPA NAT memory to free + * + * Called by NAT client driver to free the NAT memory and remove the device + */ +void ipa3_nat_free_mem_and_device(struct ipa3_nat_mem *nat_ctx) +{ + struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 }; + struct ipa3_desc desc; + struct ipahal_imm_cmd_pyld *cmd_pyld; + + IPADBG("\n"); + mutex_lock(&nat_ctx->lock); + + if (nat_ctx->is_sys_mem) { + IPADBG("freeing the dma memory\n"); + dma_free_coherent( + ipa3_ctx->pdev, nat_ctx->size, + nat_ctx->vaddr, nat_ctx->dma_handle); + nat_ctx->size = 0; + nat_ctx->vaddr = NULL; + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + struct ipa_pdn_entry *pdn_entries = + nat_ctx->pdn_mem.base; + + /* zero the PDN table and copy the PDN config table to SRAM */ + IPADBG("zeroing the PDN config table\n"); + memset(pdn_entries, 0, sizeof(struct ipa_pdn_entry) * + IPA_MAX_PDN_NUM); + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM; + mem_cmd.system_addr = nat_ctx->pdn_mem.phys_base; + mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(pdn_config_ofst); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld) { + IPAERR( + "fail construct dma_shared_mem cmd: for pdn table"); + goto lbl_free_pdn; + } + memset(&desc, 0, sizeof(desc)); + desc.opcode = cmd_pyld->opcode; + desc.pyld = cmd_pyld->data; + desc.len = cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + + IPADBG("sending PDN table copy cmd\n"); + if (ipa3_send_cmd(1, &desc)) + IPAERR("Fail to send immediate command\n"); + + ipahal_destroy_imm_cmd(cmd_pyld); +lbl_free_pdn: + IPADBG("freeing the PDN memory\n"); + 
dma_free_coherent(ipa3_ctx->pdev, + nat_ctx->pdn_mem.size, + nat_ctx->pdn_mem.base, + nat_ctx->pdn_mem.phys_base); + } + nat_ctx->is_mapped = false; + nat_ctx->is_sys_mem = false; + nat_ctx->is_dev_init = false; + + mutex_unlock(&nat_ctx->lock); + IPADBG("return\n"); +} + +/** + * ipa3_nat_del_cmd() - Delete a NAT table + * @del: [in] delete table table table parameters + * + * Called by NAT client driver to delete the nat table + * + * Returns: 0 on success, negative on failure + */ +int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del) +{ + struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL; + struct ipa3_desc desc[2]; + struct ipahal_imm_cmd_ip_v4_nat_init cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + bool mem_type_shared = true; + u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET; + int result; + + IPADBG("\n"); + if (ipa3_ctx->nat_mem.is_tmp_mem) { + IPAERR("using temp memory during nat del\n"); + mem_type_shared = false; + base_addr = ipa3_ctx->nat_mem.tmp_dma_handle; + } + + if ((ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) && + (del->public_ip_addr == 0)) { + IPADBG("Bad Parameter\n"); + result = -EPERM; + goto bail; + } + + memset(&desc, 0, sizeof(desc)); + /* NO-OP IC for ensuring that IPA pipeline is empty */ + nop_cmd_pyld = + ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false); + if (!nop_cmd_pyld) { + IPAERR("Failed to construct NOP imm cmd\n"); + result = -ENOMEM; + goto bail; + } + desc[0].opcode = nop_cmd_pyld->opcode; + desc[0].type = IPA_IMM_CMD_DESC; + desc[0].callback = NULL; + desc[0].user1 = NULL; + desc[0].user2 = 0; + desc[0].pyld = nop_cmd_pyld->data; + desc[0].len = nop_cmd_pyld->len; + + cmd.table_index = del->table_index; + cmd.ipv4_rules_addr = base_addr; + cmd.ipv4_rules_addr_shared = mem_type_shared; + cmd.ipv4_expansion_rules_addr = base_addr; + cmd.ipv4_expansion_rules_addr_shared = mem_type_shared; + cmd.index_table_addr = base_addr; + cmd.index_table_addr_shared = mem_type_shared; + cmd.index_table_expansion_addr = base_addr; + cmd.index_table_expansion_addr_shared = mem_type_shared; + cmd.size_base_tables = 0; + cmd.size_expansion_tables = 0; + cmd.public_ip_addr = 0; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false); + if (!cmd_pyld) { + IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n"); + result = -EPERM; + goto destroy_regwrt_imm_cmd; + } + desc[1].opcode = cmd_pyld->opcode; + desc[1].type = IPA_IMM_CMD_DESC; + desc[1].callback = NULL; + desc[1].user1 = NULL; + desc[1].user2 = 0; + desc[1].pyld = cmd_pyld->data; + desc[1].len = cmd_pyld->len; + + if (ipa3_send_cmd(2, desc)) { + IPAERR("Fail to send immediate command\n"); + result = -EPERM; + goto destroy_imm_cmd; + } + + ipa3_ctx->nat_mem.size_base_tables = 0; + ipa3_ctx->nat_mem.size_expansion_tables = 0; + ipa3_ctx->nat_mem.public_ip_addr = 0; + ipa3_ctx->nat_mem.ipv4_rules_addr = 0; + ipa3_ctx->nat_mem.ipv4_expansion_rules_addr = 0; + ipa3_ctx->nat_mem.index_table_addr = 0; + ipa3_ctx->nat_mem.index_table_expansion_addr = 0; + + ipa3_nat_free_mem_and_device(&ipa3_ctx->nat_mem); + IPADBG("return\n"); + result = 0; + +destroy_imm_cmd: + ipahal_destroy_imm_cmd(cmd_pyld); +destroy_regwrt_imm_cmd: + ipahal_destroy_imm_cmd(nop_cmd_pyld); +bail: + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c new file mode 100644 index 000000000000..491a1d1a7358 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -0,0 +1,1375 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipa_qmi_service.h" + +#define IPA_Q6_SVC_VERS 1 +#define IPA_A5_SVC_VERS 1 +#define Q6_QMI_COMPLETION_TIMEOUT (60*HZ) + +#define IPA_A5_SERVICE_SVC_ID 0x31 +#define IPA_A5_SERVICE_INS_ID 1 +#define IPA_Q6_SERVICE_SVC_ID 0x31 +#define IPA_Q6_SERVICE_INS_ID 2 + +#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000 +#define QMI_SEND_REQ_TIMEOUT_MS 60000 + +#define QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS 1000 + +static struct qmi_handle *ipa3_svc_handle; +static void ipa3_a5_svc_recv_msg(struct work_struct *work); +static DECLARE_DELAYED_WORK(work_recv_msg, ipa3_a5_svc_recv_msg); +static struct workqueue_struct *ipa_svc_workqueue; +static struct workqueue_struct *ipa_clnt_req_workqueue; +static struct workqueue_struct *ipa_clnt_resp_workqueue; +static void *curr_conn; +static bool ipa3_qmi_modem_init_fin, ipa3_qmi_indication_fin; +static struct work_struct ipa3_qmi_service_init_work; +static uint32_t ipa_wan_platform; +struct ipa3_qmi_context *ipa3_qmi_ctx; +static bool workqueues_stopped; +static bool ipa3_modem_init_cmplt; +static bool first_time_handshake; +struct mutex ipa3_qmi_lock; + +/* QMI A5 service */ + +static struct msg_desc ipa3_indication_reg_req_desc = { + .max_msg_len = QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01, + .ei_array = ipa3_indication_reg_req_msg_data_v01_ei, +}; +static struct msg_desc ipa3_indication_reg_resp_desc = { + .max_msg_len = QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_INDICATION_REGISTER_RESP_V01, + .ei_array = ipa3_indication_reg_resp_msg_data_v01_ei, +}; +static struct msg_desc ipa3_master_driver_complete_indication_desc = { + .max_msg_len = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01, + .ei_array = ipa3_master_driver_init_complt_ind_msg_data_v01_ei, +}; +static struct msg_desc ipa3_install_fltr_rule_req_desc = { + .max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, + .ei_array = ipa3_install_fltr_rule_req_msg_data_v01_ei, +}; +static struct msg_desc ipa3_install_fltr_rule_resp_desc = { + .max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01, + .ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei, +}; +static struct msg_desc ipa3_filter_installed_notif_req_desc = { + .max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, + .ei_array = ipa3_fltr_installed_notif_req_msg_data_v01_ei, +}; +static struct msg_desc ipa3_filter_installed_notif_resp_desc = { + .max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01, + .ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei, +}; +static struct msg_desc ipa3_config_req_desc = { + .max_msg_len = 
QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_CONFIG_REQ_V01, + .ei_array = ipa3_config_req_msg_data_v01_ei, +}; +static struct msg_desc ipa3_config_resp_desc = { + .max_msg_len = QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_CONFIG_RESP_V01, + .ei_array = ipa3_config_resp_msg_data_v01_ei, +}; + +static struct msg_desc ipa3_init_modem_driver_cmplt_req_desc = { + .max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01, + .ei_array = ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei, +}; + +static struct msg_desc ipa3_init_modem_driver_cmplt_resp_desc = { + .max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01, + .ei_array = ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei, +}; + +static struct msg_desc ipa3_install_fltr_rule_req_ex_desc = { + .max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01, + .ei_array = ipa3_install_fltr_rule_req_ex_msg_data_v01_ei, +}; + +static int ipa3_handle_indication_req(void *req_h, void *req) +{ + struct ipa_indication_reg_req_msg_v01 *indication_req; + struct ipa_indication_reg_resp_msg_v01 resp; + struct ipa_master_driver_init_complt_ind_msg_v01 ind; + int rc; + + indication_req = (struct ipa_indication_reg_req_msg_v01 *)req; + IPAWANDBG("Received INDICATION Request\n"); + + memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01)); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h, + &ipa3_indication_reg_resp_desc, &resp, sizeof(resp)); + ipa3_qmi_indication_fin = true; + /* check if need sending indication to modem */ + if (ipa3_qmi_modem_init_fin) { + IPAWANDBG("send indication to modem (%d)\n", + ipa3_qmi_modem_init_fin); + memset(&ind, 0, sizeof(struct + ipa_master_driver_init_complt_ind_msg_v01)); + ind.master_driver_init_status.result = + IPA_QMI_RESULT_SUCCESS_V01; + rc = qmi_send_ind_from_cb(ipa3_svc_handle, curr_conn, + &ipa3_master_driver_complete_indication_desc, + &ind, + sizeof(ind)); + } else { + IPAWANERR("not send indication\n"); + } + return rc; +} + + +static int ipa3_handle_install_filter_rule_req(void *req_h, void *req) +{ + struct ipa_install_fltr_rule_req_msg_v01 *rule_req; + struct ipa_install_fltr_rule_resp_msg_v01 resp; + uint32_t rule_hdl[MAX_NUM_Q6_RULE]; + int rc = 0, i; + + rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)req; + memset(rule_hdl, 0, sizeof(rule_hdl)); + memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01)); + IPAWANDBG("Received install filter Request\n"); + + rc = ipa3_copy_ul_filter_rule_to_ipa((struct + ipa_install_fltr_rule_req_msg_v01*)req); + if (rc) + IPAWANERR("copy UL rules from modem is failed\n"); + + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + if (rule_req->filter_spec_ex_list_valid == true) { + resp.rule_id_valid = 1; + if (rule_req->filter_spec_ex_list_len > MAX_NUM_Q6_RULE) { + resp.rule_id_len = MAX_NUM_Q6_RULE; + IPAWANERR("installed (%d) max Q6-UL rules ", + MAX_NUM_Q6_RULE); + IPAWANERR("but modem gives total (%u)\n", + rule_req->filter_spec_ex_list_len); + } else { + resp.rule_id_len = + rule_req->filter_spec_ex_list_len; + } + } else { + resp.rule_id_valid = 0; + resp.rule_id_len = 0; + } + + /* construct UL filter rules response to Modem*/ + for (i = 0; i < resp.rule_id_len; i++) { + resp.rule_id[i] = + rule_req->filter_spec_ex_list[i].rule_id; + } + + rc = 
qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h, + &ipa3_install_fltr_rule_resp_desc, &resp, sizeof(resp)); + + IPAWANDBG("Replied to install filter request\n"); + return rc; +} + +static int ipa3_handle_filter_installed_notify_req(void *req_h, void *req) +{ + struct ipa_fltr_installed_notif_resp_msg_v01 resp; + int rc = 0; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + IPAWANDBG("Received filter_install_notify Request\n"); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + + rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h, + &ipa3_filter_installed_notif_resp_desc, + &resp, sizeof(resp)); + + IPAWANDBG("Responsed filter_install_notify Request\n"); + return rc; +} + +static int handle_ipa_config_req(void *req_h, void *req) +{ + struct ipa_config_resp_msg_v01 resp; + int rc; + + memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01)); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + IPAWANDBG("Received IPA CONFIG Request\n"); + rc = ipa_mhi_handle_ipa_config_req( + (struct ipa_config_req_msg_v01 *)req); + if (rc) { + IPAERR("ipa3_mhi_handle_ipa_config_req failed %d\n", rc); + resp.resp.result = IPA_QMI_RESULT_FAILURE_V01; + } + rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h, + &ipa3_config_resp_desc, + &resp, sizeof(resp)); + IPAWANDBG("Responsed IPA CONFIG Request\n"); + return rc; +} + +static int ipa3_handle_modem_init_cmplt_req(void *req_h, void *req) +{ + struct ipa_init_modem_driver_cmplt_req_msg_v01 *cmplt_req; + struct ipa_init_modem_driver_cmplt_resp_msg_v01 resp; + int rc; + + IPAWANDBG("Received QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01\n"); + cmplt_req = (struct ipa_init_modem_driver_cmplt_req_msg_v01 *)req; + + if (ipa3_modem_init_cmplt == false) { + ipa3_modem_init_cmplt = true; + if (ipa3_qmi_modem_init_fin == true) { + IPAWANDBG("load uc related registers (%d)\n", + ipa3_qmi_modem_init_fin); + ipa3_uc_load_notify(); + } + } + + memset(&resp, 0, sizeof(resp)); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + + rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h, + &ipa3_init_modem_driver_cmplt_resp_desc, + &resp, sizeof(resp)); + + IPAWANDBG("Sent QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01\n"); + return rc; +} + +static int ipa3_a5_svc_connect_cb(struct qmi_handle *handle, + void *conn_h) +{ + if (ipa3_svc_handle != handle || !conn_h) + return -EINVAL; + + if (curr_conn) { + IPAWANERR("Service is busy\n"); + return -ECONNREFUSED; + } + curr_conn = conn_h; + return 0; +} + +static int ipa3_a5_svc_disconnect_cb(struct qmi_handle *handle, + void *conn_h) +{ + if (ipa3_svc_handle != handle || curr_conn != conn_h) + return -EINVAL; + + curr_conn = NULL; + return 0; +} + +static int ipa3_a5_svc_req_desc_cb(unsigned int msg_id, + struct msg_desc **req_desc) +{ + int rc; + + switch (msg_id) { + case QMI_IPA_INDICATION_REGISTER_REQ_V01: + *req_desc = &ipa3_indication_reg_req_desc; + rc = sizeof(struct ipa_indication_reg_req_msg_v01); + break; + + case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01: + *req_desc = &ipa3_install_fltr_rule_req_desc; + rc = sizeof(struct ipa_install_fltr_rule_req_msg_v01); + break; + case QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01: + *req_desc = &ipa3_install_fltr_rule_req_ex_desc; + rc = sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01); + break; + case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01: + *req_desc = &ipa3_filter_installed_notif_req_desc; + rc = sizeof(struct ipa_fltr_installed_notif_req_msg_v01); + break; + case QMI_IPA_CONFIG_REQ_V01: + *req_desc = &ipa3_config_req_desc; + rc = 
sizeof(struct ipa_config_req_msg_v01); + break; + case QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01: + *req_desc = &ipa3_init_modem_driver_cmplt_req_desc; + rc = sizeof(struct ipa_init_modem_driver_cmplt_req_msg_v01); + break; + default: + rc = -ENOTSUPP; + break; + } + return rc; +} + +static int ipa3_a5_svc_req_cb(struct qmi_handle *handle, void *conn_h, + void *req_h, unsigned int msg_id, void *req) +{ + int rc; + + if (ipa3_svc_handle != handle || curr_conn != conn_h) + return -EINVAL; + + switch (msg_id) { + case QMI_IPA_INDICATION_REGISTER_REQ_V01: + rc = ipa3_handle_indication_req(req_h, req); + break; + case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01: + rc = ipa3_handle_install_filter_rule_req(req_h, req); + rc = ipa3_wwan_update_mux_channel_prop(); + break; + case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01: + rc = ipa3_handle_filter_installed_notify_req(req_h, req); + break; + case QMI_IPA_CONFIG_REQ_V01: + rc = handle_ipa_config_req(req_h, req); + break; + case QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01: + rc = ipa3_handle_modem_init_cmplt_req(req_h, req); + break; + default: + rc = -ENOTSUPP; + break; + } + return rc; +} + +static void ipa3_a5_svc_recv_msg(struct work_struct *work) +{ + int rc; + + do { + IPAWANDBG_LOW("Notified about a Receive Event"); + rc = qmi_recv_msg(ipa3_svc_handle); + } while (rc == 0); + if (rc != -ENOMSG) + IPAWANERR("Error receiving message\n"); +} + +static void qmi_ipa_a5_svc_ntfy(struct qmi_handle *handle, + enum qmi_event_type event, void *priv) +{ + switch (event) { + case QMI_RECV_MSG: + if (!workqueues_stopped) + queue_delayed_work(ipa_svc_workqueue, + &work_recv_msg, 0); + break; + default: + break; + } +} + +static struct qmi_svc_ops_options ipa3_a5_svc_ops_options = { + .version = 1, + .service_id = IPA_A5_SERVICE_SVC_ID, + .service_vers = IPA_A5_SVC_VERS, + .service_ins = IPA_A5_SERVICE_INS_ID, + .connect_cb = ipa3_a5_svc_connect_cb, + .disconnect_cb = ipa3_a5_svc_disconnect_cb, + .req_desc_cb = ipa3_a5_svc_req_desc_cb, + .req_cb = ipa3_a5_svc_req_cb, +}; + + +/****************************************************/ +/* QMI A5 client ->Q6 */ +/****************************************************/ +static void ipa3_q6_clnt_recv_msg(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa3_work_recv_msg_client, ipa3_q6_clnt_recv_msg); +static void ipa3_q6_clnt_svc_arrive(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa3_work_svc_arrive, ipa3_q6_clnt_svc_arrive); +static void ipa3_q6_clnt_svc_exit(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa3_work_svc_exit, ipa3_q6_clnt_svc_exit); +/* Test client port for IPC Router */ +static struct qmi_handle *ipa_q6_clnt; +static int ipa_q6_clnt_reset; + +static int ipa3_check_qmi_response(int rc, + int req_id, + enum ipa_qmi_result_type_v01 result, + enum ipa_qmi_error_type_v01 error, + char *resp_type) +{ + if (rc < 0) { + if (rc == -ETIMEDOUT && ipa3_rmnet_ctx.ipa_rmnet_ssr) { + IPAWANERR( + "Timeout for qmi request id %d\n", req_id); + return rc; + } + if ((rc == -ENETRESET) || (rc == -ENODEV)) { + IPAWANERR( + "SSR while waiting for qmi request id %d\n", req_id); + return rc; + } + IPAWANERR("Error sending qmi request id %d, rc = %d\n", + req_id, rc); + return rc; + } + if (result != IPA_QMI_RESULT_SUCCESS_V01 && + ipa3_rmnet_ctx.ipa_rmnet_ssr) { + IPAWANERR( + "Got bad response %d from request id %d (error %d)\n", + req_id, result, error); + return result; + } + IPAWANDBG_LOW("Received %s successfully\n", resp_type); + return 0; +} + +static int ipa3_qmi_init_modem_send_sync_msg(void) +{ + 
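+	/*
+	 * Builds QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 for the modem: each SRAM
+	 * region handed over (modem header table, v4/v6 routing and filter
+	 * tables, the modem memory block) is described by offsets taken from
+	 * the IPA memory partition map and shifted by the SMEM-restricted
+	 * prefix, e.g.
+	 *
+	 *	modem_offset_start = IPA_MEM_PART(modem_hdr_ofst) +
+	 *			     ipa3_get_smem_restr_bytes();
+	 *
+	 * so every offset already accounts for the restricted bytes at the
+	 * start of shared memory.
+	 */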
struct ipa_init_modem_driver_req_msg_v01 req; + struct ipa_init_modem_driver_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc; + u16 smem_restr_bytes = ipa3_get_smem_restr_bytes(); + + memset(&req, 0, sizeof(struct ipa_init_modem_driver_req_msg_v01)); + memset(&resp, 0, sizeof(struct ipa_init_modem_driver_resp_msg_v01)); + + req.platform_type_valid = true; + req.platform_type = ipa_wan_platform; + + req.hdr_tbl_info_valid = (IPA_MEM_PART(modem_hdr_size) != 0); + req.hdr_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_hdr_ofst) + smem_restr_bytes; + req.hdr_tbl_info.modem_offset_end = IPA_MEM_PART(modem_hdr_ofst) + + smem_restr_bytes + IPA_MEM_PART(modem_hdr_size) - 1; + + req.v4_route_tbl_info_valid = true; + req.v4_route_tbl_info.route_tbl_start_addr = + IPA_MEM_PART(v4_rt_nhash_ofst) + smem_restr_bytes; + req.v4_route_tbl_info.num_indices = + IPA_MEM_PART(v4_modem_rt_index_hi); + req.v6_route_tbl_info_valid = true; + + req.v6_route_tbl_info.route_tbl_start_addr = + IPA_MEM_PART(v6_rt_nhash_ofst) + smem_restr_bytes; + req.v6_route_tbl_info.num_indices = + IPA_MEM_PART(v6_modem_rt_index_hi); + + req.v4_filter_tbl_start_addr_valid = true; + req.v4_filter_tbl_start_addr = + IPA_MEM_PART(v4_flt_nhash_ofst) + smem_restr_bytes; + + req.v6_filter_tbl_start_addr_valid = true; + req.v6_filter_tbl_start_addr = + IPA_MEM_PART(v6_flt_nhash_ofst) + smem_restr_bytes; + + req.modem_mem_info_valid = (IPA_MEM_PART(modem_size) != 0); + req.modem_mem_info.block_start_addr = + IPA_MEM_PART(modem_ofst) + smem_restr_bytes; + req.modem_mem_info.size = IPA_MEM_PART(modem_size); + + req.ctrl_comm_dest_end_pt_valid = true; + req.ctrl_comm_dest_end_pt = + ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + + req.hdr_proc_ctx_tbl_info_valid = + (IPA_MEM_PART(modem_hdr_proc_ctx_size) != 0); + req.hdr_proc_ctx_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + smem_restr_bytes; + req.hdr_proc_ctx_tbl_info.modem_offset_end = + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + + IPA_MEM_PART(modem_hdr_proc_ctx_size) + smem_restr_bytes - 1; + + req.zip_tbl_info_valid = (IPA_MEM_PART(modem_comp_decomp_size) != 0); + req.zip_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes; + req.zip_tbl_info.modem_offset_end = + IPA_MEM_PART(modem_comp_decomp_ofst) + + IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes - 1; + + req.v4_hash_route_tbl_info_valid = true; + req.v4_hash_route_tbl_info.route_tbl_start_addr = + IPA_MEM_PART(v4_rt_hash_ofst) + smem_restr_bytes; + req.v4_hash_route_tbl_info.num_indices = + IPA_MEM_PART(v4_modem_rt_index_hi); + + req.v6_hash_route_tbl_info_valid = true; + req.v6_hash_route_tbl_info.route_tbl_start_addr = + IPA_MEM_PART(v6_rt_hash_ofst) + smem_restr_bytes; + req.v6_hash_route_tbl_info.num_indices = + IPA_MEM_PART(v6_modem_rt_index_hi); + + req.v4_hash_filter_tbl_start_addr_valid = true; + req.v4_hash_filter_tbl_start_addr = + IPA_MEM_PART(v4_flt_hash_ofst) + smem_restr_bytes; + + req.v6_hash_filter_tbl_start_addr_valid = true; + req.v6_hash_filter_tbl_start_addr = + IPA_MEM_PART(v6_flt_hash_ofst) + smem_restr_bytes; + + if (!ipa3_uc_loaded_check()) { /* First time boot */ + req.is_ssr_bootup_valid = false; + req.is_ssr_bootup = 0; + } else { /* After SSR boot */ + req.is_ssr_bootup_valid = true; + req.is_ssr_bootup = 1; + } + + IPAWANDBG("platform_type %d\n", req.platform_type); + IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n", + req.hdr_tbl_info.modem_offset_start); + IPAWANDBG("hdr_tbl_info.modem_offset_end %d\n", + 
req.hdr_tbl_info.modem_offset_end); + IPAWANDBG("v4_route_tbl_info.route_tbl_start_addr %d\n", + req.v4_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v4_route_tbl_info.num_indices %d\n", + req.v4_route_tbl_info.num_indices); + IPAWANDBG("v6_route_tbl_info.route_tbl_start_addr %d\n", + req.v6_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v6_route_tbl_info.num_indices %d\n", + req.v6_route_tbl_info.num_indices); + IPAWANDBG("v4_filter_tbl_start_addr %d\n", + req.v4_filter_tbl_start_addr); + IPAWANDBG("v6_filter_tbl_start_addr %d\n", + req.v6_filter_tbl_start_addr); + IPAWANDBG("modem_mem_info.block_start_addr %d\n", + req.modem_mem_info.block_start_addr); + IPAWANDBG("modem_mem_info.size %d\n", + req.modem_mem_info.size); + IPAWANDBG("ctrl_comm_dest_end_pt %d\n", + req.ctrl_comm_dest_end_pt); + IPAWANDBG("is_ssr_bootup %d\n", + req.is_ssr_bootup); + IPAWANDBG("v4_hash_route_tbl_info.route_tbl_start_addr %d\n", + req.v4_hash_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v4_hash_route_tbl_info.num_indices %d\n", + req.v4_hash_route_tbl_info.num_indices); + IPAWANDBG("v6_hash_route_tbl_info.route_tbl_start_addr %d\n", + req.v6_hash_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v6_hash_route_tbl_info.num_indices %d\n", + req.v6_hash_route_tbl_info.num_indices); + IPAWANDBG("v4_hash_filter_tbl_start_addr %d\n", + req.v4_hash_filter_tbl_start_addr); + IPAWANDBG("v6_hash_filter_tbl_start_addr %d\n", + req.v6_hash_filter_tbl_start_addr); + + req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01; + req_desc.ei_array = ipa3_init_modem_driver_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_RESP_V01; + resp_desc.ei_array = ipa3_init_modem_driver_resp_msg_data_v01_ei; + + pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req), + &resp_desc, &resp, sizeof(resp), + QMI_SEND_REQ_TIMEOUT_MS); + pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n"); + return ipa3_check_qmi_response(rc, + QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_init_modem_driver_resp_msg_v01"); +} + +/* sending filter-install-request to modem*/ +int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req) +{ + struct ipa_install_fltr_rule_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc; + + /* check if the filter rules from IPACM is valid */ + if (req->filter_spec_ex_list_len == 0) { + IPAWANDBG("IPACM pass zero rules to Q6\n"); + } else { + IPAWANDBG("IPACM pass %u rules to Q6\n", + req->filter_spec_ex_list_len); + } + + mutex_lock(&ipa3_qmi_lock); + if (ipa3_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[ + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]), + req, + sizeof(struct ipa_install_fltr_rule_req_msg_v01)); + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++; + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10; + } + mutex_unlock(&ipa3_qmi_lock); + + req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01; + req_desc.ei_array = ipa3_install_fltr_rule_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01)); + resp_desc.max_msg_len = + 
QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01; + resp_desc.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei; + + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, + req, + sizeof(struct ipa_install_fltr_rule_req_msg_v01), + &resp_desc, &resp, sizeof(resp), + QMI_SEND_REQ_TIMEOUT_MS); + return ipa3_check_qmi_response(rc, + QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_install_filter"); +} + +/* sending filter-install-request to modem*/ +int ipa3_qmi_filter_request_ex_send( + struct ipa_install_fltr_rule_req_ex_msg_v01 *req) +{ + struct ipa_install_fltr_rule_resp_ex_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc; + + /* check if the filter rules from IPACM is valid */ + if (req->filter_spec_ex_list_len == 0) { + IPAWANDBG("IPACM pass zero rules to Q6\n"); + } else { + IPAWANDBG("IPACM pass %u rules to Q6\n", + req->filter_spec_ex_list_len); + } + + mutex_lock(&ipa3_qmi_lock); + if (ipa3_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_ex_msg_cache[ + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg]), + req, + sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01)); + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg++; + ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg %= 10; + } + mutex_unlock(&ipa3_qmi_lock); + + req_desc.max_msg_len = + QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01; + req_desc.ei_array = ipa3_install_fltr_rule_req_ex_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_ex_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01; + resp_desc.ei_array = ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei; + + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, + req, + sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01), + &resp_desc, &resp, sizeof(resp), + QMI_SEND_REQ_TIMEOUT_MS); + return ipa3_check_qmi_response(rc, + QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_install_filter"); +} + +int ipa3_qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req) +{ + struct ipa_enable_force_clear_datapath_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc = 0; + + + if (!req || !req->source_pipe_bitmask) { + IPAWANERR("invalid params\n"); + return -EINVAL; + } + + req_desc.max_msg_len = + QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01; + req_desc.ei_array = + ipa3_enable_force_clear_datapath_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01; + resp_desc.ei_array = + ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei; + + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + sizeof(*req), + &resp_desc, &resp, sizeof(resp), + QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); + if (rc < 0) { + IPAWANERR("send req failed %d\n", rc); + return rc; + } + if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) { + IPAWANERR("filter_notify failed %d\n", + resp.resp.result); + return 
resp.resp.result; + } + IPAWANDBG("SUCCESS\n"); + return rc; +} + +int ipa3_qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req) +{ + struct ipa_disable_force_clear_datapath_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc = 0; + + + if (!req) { + IPAWANERR("invalid params\n"); + return -EINVAL; + } + + req_desc.max_msg_len = + QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01; + req_desc.ei_array = + ipa3_disable_force_clear_datapath_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01; + resp_desc.ei_array = + ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei; + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + sizeof(*req), + &resp_desc, &resp, sizeof(resp), + QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); + if (rc < 0) { + IPAWANERR("send req failed %d\n", rc); + return rc; + } + if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) { + IPAWANERR("filter_notify failed %d\n", + resp.resp.result); + return resp.resp.result; + } + IPAWANDBG("SUCCESS\n"); + return rc; +} + +/* sending filter-installed-notify-request to modem*/ +int ipa3_qmi_filter_notify_send( + struct ipa_fltr_installed_notif_req_msg_v01 *req) +{ + struct ipa_fltr_installed_notif_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc = 0; + + /* check if the filter rules from IPACM is valid */ + if (req->rule_id_len == 0) { + IPAWANERR(" delete UL filter rule for pipe %d\n", + req->source_pipe_index); + return -EINVAL; + } else if (req->rule_id_len > QMI_IPA_MAX_FILTERS_V01) { + IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n", + req->source_pipe_index, + req->rule_id_len); + return -EINVAL; + } + + if (req->source_pipe_index == -1) { + IPAWANERR("Source pipe index invalid\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_qmi_lock); + if (ipa3_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[ + ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]), + req, + sizeof(struct ipa_fltr_installed_notif_req_msg_v01)); + ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++; + ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10; + } + mutex_unlock(&ipa3_qmi_lock); + + req_desc.max_msg_len = + QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01; + req_desc.ei_array = ipa3_fltr_installed_notif_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01; + resp_desc.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei; + + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + sizeof(struct ipa_fltr_installed_notif_req_msg_v01), + &resp_desc, &resp, sizeof(resp), + QMI_SEND_REQ_TIMEOUT_MS); + return ipa3_check_qmi_response(rc, + QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_fltr_installed_notif_resp"); +} + +static void ipa3_q6_clnt_recv_msg(struct work_struct *work) +{ + int rc; + + do { + IPAWANDBG_LOW("Notified about a Receive 
Event"); + rc = qmi_recv_msg(ipa_q6_clnt); + } while (rc == 0); + if (rc != -ENOMSG) + IPAWANERR("Error receiving message\n"); +} + +static void ipa3_q6_clnt_notify(struct qmi_handle *handle, + enum qmi_event_type event, void *notify_priv) +{ + switch (event) { + case QMI_RECV_MSG: + IPAWANDBG_LOW("client qmi recv message called"); + if (!workqueues_stopped) + queue_delayed_work(ipa_clnt_resp_workqueue, + &ipa3_work_recv_msg_client, 0); + break; + default: + break; + } +} + +static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id, + void *msg, unsigned int msg_len, + void *ind_cb_priv) +{ + struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind; + struct msg_desc qmi_ind_desc; + int rc = 0; + + if (handle != ipa_q6_clnt) { + IPAWANERR("Wrong client\n"); + return; + } + + if (msg_id == QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01) { + memset(&qmi_ind, 0, sizeof( + struct ipa_data_usage_quota_reached_ind_msg_v01)); + qmi_ind_desc.max_msg_len = + QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01; + qmi_ind_desc.msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01; + qmi_ind_desc.ei_array = + ipa3_data_usage_quota_reached_ind_msg_data_v01_ei; + + rc = qmi_kernel_decode(&qmi_ind_desc, &qmi_ind, msg, msg_len); + if (rc < 0) { + IPAWANERR("Error decoding msg_id %d\n", msg_id); + return; + } + IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n", + qmi_ind.apn.mux_id, + (unsigned long int) qmi_ind.apn.num_Mbytes); + ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id, + IPA_UPSTEAM_MODEM); + } +} + +static void ipa3_q6_clnt_svc_arrive(struct work_struct *work) +{ + int rc; + struct ipa_master_driver_init_complt_ind_msg_v01 ind; + + /* Create a Local client port for QMI communication */ + ipa_q6_clnt = qmi_handle_create(ipa3_q6_clnt_notify, NULL); + if (!ipa_q6_clnt) { + IPAWANERR("QMI client handle alloc failed\n"); + return; + } + + IPAWANDBG("Lookup server name, get client-hdl(%pK)\n", + ipa_q6_clnt); + rc = qmi_connect_to_service(ipa_q6_clnt, + IPA_Q6_SERVICE_SVC_ID, + IPA_Q6_SVC_VERS, + IPA_Q6_SERVICE_INS_ID); + if (rc < 0) { + IPAWANERR("Server not found\n"); + qmi_handle_destroy(ipa_q6_clnt); + ipa_q6_clnt = NULL; + return; + } + + rc = qmi_register_ind_cb(ipa_q6_clnt, ipa3_q6_clnt_ind_cb, NULL); + if (rc < 0) + IPAWANERR("Unable to register for indications\n"); + + ipa_q6_clnt_reset = 0; + IPAWANDBG("Q6 QMI service available now\n"); + /* Initialize modem IPA-driver */ + IPAWANDBG("send ipa3_qmi_init_modem_send_sync_msg to modem\n"); + rc = ipa3_qmi_init_modem_send_sync_msg(); + if ((rc == -ENETRESET) || (rc == -ENODEV)) { + IPAWANERR( + "ipa3_qmi_init_modem_send_sync_msg failed due to SSR!\n"); + /* Cleanup will take place when ipa3_wwan_remove is called */ + return; + } + if (rc != 0) { + IPAWANERR("ipa3_qmi_init_modem_send_sync_msg failed\n"); + /* + * Hardware not responding. + * This is a very unexpected scenario, which requires a kernel + * panic in order to force dumps for QMI/Q6 side analysis. 
+ */ + BUG(); + } + ipa3_qmi_modem_init_fin = true; + + /* got modem_init_cmplt_req already, load uc-related register */ + if (ipa3_modem_init_cmplt == true) { + IPAWANDBG("load uc related registers (%d)\n", + ipa3_modem_init_cmplt); + ipa3_uc_load_notify(); + } + + /* In cold-bootup, first_time_handshake = false */ + ipa3_q6_handshake_complete(first_time_handshake); + first_time_handshake = true; + IPAWANDBG("complete, ipa3_qmi_modem_init_fin : %d\n", + ipa3_qmi_modem_init_fin); + + if (ipa3_qmi_indication_fin) { + IPAWANDBG("send indication to modem (%d)\n", + ipa3_qmi_indication_fin); + memset(&ind, 0, sizeof(struct + ipa_master_driver_init_complt_ind_msg_v01)); + ind.master_driver_init_status.result = + IPA_QMI_RESULT_SUCCESS_V01; + rc = qmi_send_ind(ipa3_svc_handle, curr_conn, + &ipa3_master_driver_complete_indication_desc, + &ind, + sizeof(ind)); + IPAWANDBG("ipa_qmi_service_client good\n"); + } else { + IPAWANERR("not send indication (%d)\n", + ipa3_qmi_indication_fin); + } +} + + +static void ipa3_q6_clnt_svc_exit(struct work_struct *work) +{ + qmi_handle_destroy(ipa_q6_clnt); + ipa_q6_clnt_reset = 1; + ipa_q6_clnt = NULL; +} + + +static int ipa3_q6_clnt_svc_event_notify(struct notifier_block *this, + unsigned long code, + void *_cmd) +{ + IPAWANDBG("event %ld\n", code); + switch (code) { + case QMI_SERVER_ARRIVE: + if (!workqueues_stopped) + queue_delayed_work(ipa_clnt_req_workqueue, + &ipa3_work_svc_arrive, 0); + break; + case QMI_SERVER_EXIT: + if (!workqueues_stopped) + queue_delayed_work(ipa_clnt_req_workqueue, + &ipa3_work_svc_exit, 0); + break; + default: + break; + } + return 0; +} + + +static struct notifier_block ipa3_q6_clnt_nb = { + .notifier_call = ipa3_q6_clnt_svc_event_notify, +}; + +static void ipa3_qmi_service_init_worker(struct work_struct *work) +{ + int rc; + + /* Initialize QMI-service*/ + IPAWANDBG("IPA A7 QMI init OK :>>>>\n"); + + /* start the QMI msg cache */ + ipa3_qmi_ctx = vzalloc(sizeof(*ipa3_qmi_ctx)); + if (!ipa3_qmi_ctx) + return; + + ipa3_qmi_ctx->modem_cfg_emb_pipe_flt = + ipa3_get_modem_cfg_emb_pipe_flt(); + + ipa_svc_workqueue = create_singlethread_workqueue("ipa_A7_svc"); + if (!ipa_svc_workqueue) { + IPAWANERR("Creating ipa_A7_svc workqueue failed\n"); + vfree(ipa3_qmi_ctx); + ipa3_qmi_ctx = NULL; + return; + } + + ipa3_svc_handle = qmi_handle_create(qmi_ipa_a5_svc_ntfy, NULL); + if (!ipa3_svc_handle) { + IPAWANERR("Creating ipa_A7_svc qmi handle failed\n"); + goto destroy_ipa_A7_svc_wq; + } + + /* + * Setting the current connection to NULL, as due to a race between + * server and client clean-up in SSR, the disconnect_cb might not + * have necessarily been called + */ + curr_conn = NULL; + + rc = qmi_svc_register(ipa3_svc_handle, &ipa3_a5_svc_ops_options); + if (rc < 0) { + IPAWANERR("Registering ipa_a5 svc failed %d\n", + rc); + goto destroy_qmi_handle; + } + + /* Initialize QMI-client */ + + ipa_clnt_req_workqueue = create_singlethread_workqueue("clnt_req"); + if (!ipa_clnt_req_workqueue) { + IPAWANERR("Creating clnt_req workqueue failed\n"); + goto deregister_qmi_srv; + } + + ipa_clnt_resp_workqueue = create_singlethread_workqueue("clnt_resp"); + if (!ipa_clnt_resp_workqueue) { + IPAWANERR("Creating clnt_resp workqueue failed\n"); + goto destroy_clnt_req_wq; + } + + rc = qmi_svc_event_notifier_register(IPA_Q6_SERVICE_SVC_ID, + IPA_Q6_SVC_VERS, + IPA_Q6_SERVICE_INS_ID, &ipa3_q6_clnt_nb); + if (rc < 0) { + IPAWANERR("notifier register failed\n"); + goto destroy_clnt_resp_wq; + } + + /* get Q6 service and start send modem-initial to Q6 */ + 
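+	/*
+	 * The Q6 client handle itself is created later, from
+	 * ipa3_q6_clnt_svc_arrive(), once QMI_SERVER_ARRIVE is delivered to
+	 * ipa3_q6_clnt_nb; only the service, workqueues and notifier are set
+	 * up here.
+	 */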
IPAWANDBG("wait service available\n"); + return; + +destroy_clnt_resp_wq: + destroy_workqueue(ipa_clnt_resp_workqueue); + ipa_clnt_resp_workqueue = NULL; +destroy_clnt_req_wq: + destroy_workqueue(ipa_clnt_req_workqueue); + ipa_clnt_req_workqueue = NULL; +deregister_qmi_srv: + qmi_svc_unregister(ipa3_svc_handle); +destroy_qmi_handle: + qmi_handle_destroy(ipa3_svc_handle); + ipa3_svc_handle = 0; +destroy_ipa_A7_svc_wq: + destroy_workqueue(ipa_svc_workqueue); + ipa_svc_workqueue = NULL; + vfree(ipa3_qmi_ctx); + ipa3_qmi_ctx = NULL; +} + +int ipa3_qmi_service_init(uint32_t wan_platform_type) +{ + ipa_wan_platform = wan_platform_type; + ipa3_qmi_modem_init_fin = false; + ipa3_qmi_indication_fin = false; + ipa3_modem_init_cmplt = false; + workqueues_stopped = false; + + if (!ipa3_svc_handle) { + INIT_WORK(&ipa3_qmi_service_init_work, + ipa3_qmi_service_init_worker); + schedule_work(&ipa3_qmi_service_init_work); + } + return 0; +} + +void ipa3_qmi_service_exit(void) +{ + int ret = 0; + + workqueues_stopped = true; + + /* qmi-service */ + if (ipa3_svc_handle) { + ret = qmi_svc_unregister(ipa3_svc_handle); + if (ret < 0) + IPAWANERR("unregister qmi handle %pK failed, ret=%d\n", + ipa3_svc_handle, ret); + } + if (ipa_svc_workqueue) { + flush_workqueue(ipa_svc_workqueue); + destroy_workqueue(ipa_svc_workqueue); + ipa_svc_workqueue = NULL; + } + + if (ipa3_svc_handle) { + ret = qmi_handle_destroy(ipa3_svc_handle); + if (ret < 0) + IPAWANERR("Error destroying qmi handle %pK, ret=%d\n", + ipa3_svc_handle, ret); + } + + /* qmi-client */ + + /* Unregister from events */ + ret = qmi_svc_event_notifier_unregister(IPA_Q6_SERVICE_SVC_ID, + IPA_Q6_SVC_VERS, + IPA_Q6_SERVICE_INS_ID, &ipa3_q6_clnt_nb); + if (ret < 0) + IPAWANERR( + "Error qmi_svc_event_notifier_unregister service %d, ret=%d\n", + IPA_Q6_SERVICE_SVC_ID, ret); + + /* Release client handle */ + ipa3_q6_clnt_svc_exit(0); + + if (ipa_clnt_req_workqueue) { + destroy_workqueue(ipa_clnt_req_workqueue); + ipa_clnt_req_workqueue = NULL; + } + if (ipa_clnt_resp_workqueue) { + destroy_workqueue(ipa_clnt_resp_workqueue); + ipa_clnt_resp_workqueue = NULL; + } + + /* clean the QMI msg cache */ + mutex_lock(&ipa3_qmi_lock); + if (ipa3_qmi_ctx != NULL) { + vfree(ipa3_qmi_ctx); + ipa3_qmi_ctx = NULL; + } + mutex_unlock(&ipa3_qmi_lock); + + ipa3_svc_handle = 0; + ipa3_qmi_modem_init_fin = false; + ipa3_qmi_indication_fin = false; + ipa3_modem_init_cmplt = false; +} + +void ipa3_qmi_stop_workqueues(void) +{ + IPAWANDBG("Stopping all QMI workqueues\n"); + + /* Stopping all workqueues so new work won't be scheduled */ + workqueues_stopped = true; + + /* Making sure that the current scheduled work won't be executed */ + cancel_delayed_work(&work_recv_msg); + cancel_delayed_work(&ipa3_work_recv_msg_client); + cancel_delayed_work(&ipa3_work_svc_arrive); + cancel_delayed_work(&ipa3_work_svc_exit); +} + + +/* voting for bus BW to ipa_rm*/ +int ipa3_vote_for_bus_bw(uint32_t *bw_mbps) +{ + struct ipa_rm_perf_profile profile; + int ret; + + if (bw_mbps == NULL) { + IPAWANERR("Bus BW is invalid\n"); + return -EINVAL; + } + + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = *bw_mbps; + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, + &profile); + if (ret) + IPAWANERR("Failed to set perf profile to BW %u\n", + profile.max_supported_bandwidth_mbps); + else + IPAWANDBG("Succeeded to set perf profile to BW %u\n", + profile.max_supported_bandwidth_mbps); + + return ret; +} + +int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 
*req, + struct ipa_get_data_stats_resp_msg_v01 *resp) +{ + struct msg_desc req_desc, resp_desc; + int rc; + + req_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_GET_DATA_STATS_REQ_V01; + req_desc.ei_array = ipa3_get_data_stats_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_GET_DATA_STATS_RESP_V01; + resp_desc.ei_array = ipa3_get_data_stats_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n"); + + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + sizeof(struct ipa_get_data_stats_req_msg_v01), + &resp_desc, resp, + sizeof(struct ipa_get_data_stats_resp_msg_v01), + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + IPAWANDBG_LOW("QMI_IPA_GET_DATA_STATS_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_GET_DATA_STATS_REQ_V01, resp->resp.result, + resp->resp.error, "ipa_get_data_stats_resp_msg_v01"); +} + +int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req, + struct ipa_get_apn_data_stats_resp_msg_v01 *resp) +{ + struct msg_desc req_desc, resp_desc; + int rc; + + req_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_REQ_V01; + req_desc.ei_array = ipa3_get_apn_data_stats_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_RESP_V01; + resp_desc.ei_array = ipa3_get_apn_data_stats_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n"); + + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + sizeof(struct ipa_get_apn_data_stats_req_msg_v01), + &resp_desc, resp, + sizeof(struct ipa_get_apn_data_stats_resp_msg_v01), + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + IPAWANDBG_LOW("QMI_IPA_GET_APN_DATA_STATS_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_GET_APN_DATA_STATS_REQ_V01, resp->resp.result, + resp->resp.error, "ipa_get_apn_data_stats_req_msg_v01"); +} + +int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req) +{ + struct ipa_set_data_usage_quota_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc; + + memset(&resp, 0, sizeof(struct ipa_set_data_usage_quota_resp_msg_v01)); + + req_desc.max_msg_len = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01; + req_desc.ei_array = ipa3_set_data_usage_quota_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01; + resp_desc.ei_array = ipa3_set_data_usage_quota_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + sizeof(struct ipa_set_data_usage_quota_req_msg_v01), + &resp_desc, &resp, sizeof(resp), + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + IPAWANDBG_LOW("QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_set_data_usage_quota_req_msg_v01"); +} + +int ipa3_qmi_stop_data_qouta(void) +{ + struct ipa_stop_data_usage_quota_req_msg_v01 req; + struct ipa_stop_data_usage_quota_resp_msg_v01 resp; + struct msg_desc req_desc, 
resp_desc; + int rc; + + memset(&req, 0, sizeof(struct ipa_stop_data_usage_quota_req_msg_v01)); + memset(&resp, 0, sizeof(struct ipa_stop_data_usage_quota_resp_msg_v01)); + + req_desc.max_msg_len = + QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01; + req_desc.ei_array = ipa3_stop_data_usage_quota_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01; + resp_desc.ei_array = ipa3_stop_data_usage_quota_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req), + &resp_desc, &resp, sizeof(resp), + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + IPAWANDBG_LOW("QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01"); +} + +void ipa3_qmi_init(void) +{ + mutex_init(&ipa3_qmi_lock); +} + +void ipa3_qmi_cleanup(void) +{ + mutex_destroy(&ipa3_qmi_lock); +} + diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h new file mode 100644 index 000000000000..fa334c903c9e --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h @@ -0,0 +1,345 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef IPA_QMI_SERVICE_H +#define IPA_QMI_SERVICE_H + +#include +#include +#include +#include +#include "ipa_i.h" +#include + +/** + * name of the DL wwan default routing tables for v4 and v6 + */ +#define IPA_A7_QMAP_HDR_NAME "ipa_qmap_hdr" +#define IPA_DFLT_WAN_RT_TBL_NAME "ipa_dflt_wan_rt" +#define MAX_NUM_Q6_RULE 35 +#define MAX_NUM_QMI_RULE_CACHE 10 +#define DEV_NAME "ipa-wan" +#define SUBSYS_MODEM "modem" + +#define IPAWANDBG(fmt, args...) \ + do { \ + pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +#define IPAWANDBG_LOW(fmt, args...) \ + do { \ + pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANERR(fmt, args...) \ + do { \ + pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANINFO(fmt, args...) 
\ + do { \ + pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +extern struct ipa3_qmi_context *ipa3_qmi_ctx; + +struct ipa3_qmi_context { +struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE]; +u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE]; +int num_ipa_install_fltr_rule_req_msg; +struct ipa_install_fltr_rule_req_msg_v01 + ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE]; +int num_ipa_install_fltr_rule_req_ex_msg; +struct ipa_install_fltr_rule_req_ex_msg_v01 + ipa_install_fltr_rule_req_ex_msg_cache[MAX_NUM_QMI_RULE_CACHE]; +int num_ipa_fltr_installed_notif_req_msg; +struct ipa_fltr_installed_notif_req_msg_v01 + ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE]; +bool modem_cfg_emb_pipe_flt; +}; + +struct ipa3_rmnet_mux_val { + uint32_t mux_id; + int8_t vchannel_name[IFNAMSIZ]; + bool mux_channel_set; + bool ul_flt_reg; + bool mux_hdr_set; + uint32_t hdr_hdl; +}; + +extern struct qmi_elem_info + ipa3_init_modem_driver_req_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_init_modem_driver_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_indication_reg_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_indication_reg_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_master_driver_init_complt_ind_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_config_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_config_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_get_data_stats_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[]; +extern struct qmi_elem_info + ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[]; + +/** + * struct ipa3_rmnet_context - IPA rmnet context + * @ipa_rmnet_ssr: support modem SSR + * @polling_interval: Requested interval for polling tethered statistics + * @metered_mux_id: The mux ID on which quota has been set + */ +struct 
ipa3_rmnet_context { + bool ipa_rmnet_ssr; + u64 polling_interval; + u32 metered_mux_id; +}; + +extern struct ipa3_rmnet_context ipa3_rmnet_ctx; + +#ifdef CONFIG_RMNET_IPA3 + +int ipa3_qmi_service_init(uint32_t wan_platform_type); + +void ipa3_qmi_service_exit(void); + +/* sending filter-install-request to modem*/ +int ipa3_qmi_filter_request_send( + struct ipa_install_fltr_rule_req_msg_v01 *req); + +int ipa3_qmi_filter_request_ex_send( + struct ipa_install_fltr_rule_req_ex_msg_v01 *req); + +/* sending filter-installed-notify-request to modem*/ +int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 + *req); + +/* voting for bus BW to ipa_rm*/ +int ipa3_vote_for_bus_bw(uint32_t *bw_mbps); + +int ipa3_qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req); + +int ipa3_qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req); + +int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01 + *rule_req); + +int ipa3_wwan_update_mux_channel_prop(void); + +int ipa3_wan_ioctl_init(void); + +void ipa3_wan_ioctl_stop_qmi_messages(void); + +void ipa3_wan_ioctl_enable_qmi_messages(void); + +void ipa3_wan_ioctl_deinit(void); + +void ipa3_qmi_stop_workqueues(void); + +int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats + *data); + +int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data); + +void ipa3_broadcast_quota_reach_ind(uint32_t mux_id, + enum ipa_upstream_type upstream_type); + +int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe + *data); + +int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, + bool reset); + +int rmnet_ipa3_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data); + +int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data); + +int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, + struct ipa_get_data_stats_resp_msg_v01 *resp); + +int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req, + struct ipa_get_apn_data_stats_resp_msg_v01 *resp); + +int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req); + +int ipa3_qmi_stop_data_qouta(void); + +void ipa3_q6_handshake_complete(bool ssr_bootup); + +void ipa3_qmi_init(void); + +void ipa3_qmi_cleanup(void); + +#else /* CONFIG_RMNET_IPA3 */ + +static inline int ipa3_qmi_service_init(uint32_t wan_platform_type) +{ + return -EPERM; +} + +static inline void ipa3_qmi_service_exit(void) { } + +/* sending filter-install-request to modem*/ +static inline int ipa3_qmi_filter_request_send( + struct ipa_install_fltr_rule_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_qmi_filter_request_ex_send( + struct ipa_install_fltr_rule_req_ex_msg_v01 *req) +{ + return -EPERM; +} + +/* sending filter-installed-notify-request to modem*/ +static inline int ipa3_qmi_filter_notify_send( + struct ipa_fltr_installed_notif_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_copy_ul_filter_rule_to_ipa( + struct ipa_install_fltr_rule_req_msg_v01 *rule_req) +{ + return -EPERM; +} + +static inline int 
ipa3_wwan_update_mux_channel_prop(void) +{ + return -EPERM; +} + +static inline int ipa3_wan_ioctl_init(void) +{ + return -EPERM; +} + +static inline void ipa3_wan_ioctl_stop_qmi_messages(void) { } + +static inline void ipa3_wan_ioctl_enable_qmi_messages(void) { } + +static inline void ipa3_wan_ioctl_deinit(void) { } + +static inline void ipa3_qmi_stop_workqueues(void) { } + +static inline int ipa3_vote_for_bus_bw(uint32_t *bw_mbps) +{ + return -EPERM; +} + +static inline int rmnet_ipa3_poll_tethering_stats( + struct wan_ioctl_poll_tethering_stats *data) +{ + return -EPERM; +} + +static inline int rmnet_ipa3_set_data_quota( + struct wan_ioctl_set_data_quota *data) +{ + return -EPERM; +} + +static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id, + enum ipa_upstream_type upstream_type) { } + +static inline int ipa3_qmi_get_data_stats( + struct ipa_get_data_stats_req_msg_v01 *req, + struct ipa_get_data_stats_resp_msg_v01 *resp) +{ + return -EPERM; +} + +static inline int ipa3_qmi_get_network_stats( + struct ipa_get_apn_data_stats_req_msg_v01 *req, + struct ipa_get_apn_data_stats_resp_msg_v01 *resp) +{ + return -EPERM; +} + +static inline int ipa3_qmi_set_data_quota( + struct ipa_set_data_usage_quota_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa3_qmi_stop_data_qouta(void) +{ + return -EPERM; +} + +static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { } + +static inline void ipa3_qmi_init(void) +{ +} + +static inline void ipa3_qmi_cleanup(void) +{ +} + +#endif /* CONFIG_RMNET_IPA3 */ + +#endif /* IPA_QMI_SERVICE_H */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c new file mode 100644 index 000000000000..8a9f8dbbf31f --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c @@ -0,0 +1,2926 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include + +#include + +/* Type Definitions */ +static struct qmi_elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, + modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_route_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_route_tbl_info_type_v01, + route_tbl_start_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_route_tbl_info_type_v01, + num_indices), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_modem_mem_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_modem_mem_info_type_v01, + block_start_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_modem_mem_info_type_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01, + modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_zip_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_zip_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_zip_tbl_info_type_v01, + modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_ipfltr_range_eq_16_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_range_eq_16_type_v01, + offset), + 
}, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_range_eq_16_type_v01, + range_low), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_range_eq_16_type_v01, + range_high), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_ipfltr_mask_eq_32_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + mask), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_ipfltr_eq_16_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_eq_16_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_16_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_ipfltr_eq_32_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_ipfltr_mask_eq_128_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 16, + .elem_size = sizeof(uint8_t), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + mask), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 16, + .elem_size = sizeof(uint8_t), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info 
ipa3_filter_rule_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + rule_eq_bitmap), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + tos_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tos_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + protocol_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + protocol_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_ihl_offset_range_16), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01, + .elem_size = sizeof( + struct ipa_ipfltr_range_eq_16_type_v01), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_range_16), + .ei_array = ipa3_ipfltr_range_eq_16_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_offset_meq_32), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + offset_meq_32), + .ei_array = ipa3_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tc_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tc_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + flow_eq_present), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + flow_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_16_present), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct 
ipa_ipfltr_eq_16_type_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_16), + .ei_array = ipa3_ipfltr_eq_16_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_32_present), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_ipfltr_eq_32_type_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_32), + .ei_array = ipa3_ipfltr_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_ihl_offset_meq_32), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_meq_32), + .ei_array = ipa3_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_offset_meq_128), + }, + { + .data_type = QMI_STRUCT, + .elem_len = + QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01, + .elem_size = sizeof( + struct ipa_ipfltr_mask_eq_128_type_v01), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + offset_meq_128), + .ei_array = ipa3_ipfltr_mask_eq_128_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + metadata_meq32_present), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + metadata_meq32), + .ei_array = ipa3_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ipv4_frag_eq_present), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_spec_identifier), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + ip_type), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_filter_rule_type_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_rule), + .ei_array = 
ipa3_filter_rule_type_data_v01_ei, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_action), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + is_routing_table_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + route_table_index), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + is_mux_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + mux_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa_filter_spec_ex_type_data_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + ip_type), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_filter_rule_type_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + filter_rule), + .ei_array = ipa3_filter_rule_type_data_v01_ei, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + filter_action), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + is_routing_table_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + route_table_index), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + is_mux_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + mux_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + rule_id), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_ex_type_v01, + is_rule_hashable), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = 
QMI_COMMON_TLV_TYPE, + }, +}; + +static struct +qmi_elem_info ipa3_filter_rule_identifier_to_handle_map_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_identifier_to_handle_map_v01, + filter_spec_identifier), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_identifier_to_handle_map_v01, + filter_handle), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_filter_handle_to_index_map_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_handle_to_index_map_v01, + filter_handle), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_handle_to_index_map_v01, + filter_index), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + platform_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + platform_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_hdr_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_tbl_info), + .ei_array = ipa3_hdr_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_route_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_route_tbl_info), + .ei_array = ipa3_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_route_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_route_tbl_info), + .ei_array = ipa3_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = 
NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_filter_tbl_start_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_filter_tbl_start_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_filter_tbl_start_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_filter_tbl_start_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + modem_mem_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_modem_mem_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + modem_mem_info), + .ei_array = ipa3_modem_mem_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + ctrl_comm_dest_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + ctrl_comm_dest_end_pt), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + is_ssr_bootup_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + is_ssr_bootup), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_proc_ctx_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_proc_ctx_tbl_info), + .ei_array = ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + zip_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_zip_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + zip_tbl_info), + .ei_array = ipa3_zip_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_hash_route_tbl_info_valid), + }, + { + .data_type 
= QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_hash_route_tbl_info), + .ei_array = ipa3_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_hash_route_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_hash_route_tbl_info), + .ei_array = ipa3_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_hash_filter_tbl_start_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_hash_filter_tbl_start_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1E, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_hash_filter_tbl_start_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1E, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_hash_filter_tbl_start_addr), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + ctrl_comm_dest_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + ctrl_comm_dest_end_pt), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + default_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + default_end_pt), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + modem_driver_init_pending_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, 
+ modem_driver_init_pending), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_init_modem_driver_cmplt_req_msg_v01, + status), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_init_modem_driver_cmplt_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_indication_reg_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + master_driver_init_complete_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + master_driver_init_complete), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + data_usage_quota_reached_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + data_usage_quota_reached), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_indication_reg_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_indication_reg_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + ipa_master_driver_init_complt_ind_msg_v01, + master_driver_init_status), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 
QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(struct ipa_filter_spec_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list), + .ei_array = ipa_filter_spec_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + source_pipe_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + source_pipe_index), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv4_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv4_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv6_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv6_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_ex_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_ex_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(struct ipa_filter_spec_ex_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_ex_list), + .ei_array = ipa_filter_spec_ex_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + resp), + .ei_array = 
get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof( + struct ipa_filter_rule_identifier_to_handle_map_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list), + .ei_array = + ipa3_filter_rule_identifier_to_handle_map_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + rule_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + rule_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + rule_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + source_pipe_index), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + install_status), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + filter_index_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof( + struct ipa_filter_handle_to_index_map_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + filter_index_list), + .ei_array = ipa3_filter_handle_to_index_map_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_pipe_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_pipe_index), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + retain_header_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + 
.offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + retain_header), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_call_mux_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_call_mux_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv4_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv4_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv6_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv6_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv4_filter_idx_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv4_filter_idx), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv6_filter_idx_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv6_filter_idx), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_fltr_installed_notif_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = 
QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + source_pipe_bitmask), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + request_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + throttle_source_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + throttle_source), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_disable_force_clear_datapath_req_msg_v01, + request_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info + ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_disable_force_clear_datapath_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_config_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_deaggr_supported_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_deaggr_supported), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array 
= NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_config_req_msg_v01, + max_aggr_frame_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_config_req_msg_v01, + max_aggr_frame_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ipa_ingress_pipe_mode_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ipa_ingress_pipe_mode), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_speed_info_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_speed_info), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_time_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_time_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_pkt_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_pkt_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_byte_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_byte_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_accumulation_time_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_accumulation_time_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_control_flags_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_control_flags), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = 
NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_msi_event_threshold_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_msi_event_threshold), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_msi_event_threshold_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_msi_event_threshold), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_fifo_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_fifo_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_fifo_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_fifo_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1E, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_buf_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1E, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_buf_size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_config_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_config_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_get_data_stats_req_msg_data_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + ipa_stats_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + reset_stats_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + reset_stats), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_pipe_stats_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), 
+ .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + pipe_index), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv4_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv4_bytes), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv6_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv6_bytes), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_stats_type_filter_rule_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_stats_type_filter_rule_v01, + filter_rule_index), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_stats_type_filter_rule_v01, + num_packets), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ipa_stats_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ipa_stats_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_PIPES_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list), + .ei_array = ipa3_pipe_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct 
ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_PIPES_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list), + .ei_array = ipa3_pipe_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list), + .ei_array = ipa3_stats_type_filter_rule_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_apn_data_stats_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + mux_id), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_ul_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_ul_bytes), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_dl_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_dl_bytes), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + 
struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(struct + ipa_apn_data_stats_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list), + .ei_array = ipa3_apn_data_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info ipa3_data_usage_quota_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_data_usage_quota_info_type_v01, + mux_id), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_data_usage_quota_info_type_v01, + num_Mbytes), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(struct + ipa_data_usage_quota_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list), + .ei_array = ipa3_data_usage_quota_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_set_data_usage_quota_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[] = { + { + .data_type = 
QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct + ipa_data_usage_quota_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_data_usage_quota_reached_ind_msg_v01, + apn), + .ei_array = ipa3_data_usage_quota_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[] = { + /* ipa_stop_data_usage_quota_req_msg is empty */ + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_stop_data_usage_quota_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + filter_spec_ex_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + filter_spec_ex_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_EX_V01, + .elem_size = sizeof(struct + ipa_filter_spec_ex_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + filter_spec_ex_list), + .ei_array = ipa_filter_spec_ex_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + source_pipe_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + source_pipe_index), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + num_ipv4_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + num_ipv4_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + num_ipv6_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + num_ipv6_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + xlat_filter_indices_list_valid), + }, + { + .data_type = 
QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + xlat_filter_indices_list_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_EX_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_ex_msg_v01, + xlat_filter_indices_list), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_ex_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_ex_msg_v01, + rule_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_ex_msg_v01, + rule_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_EX_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_ex_msg_v01, + rule_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c new file mode 100644 index 000000000000..2a86772f4ca8 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -0,0 +1,1805 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include "ipa_i.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" + +#define IPA_RT_INDEX_BITMAP_SIZE (32) +#define IPA_RT_STATUS_OF_ADD_FAILED (-1) +#define IPA_RT_STATUS_OF_DEL_FAILED (-1) +#define IPA_RT_STATUS_OF_MDFY_FAILED (-1) + +#define IPA_RT_GET_RULE_TYPE(__entry) \ + ( \ + ((__entry)->rule.hashable) ? \ + (IPA_RULE_HASHABLE) : (IPA_RULE_NON_HASHABLE) \ + ) + +/** + * ipa_generate_rt_hw_rule() - Generated the RT H/W single rule + * This func will do the preparation core driver work and then calls + * the HAL layer for the real work. + * @ip: the ip address family type + * @entry: routing entry + * @buf: output buffer, buf == NULL means + * caller wants to know the size of the rule as seen + * by HW so they did not pass a valid buffer, we will use a + * scratch buffer instead. 
+ * With this scheme we are going to + * generate the rule twice, once to know size using scratch + * buffer and second to write the rule to the actual caller + * supplied buffer which is of required size + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + */ +static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip, + struct ipa3_rt_entry *entry, u8 *buf) +{ + struct ipahal_rt_rule_gen_params gen_params; + int res = 0; + + memset(&gen_params, 0, sizeof(gen_params)); + + gen_params.ipt = ip; + gen_params.dst_pipe_idx = ipa3_get_ep_mapping(entry->rule.dst); + if (WARN(gen_params.dst_pipe_idx == -1, + "Wrong destination pipe specified in RT rule\n")) + return -EPERM; + + if (WARN(!IPA_CLIENT_IS_CONS(entry->rule.dst), + "No RT rule on IPA_client_producer")) { + IPAERR("pipe.pipe_idx:%d dst_pipe: %d\n", + gen_params.dst_pipe_idx, entry->rule.dst); + return -EPERM; + } + + if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) { + struct ipa3_hdr_proc_ctx_entry *proc_ctx; + + proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx; + gen_params.hdr_lcl = ipa3_ctx->hdr_proc_ctx_tbl_lcl; + gen_params.hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX; + gen_params.hdr_ofst = proc_ctx->offset_entry->offset + + ipa3_ctx->hdr_proc_ctx_tbl.start_offset; + } else if (entry->hdr) { + gen_params.hdr_lcl = ipa3_ctx->hdr_tbl_lcl; + gen_params.hdr_type = IPAHAL_RT_RULE_HDR_RAW; + gen_params.hdr_ofst = entry->hdr->offset_entry->offset; + } else { + gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE; + gen_params.hdr_ofst = 0; + } + + gen_params.priority = entry->prio; + gen_params.id = entry->rule_id; + gen_params.rule = (const struct ipa_rt_rule *)&entry->rule; + + res = ipahal_rt_generate_hw_rule(&gen_params, &entry->hw_len, buf); + if (res) + IPAERR("failed to generate rt h/w rule\n"); + + return res; +} + +/** + * ipa_translate_rt_tbl_to_hw_fmt() - translate the routing driver structures + * (rules and tables) to HW format and fill it in the given buffers + * @ip: the ip address family type + * @rlt: the type of the rules to translate (hashable or non-hashable) + * @base: the rules body buffer to be filled + * @hdr: the rules header (addresses/offsets) buffer to be filled + * @body_ofst: the offset of the rules body from the rules header at + * ipa sram (for local body usage) + * @apps_start_idx: the first rt table index of apps tables + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip, + enum ipa_rule_type rlt, u8 *base, u8 *hdr, + u32 body_ofst, u32 apps_start_idx) +{ + struct ipa3_rt_tbl_set *set; + struct ipa3_rt_tbl *tbl; + struct ipa_mem_buffer tbl_mem; + u8 *tbl_mem_buf; + struct ipa3_rt_entry *entry; + int res; + u64 offset; + u8 *body_i; + + set = &ipa3_ctx->rt_tbl_set[ip]; + body_i = base; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + if (tbl->sz[rlt] == 0) + continue; + if (tbl->in_sys[rlt]) { + /* only body (no header) */ + tbl_mem.size = tbl->sz[rlt] - + ipahal_get_hw_tbl_hdr_width(); + if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) { + IPAERR("fail to alloc sys tbl of size %d\n", + tbl_mem.size); + goto err; + } + + if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base, + hdr, tbl->idx - apps_start_idx, true)) { + IPAERR("fail to wrt sys tbl addr to hdr\n"); + goto hdr_update_fail; + } + + tbl_mem_buf = tbl_mem.base; + + /* generate the rule-set */ + list_for_each_entry(entry, 
&tbl->head_rt_rule_list, + link) { + if (IPA_RT_GET_RULE_TYPE(entry) != rlt) + continue; + res = ipa_generate_rt_hw_rule(ip, entry, + tbl_mem_buf); + if (res) { + IPAERR("failed to gen HW RT rule\n"); + goto hdr_update_fail; + } + tbl_mem_buf += entry->hw_len; + } + + if (tbl->curr_mem[rlt].phys_base) { + WARN_ON(tbl->prev_mem[rlt].phys_base); + tbl->prev_mem[rlt] = tbl->curr_mem[rlt]; + } + tbl->curr_mem[rlt] = tbl_mem; + } else { + offset = body_i - base + body_ofst; + + /* update the hdr at the right index */ + if (ipahal_fltrt_write_addr_to_hdr(offset, hdr, + tbl->idx - apps_start_idx, true)) { + IPAERR("fail to wrt lcl tbl ofst to hdr\n"); + goto hdr_update_fail; + } + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_rt_rule_list, + link) { + if (IPA_RT_GET_RULE_TYPE(entry) != rlt) + continue; + res = ipa_generate_rt_hw_rule(ip, entry, + body_i); + if (res) { + IPAERR("failed to gen HW RT rule\n"); + goto err; + } + body_i += entry->hw_len; + } + + /* + * advance body_i to the next table alignment as local + * tables are ordered back-to-back + */ + body_i += ipahal_get_lcl_tbl_addr_alignment(); + body_i = (u8 *)((long)body_i & + ~ipahal_get_lcl_tbl_addr_alignment()); + } + } + + return 0; + +hdr_update_fail: + ipahal_free_dma_mem(&tbl_mem); +err: + return -EPERM; +} + +static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip) +{ + struct ipa3_rt_tbl *tbl; + struct ipa3_rt_tbl *next; + struct ipa3_rt_tbl_set *set; + int i; + + set = &ipa3_ctx->rt_tbl_set[ip]; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + for (i = 0; i < IPA_RULE_TYPE_MAX; i++) { + if (tbl->prev_mem[i].phys_base) { + IPADBG_LOW( + "reaping sys rt tbl name=%s ip=%d rlt=%d\n", + tbl->name, ip, i); + ipahal_free_dma_mem(&tbl->prev_mem[i]); + memset(&tbl->prev_mem[i], 0, + sizeof(tbl->prev_mem[i])); + } + } + } + + set = &ipa3_ctx->reap_rt_tbl_set[ip]; + list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) { + for (i = 0; i < IPA_RULE_TYPE_MAX; i++) { + WARN_ON(tbl->prev_mem[i].phys_base != 0); + if (tbl->curr_mem[i].phys_base) { + IPADBG_LOW( + "reaping sys rt tbl name=%s ip=%d rlt=%d\n", + tbl->name, ip, i); + ipahal_free_dma_mem(&tbl->curr_mem[i]); + } + } + list_del(&tbl->link); + kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl); + } +} + +/** + * ipa_prep_rt_tbl_for_cmt() - prepare the rt table for commit: + * assign priorities to the rules, calculate their sizes and the + * overall table size + * @ip: the ip address family type + * @tbl: the rt tbl to be prepared + * + * Return: 0 on success, negative on failure + */ +static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip, + struct ipa3_rt_tbl *tbl) +{ + struct ipa3_rt_entry *entry; + int prio_i; + int res; + int max_prio; + u32 hdr_width; + + tbl->sz[IPA_RULE_HASHABLE] = 0; + tbl->sz[IPA_RULE_NON_HASHABLE] = 0; + + max_prio = ipahal_get_rule_max_priority(); + + prio_i = max_prio; + list_for_each_entry(entry, &tbl->head_rt_rule_list, link) { + + if (entry->rule.max_prio) { + entry->prio = max_prio; + } else { + if (ipahal_rule_decrease_priority(&prio_i)) { + IPAERR("cannot decrease rule priority - %d\n", + prio_i); + return -EPERM; + } + entry->prio = prio_i; + } + + res = ipa_generate_rt_hw_rule(ip, entry, NULL); + if (res) { + IPAERR("failed to calculate HW RT rule size\n"); + return -EPERM; + } + + IPADBG_LOW("RT rule id (handle) %d hw_len %u priority %u\n", + entry->id, entry->hw_len, entry->prio); + + if (entry->rule.hashable) + tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len; + else +
tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len; + } + + if ((tbl->sz[IPA_RULE_HASHABLE] + + tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) { + WARN_ON(1); + IPAERR("rt tbl %s is with zero total size\n", tbl->name); + } + + hdr_width = ipahal_get_hw_tbl_hdr_width(); + + if (tbl->sz[IPA_RULE_HASHABLE]) + tbl->sz[IPA_RULE_HASHABLE] += hdr_width; + if (tbl->sz[IPA_RULE_NON_HASHABLE]) + tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width; + + IPADBG("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx, + tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]); + + return 0; +} + +/** + * ipa_generate_rt_hw_tbl_img() - generates the rt hw tbls. + * headers and bodies (sys bodies) are being created into buffers that will + * be filled into the local memory (sram) + * @ip: the ip address family type + * @alloc_params: IN/OUT parameters to hold info regard the tables headers + * and bodies on DDR (DMA buffers), and needed info for the allocation + * that the HAL needs + * + * Return: 0 on success, negative on failure + */ +static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip, + struct ipahal_fltrt_alloc_imgs_params *alloc_params) +{ + u32 hash_bdy_start_ofst, nhash_bdy_start_ofst; + u32 apps_start_idx; + int rc = 0; + + if (ip == IPA_IP_v4) { + nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_nhash_ofst) - + IPA_MEM_PART(v4_rt_nhash_ofst); + hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_hash_ofst) - + IPA_MEM_PART(v4_rt_hash_ofst); + apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo); + } else { + nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_nhash_ofst) - + IPA_MEM_PART(v6_rt_nhash_ofst); + hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_hash_ofst) - + IPA_MEM_PART(v6_rt_hash_ofst); + apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo); + } + + if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) { + IPAERR("fail to allocate RT HW TBL images. IP %d\n", ip); + rc = -ENOMEM; + goto allocate_fail; + } + + if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE, + alloc_params->hash_bdy.base, alloc_params->hash_hdr.base, + hash_bdy_start_ofst, apps_start_idx)) { + IPAERR("fail to translate hashable rt tbls to hw format\n"); + rc = -EPERM; + goto translate_fail; + } + if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE, + alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base, + nhash_bdy_start_ofst, apps_start_idx)) { + IPAERR("fail to translate non-hashable rt tbls to hw format\n"); + rc = -EPERM; + goto translate_fail; + } + + return rc; + +translate_fail: + if (alloc_params->hash_hdr.size) + ipahal_free_dma_mem(&alloc_params->hash_hdr); + ipahal_free_dma_mem(&alloc_params->nhash_hdr); + if (alloc_params->hash_bdy.size) + ipahal_free_dma_mem(&alloc_params->hash_bdy); + if (alloc_params->nhash_bdy.size) + ipahal_free_dma_mem(&alloc_params->nhash_bdy); +allocate_fail: + return rc; +} + +/** + * ipa_rt_valid_lcl_tbl_size() - validate if the space allocated for rt tbl + * bodies at the sram is enough for the commit + * @ipt: the ip address family type + * @rlt: the rule type (hashable or non-hashable) + * + * Return: true if enough space available or false in other cases + */ +static bool ipa_rt_valid_lcl_tbl_size(enum ipa_ip_type ipt, + enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy) +{ + u16 avail; + + if (ipt == IPA_IP_v4) + avail = (rlt == IPA_RULE_HASHABLE) ? + IPA_MEM_PART(apps_v4_rt_hash_size) : + IPA_MEM_PART(apps_v4_rt_nhash_size); + else + avail = (rlt == IPA_RULE_HASHABLE) ? 
+ IPA_MEM_PART(apps_v6_rt_hash_size) : + IPA_MEM_PART(apps_v6_rt_nhash_size); + + if (bdy->size <= avail) + return true; + + IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n", + bdy->size, avail, ipt, rlt); + return false; +} + +/** + * __ipa_commit_rt_v3() - commit rt tables to the hw + * commit the headers and the bodies if are local with internal cache flushing + * @ipt: the ip address family type + * + * Return: 0 on success, negative on failure + */ +int __ipa_commit_rt_v3(enum ipa_ip_type ip) +{ + struct ipa3_desc desc[5]; + struct ipahal_imm_cmd_register_write reg_write_cmd = {0}; + struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0}; + struct ipahal_imm_cmd_pyld *cmd_pyld[5]; + int num_cmd = 0; + struct ipahal_fltrt_alloc_imgs_params alloc_params; + u32 num_modem_rt_index; + int rc = 0; + u32 lcl_hash_hdr, lcl_nhash_hdr; + u32 lcl_hash_bdy, lcl_nhash_bdy; + bool lcl_hash, lcl_nhash; + struct ipahal_reg_fltrt_hash_flush flush; + struct ipahal_reg_valmask valmask; + int i; + struct ipa3_rt_tbl_set *set; + struct ipa3_rt_tbl *tbl; + u32 tbl_hdr_width; + + tbl_hdr_width = ipahal_get_hw_tbl_hdr_width(); + memset(desc, 0, sizeof(desc)); + memset(cmd_pyld, 0, sizeof(cmd_pyld)); + memset(&alloc_params, 0, sizeof(alloc_params)); + alloc_params.ipt = ip; + + if (ip == IPA_IP_v4) { + num_modem_rt_index = + IPA_MEM_PART(v4_modem_rt_index_hi) - + IPA_MEM_PART(v4_modem_rt_index_lo) + 1; + lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_hash_ofst) + + num_modem_rt_index * tbl_hdr_width; + lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_nhash_ofst) + + num_modem_rt_index * tbl_hdr_width; + lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_rt_hash_ofst); + lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_rt_nhash_ofst); + lcl_hash = ipa3_ctx->ip4_rt_tbl_hash_lcl; + lcl_nhash = ipa3_ctx->ip4_rt_tbl_nhash_lcl; + alloc_params.tbls_num = IPA_MEM_PART(v4_apps_rt_index_hi) - + IPA_MEM_PART(v4_apps_rt_index_lo) + 1; + } else { + num_modem_rt_index = + IPA_MEM_PART(v6_modem_rt_index_hi) - + IPA_MEM_PART(v6_modem_rt_index_lo) + 1; + lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_hash_ofst) + + num_modem_rt_index * tbl_hdr_width; + lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_nhash_ofst) + + num_modem_rt_index * tbl_hdr_width; + lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_rt_hash_ofst); + lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_rt_nhash_ofst); + lcl_hash = ipa3_ctx->ip6_rt_tbl_hash_lcl; + lcl_nhash = ipa3_ctx->ip6_rt_tbl_nhash_lcl; + alloc_params.tbls_num = IPA_MEM_PART(v6_apps_rt_index_hi) - + IPA_MEM_PART(v6_apps_rt_index_lo) + 1; + } + + if (!ipa3_ctx->rt_idx_bitmap[ip]) { + IPAERR("no rt tbls present\n"); + rc = -EPERM; + goto no_rt_tbls; + } + + set = &ipa3_ctx->rt_tbl_set[ip]; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + if (ipa_prep_rt_tbl_for_cmt(ip, tbl)) { + rc = -EPERM; + goto no_rt_tbls; + } + if (!tbl->in_sys[IPA_RULE_HASHABLE] && + tbl->sz[IPA_RULE_HASHABLE]) { + alloc_params.num_lcl_hash_tbls++; + alloc_params.total_sz_lcl_hash_tbls += + tbl->sz[IPA_RULE_HASHABLE]; + alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width; + } + if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] && + tbl->sz[IPA_RULE_NON_HASHABLE]) { + alloc_params.num_lcl_nhash_tbls++; + alloc_params.total_sz_lcl_nhash_tbls += + tbl->sz[IPA_RULE_NON_HASHABLE]; + alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width; + } 
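+		/*
+		 * Note: tbl->sz[] includes the table-header width added by
+		 * ipa_prep_rt_tbl_for_cmt(); it is subtracted back above
+		 * because, for SRAM-resident tables, only the rule bodies
+		 * occupy the local body region while the per-table headers
+		 * live in the separate header arrays.
+		 */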
+ } + + if (ipa_generate_rt_hw_tbl_img(ip, &alloc_params)) { + IPAERR("fail to generate RT HW TBL images. IP %d\n", ip); + rc = -EFAULT; + goto no_rt_tbls; + } + + if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE, + &alloc_params.hash_bdy)) { + rc = -EFAULT; + goto fail_size_valid; + } + if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE, + &alloc_params.nhash_bdy)) { + rc = -EFAULT; + goto fail_size_valid; + } + + /* flushing ipa internal hashable rt rules cache */ + memset(&flush, 0, sizeof(flush)); + if (ip == IPA_IP_v4) + flush.v4_rt = true; + else + flush.v6_rt = true; + ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask); + reg_write_cmd.skip_pipeline_clear = false; + reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH); + reg_write_cmd.value = valmask.val; + reg_write_cmd.value_mask = valmask.mask; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, ®_write_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct register_write imm cmd. IP %d\n", ip); + goto fail_size_valid; + } + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].pyld = cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + desc[num_cmd].type = IPA_IMM_CMD_DESC; + num_cmd++; + + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.nhash_hdr.size; + mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base; + mem_cmd.local_addr = lcl_nhash_hdr; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip); + goto fail_imm_cmd_construct; + } + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].pyld = cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + desc[num_cmd].type = IPA_IMM_CMD_DESC; + num_cmd++; + + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.hash_hdr.size; + mem_cmd.system_addr = alloc_params.hash_hdr.phys_base; + mem_cmd.local_addr = lcl_hash_hdr; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip); + goto fail_imm_cmd_construct; + } + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].pyld = cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + desc[num_cmd].type = IPA_IMM_CMD_DESC; + num_cmd++; + + if (lcl_nhash) { + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.nhash_bdy.size; + mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base; + mem_cmd.local_addr = lcl_nhash_bdy; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem cmd. 
IP %d\n", + ip); + goto fail_imm_cmd_construct; + } + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].pyld = cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + desc[num_cmd].type = IPA_IMM_CMD_DESC; + num_cmd++; + } + if (lcl_hash) { + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.hash_bdy.size; + mem_cmd.system_addr = alloc_params.hash_bdy.phys_base; + mem_cmd.local_addr = lcl_hash_bdy; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR("fail construct dma_shared_mem cmd. IP %d\n", + ip); + goto fail_imm_cmd_construct; + } + desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode; + desc[num_cmd].pyld = cmd_pyld[num_cmd]->data; + desc[num_cmd].len = cmd_pyld[num_cmd]->len; + desc[num_cmd].type = IPA_IMM_CMD_DESC; + num_cmd++; + } + + if (ipa3_send_cmd(num_cmd, desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_imm_cmd_construct; + } + + IPADBG_LOW("Hashable HEAD\n"); + IPA_DUMP_BUFF(alloc_params.hash_hdr.base, + alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size); + + IPADBG_LOW("Non-Hashable HEAD\n"); + IPA_DUMP_BUFF(alloc_params.nhash_hdr.base, + alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size); + + if (alloc_params.hash_bdy.size) { + IPADBG_LOW("Hashable BODY\n"); + IPA_DUMP_BUFF(alloc_params.hash_bdy.base, + alloc_params.hash_bdy.phys_base, + alloc_params.hash_bdy.size); + } + + if (alloc_params.nhash_bdy.size) { + IPADBG_LOW("Non-Hashable BODY\n"); + IPA_DUMP_BUFF(alloc_params.nhash_bdy.base, + alloc_params.nhash_bdy.phys_base, + alloc_params.nhash_bdy.size); + } + + __ipa_reap_sys_rt_tbls(ip); + +fail_imm_cmd_construct: + for (i = 0 ; i < num_cmd ; i++) + ipahal_destroy_imm_cmd(cmd_pyld[i]); +fail_size_valid: + if (alloc_params.hash_hdr.size) + ipahal_free_dma_mem(&alloc_params.hash_hdr); + ipahal_free_dma_mem(&alloc_params.nhash_hdr); + if (alloc_params.hash_bdy.size) + ipahal_free_dma_mem(&alloc_params.hash_bdy); + if (alloc_params.nhash_bdy.size) + ipahal_free_dma_mem(&alloc_params.nhash_bdy); + +no_rt_tbls: + return rc; +} + +/** + * __ipa3_find_rt_tbl() - find the routing table + * which name is given as parameter + * @ip: [in] the ip address family type of the wanted routing table + * @name: [in] the name of the wanted routing table + * + * Returns: the routing table which name is given as parameter, or NULL if it + * doesn't exist + */ +struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name) +{ + struct ipa3_rt_tbl *entry; + struct ipa3_rt_tbl_set *set; + + if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) { + IPAERR_RL("Name too long: %s\n", name); + return NULL; + } + + set = &ipa3_ctx->rt_tbl_set[ip]; + list_for_each_entry(entry, &set->head_rt_tbl_list, link) { + if (!strcmp(name, entry->name)) + return entry; + } + + return NULL; +} + +/** + * ipa3_query_rt_index() - find the routing table index + * which name and ip type are given as parameters + * @in: [out] the index of the wanted routing table + * + * Returns: the routing table which name is given as parameter, or NULL if it + * doesn't exist + */ +int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in) +{ + struct ipa3_rt_tbl *entry; + + if (in->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + /* check if this table exists */ + entry = 
__ipa3_find_rt_tbl(in->ip, in->name); + if (!entry) { + mutex_unlock(&ipa3_ctx->lock); + return -EFAULT; + } + in->idx = entry->idx; + mutex_unlock(&ipa3_ctx->lock); + return 0; +} + +static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, + const char *name) +{ + struct ipa3_rt_tbl *entry; + struct ipa3_rt_tbl_set *set; + int i; + int id; + int max_tbl_indx; + + if (name == NULL) { + IPAERR_RL("no tbl name\n"); + goto error; + } + + if (ip == IPA_IP_v4) { + max_tbl_indx = + max(IPA_MEM_PART(v4_modem_rt_index_hi), + IPA_MEM_PART(v4_apps_rt_index_hi)); + } else if (ip == IPA_IP_v6) { + max_tbl_indx = + max(IPA_MEM_PART(v6_modem_rt_index_hi), + IPA_MEM_PART(v6_apps_rt_index_hi)); + } else { + IPAERR_RL("bad ip family type\n"); + goto error; + } + + set = &ipa3_ctx->rt_tbl_set[ip]; + /* check if this table exists */ + entry = __ipa3_find_rt_tbl(ip, name); + if (!entry) { + entry = kmem_cache_zalloc(ipa3_ctx->rt_tbl_cache, GFP_KERNEL); + if (!entry) + goto error; + + /* find a routing tbl index */ + for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) { + if (!test_bit(i, &ipa3_ctx->rt_idx_bitmap[ip])) { + entry->idx = i; + set_bit(i, &ipa3_ctx->rt_idx_bitmap[ip]); + break; + } + } + if (i == IPA_RT_INDEX_BITMAP_SIZE) { + IPAERR("not free RT tbl indices left\n"); + goto fail_rt_idx_alloc; + } + if (i > max_tbl_indx) { + IPAERR("rt tbl index is above max\n"); + goto fail_rt_idx_alloc; + } + + INIT_LIST_HEAD(&entry->head_rt_rule_list); + INIT_LIST_HEAD(&entry->link); + strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX); + entry->set = set; + entry->cookie = IPA_RT_TBL_COOKIE; + entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ? + !ipa3_ctx->ip4_rt_tbl_hash_lcl : + !ipa3_ctx->ip6_rt_tbl_hash_lcl; + entry->in_sys[IPA_RULE_NON_HASHABLE] = (ip == IPA_IP_v4) ? 
+ !ipa3_ctx->ip4_rt_tbl_nhash_lcl : + !ipa3_ctx->ip6_rt_tbl_nhash_lcl; + set->tbl_cnt++; + entry->rule_ids = &set->rule_ids; + list_add(&entry->link, &set->head_rt_tbl_list); + + IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx, + set->tbl_cnt, ip); + + id = ipa3_id_alloc(entry); + if (id < 0) { + IPAERR("failed to add to tree\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + entry->id = id; + } + + return entry; +ipa_insert_failed: + set->tbl_cnt--; + list_del(&entry->link); + idr_destroy(entry->rule_ids); +fail_rt_idx_alloc: + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry); +error: + return NULL; +} + +static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry) +{ + enum ipa_ip_type ip = IPA_IP_MAX; + u32 id; + struct ipa3_rt_tbl_set *rset; + + if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) { + IPAERR_RL("bad params\n"); + return -EINVAL; + } + id = entry->id; + if (ipa3_id_find(id) == NULL) { + IPAERR_RL("lookup failed\n"); + return -EPERM; + } + + if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4]) + ip = IPA_IP_v4; + else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6]) + ip = IPA_IP_v6; + else { + WARN_ON(1); + return -EPERM; + } + + rset = &ipa3_ctx->reap_rt_tbl_set[ip]; + + entry->rule_ids = NULL; + if (entry->in_sys[IPA_RULE_HASHABLE] || + entry->in_sys[IPA_RULE_NON_HASHABLE]) { + list_move(&entry->link, &rset->head_rt_tbl_list); + clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]); + entry->set->tbl_cnt--; + IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d ip=%d\n", + entry->idx, entry->set->tbl_cnt, ip); + } else { + list_del(&entry->link); + clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]); + entry->set->tbl_cnt--; + IPADBG("del rt tbl_idx=%d tbl_cnt=%d ip=%d\n", + entry->idx, entry->set->tbl_cnt, ip); + kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry); + } + + /* remove the handle from the database */ + ipa3_id_remove(id); + return 0; +} + +static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule, + struct ipa3_hdr_entry **hdr, + struct ipa3_hdr_proc_ctx_entry **proc_ctx) +{ + if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) { + IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n"); + return -EPERM; + } + + if (rule->hdr_hdl) { + *hdr = ipa3_id_find(rule->hdr_hdl); + if ((*hdr == NULL) || ((*hdr)->cookie != IPA_HDR_COOKIE)) { + IPAERR("rt rule does not point to valid hdr\n"); + return -EPERM; + } + } else if (rule->hdr_proc_ctx_hdl) { + *proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl); + if ((*proc_ctx == NULL) || + ((*proc_ctx)->cookie != IPA_PROC_HDR_COOKIE)) { + + IPAERR("rt rule does not point to valid proc ctx\n"); + return -EPERM; + } + } + + return 0; +} + +static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry, + const struct ipa_rt_rule *rule, + struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr, + struct ipa3_hdr_proc_ctx_entry *proc_ctx) +{ + int id; + + *entry = kmem_cache_zalloc(ipa3_ctx->rt_rule_cache, GFP_KERNEL); + if (!*entry) + goto error; + + INIT_LIST_HEAD(&(*entry)->link); + (*(entry))->cookie = IPA_RT_RULE_COOKIE; + (*(entry))->rule = *rule; + (*(entry))->tbl = tbl; + (*(entry))->hdr = hdr; + (*(entry))->proc_ctx = proc_ctx; + id = ipa3_alloc_rule_id(tbl->rule_ids); + if (id < 0) { + IPAERR("failed to allocate rule id\n"); + WARN_ON(1); + goto alloc_rule_id_fail; + } + (*(entry))->rule_id = id; + + return 0; + +alloc_rule_id_fail: + kmem_cache_free(ipa3_ctx->rt_rule_cache, *entry); +error: + return -EPERM; +} + +static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl, + struct 
ipa3_rt_tbl *tbl) +{ + int id; + + tbl->rule_cnt++; + if (entry->hdr) + entry->hdr->ref_cnt++; + else if (entry->proc_ctx) + entry->proc_ctx->ref_cnt++; + id = ipa3_id_alloc(entry); + if (id < 0) { + IPAERR("failed to add to tree\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + IPADBG("add rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n", + tbl->idx, tbl->rule_cnt, entry->rule_id); + *rule_hdl = id; + entry->id = id; + + return 0; + +ipa_insert_failed: + if (entry->hdr) + entry->hdr->ref_cnt--; + else if (entry->proc_ctx) + entry->proc_ctx->ref_cnt--; + idr_remove(tbl->rule_ids, entry->rule_id); + list_del(&entry->link); + kmem_cache_free(ipa3_ctx->rt_rule_cache, entry); + return -EPERM; +} + +static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, + const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl) +{ + struct ipa3_rt_tbl *tbl; + struct ipa3_rt_entry *entry; + struct ipa3_hdr_entry *hdr = NULL; + struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL; + + if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx)) + goto error; + + + tbl = __ipa_add_rt_tbl(ip, name); + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { + IPAERR_RL("failed adding rt tbl name = %s\n", + name ? name : ""); + goto error; + } + /* + * do not allow any rules to be added at end of the "default" routing + * tables + */ + if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) && + (tbl->rule_cnt > 0) && (at_rear != 0)) { + IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d at_rear=%d" + , tbl->rule_cnt, at_rear); + goto error; + } + + if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx)) + goto error; + + if (at_rear) + list_add_tail(&entry->link, &tbl->head_rt_rule_list); + else + list_add(&entry->link, &tbl->head_rt_rule_list); + + if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl)) + goto error; + + return 0; + +error: + return -EPERM; +} + +static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl, + const struct ipa_rt_rule *rule, u32 *rule_hdl, + struct ipa3_rt_entry **add_after_entry) +{ + struct ipa3_rt_entry *entry; + struct ipa3_hdr_entry *hdr = NULL; + struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL; + + if (!*add_after_entry) + goto error; + + if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx)) + goto error; + + if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx)) + goto error; + + list_add(&entry->link, &((*add_after_entry)->link)); + + if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl)) + goto error; + + /* + * prepare for next insertion + */ + *add_after_entry = entry; + + return 0; + +error: + *add_after_entry = NULL; + return -EPERM; +} + +/** + * ipa3_add_rt_rule() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) +{ + int i; + int ret; + + if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < rules->num_rules; i++) { + if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name, + &rules->rules[i].rule, + rules->rules[i].at_rear, + &rules->rules[i].rt_rule_hdl)) { + IPAERR_RL("failed to add rt rule %d\n", i); + rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; 
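+	/*
+	 * Note: if the commit above fails, -EPERM is returned but the rules
+	 * already inserted keep their success status and remain in the SW
+	 * tables; the caller may issue the commit again separately.
+	 */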
+bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +/** + * ipa3_add_rt_rule_after() - Add the given routing rules after the + * specified rule to SW and optionally commit to IPA HW + * @rules: [inout] set of routing rules to add + handle where to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules) +{ + int i; + int ret = 0; + struct ipa3_rt_tbl *tbl = NULL; + struct ipa3_rt_entry *entry = NULL; + + if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + + tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name); + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { + IPAERR_RL("failed finding rt tbl name = %s\n", + rules->rt_tbl_name); + ret = -EINVAL; + goto bail; + } + + if (tbl->rule_cnt <= 0) { + IPAERR_RL("tbl->rule_cnt <= 0"); + ret = -EINVAL; + goto bail; + } + + entry = ipa3_id_find(rules->add_after_hdl); + if (!entry) { + IPAERR_RL("failed finding rule %d in rt tbls\n", + rules->add_after_hdl); + ret = -EINVAL; + goto bail; + } + + if (entry->tbl != tbl) { + IPAERR_RL("given rt rule does not match the table\n"); + ret = -EINVAL; + goto bail; + } + + /* + * do not allow any rules to be added at end of the "default" routing + * tables + */ + if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) && + (&entry->link == tbl->head_rt_rule_list.prev)) { + IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d\n", + tbl->rule_cnt); + ret = -EINVAL; + goto bail; + } + + /* + * we add all rules one after the other, if one insertion fails, it cuts + * the chain (all following will receive fail status) following calls to + * __ipa_add_rt_rule_after will fail (entry == NULL) + */ + + for (i = 0; i < rules->num_rules; i++) { + if (__ipa_add_rt_rule_after(tbl, + &rules->rules[i].rule, + &rules->rules[i].rt_rule_hdl, + &entry)) { + IPAERR_RL("failed to add rt rule %d\n", i); + rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) { + IPAERR_RL("failed to commit\n"); + ret = -EPERM; + goto bail; + } + + ret = 0; + goto bail; + +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +int __ipa3_del_rt_rule(u32 rule_hdl) +{ + struct ipa3_rt_entry *entry; + int id; + + entry = ipa3_id_find(rule_hdl); + + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_RT_RULE_COOKIE) { + IPAERR_RL("bad params\n"); + return -EINVAL; + } + + if (entry->hdr) + __ipa3_release_hdr(entry->hdr->id); + else if (entry->proc_ctx) + __ipa3_release_hdr_proc_ctx(entry->proc_ctx->id); + list_del(&entry->link); + entry->tbl->rule_cnt--; + IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n ref_cnt=%u", + entry->tbl->idx, entry->tbl->rule_cnt, + entry->rule_id, entry->tbl->ref_cnt); + idr_remove(entry->tbl->rule_ids, entry->rule_id); + if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) { + if (__ipa_del_rt_tbl(entry->tbl)) + IPAERR_RL("fail to del RT tbl\n"); + } + entry->cookie = 0; + id = entry->id; + kmem_cache_free(ipa3_ctx->rt_rule_cache, entry); + + /* remove the handle from the database */ + ipa3_id_remove(id); + + return 0; +} + +/** + * ipa3_del_rt_rule() - Remove the specified routing rules to SW and optionally + * commit to IPA HW + * @hdls: [inout] set of routing rules to delete + * + * Returns: 0 on 
success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls) +{ + int i; + int ret; + + if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa3_del_rt_rule(hdls->hdl[i].hdl)) { + IPAERR_RL("failed to del rt rule %i\n", i); + hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) + if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +/** + * ipa_commit_rt_rule() - Commit the current SW routing table of specified type + * to IPA HW + * @ip: The family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_commit_rt(enum ipa_ip_type ip) +{ + int ret; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + /* + * issue a commit on the filtering module of same IP type since + * filtering rules point to routing tables + */ + if (ipa3_commit_flt(ip)) + return -EPERM; + + mutex_lock(&ipa3_ctx->lock); + if (ipa3_ctx->ctrl->ipa3_commit_rt(ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +/** + * ipa3_reset_rt() - reset the current SW routing table of specified type + * (does not commit to HW) + * @ip: The family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_reset_rt(enum ipa_ip_type ip) +{ + struct ipa3_rt_tbl *tbl; + struct ipa3_rt_tbl *tbl_next; + struct ipa3_rt_tbl_set *set; + struct ipa3_rt_entry *rule; + struct ipa3_rt_entry *rule_next; + struct ipa3_rt_tbl_set *rset; + u32 apps_start_idx; + int id; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + if (ip == IPA_IP_v4) + apps_start_idx = + IPA_MEM_PART(v4_apps_rt_index_lo); + else + apps_start_idx = + IPA_MEM_PART(v6_apps_rt_index_lo); + + /* + * issue a reset on the filtering module of same IP type since + * filtering rules point to routing tables + */ + if (ipa3_reset_flt(ip)) + IPAERR_RL("fail to reset flt ip=%d\n", ip); + + set = &ipa3_ctx->rt_tbl_set[ip]; + rset = &ipa3_ctx->reap_rt_tbl_set[ip]; + mutex_lock(&ipa3_ctx->lock); + IPADBG("reset rt ip=%d\n", ip); + list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) { + list_for_each_entry_safe(rule, rule_next, + &tbl->head_rt_rule_list, link) { + if (ipa3_id_find(rule->id) == NULL) { + WARN_ON(1); + mutex_unlock(&ipa3_ctx->lock); + return -EFAULT; + } + + /* + * for the "default" routing tbl, remove all but the + * last rule + */ + if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1) + continue; + + list_del(&rule->link); + tbl->rule_cnt--; + if (rule->hdr) + __ipa3_release_hdr(rule->hdr->id); + else if (rule->proc_ctx) + __ipa3_release_hdr_proc_ctx(rule->proc_ctx->id); + rule->cookie = 0; + idr_remove(tbl->rule_ids, rule->rule_id); + id = rule->id; + kmem_cache_free(ipa3_ctx->rt_rule_cache, rule); + + /* remove the handle from the database */ + ipa3_id_remove(id); + } + + if (ipa3_id_find(tbl->id) == NULL) { + WARN_ON(1); + mutex_unlock(&ipa3_ctx->lock); + return -EFAULT; + } + id = tbl->id; + + /* do not remove the "default" routing tbl which has index 0 */ + if (tbl->idx != 
apps_start_idx) { + tbl->rule_ids = NULL; + if (tbl->in_sys[IPA_RULE_HASHABLE] || + tbl->in_sys[IPA_RULE_NON_HASHABLE]) { + list_move(&tbl->link, &rset->head_rt_tbl_list); + clear_bit(tbl->idx, + &ipa3_ctx->rt_idx_bitmap[ip]); + set->tbl_cnt--; + IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n", + tbl->idx, set->tbl_cnt); + } else { + list_del(&tbl->link); + set->tbl_cnt--; + clear_bit(tbl->idx, + &ipa3_ctx->rt_idx_bitmap[ip]); + IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n", + tbl->idx, set->tbl_cnt); + kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl); + } + /* remove the handle from the database */ + ipa3_id_remove(id); + } + } + mutex_unlock(&ipa3_ctx->lock); + + return 0; +} + +/** + * ipa3_get_rt_tbl() - lookup the specified routing table and return handle if + * it exists, if lookup succeeds the routing table ref cnt is increased + * @lookup: [inout] routing table to lookup and its handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + * Caller should call ipa3_put_rt_tbl later if this function succeeds + */ +int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) +{ + struct ipa3_rt_tbl *entry; + int result = -EFAULT; + + if (lookup == NULL || lookup->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + mutex_lock(&ipa3_ctx->lock); + entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name); + if (entry && entry->cookie == IPA_RT_TBL_COOKIE) { + if (entry->ref_cnt == U32_MAX) { + IPAERR("fail: ref count crossed limit\n"); + goto ret; + } + entry->ref_cnt++; + lookup->hdl = entry->id; + + /* commit for get */ + if (ipa3_ctx->ctrl->ipa3_commit_rt(lookup->ip)) + IPAERR_RL("fail to commit RT tbl\n"); + + result = 0; + } + +ret: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_put_rt_tbl() - Release the specified routing table handle + * @rt_tbl_hdl: [in] the routing table handle to release + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_put_rt_tbl(u32 rt_tbl_hdl) +{ + struct ipa3_rt_tbl *entry; + enum ipa_ip_type ip = IPA_IP_MAX; + int result = 0; + + mutex_lock(&ipa3_ctx->lock); + entry = ipa3_id_find(rt_tbl_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + result = -EINVAL; + goto ret; + } + + if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) { + IPAERR_RL("bad params\n"); + result = -EINVAL; + goto ret; + } + + if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4]) + ip = IPA_IP_v4; + else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6]) + ip = IPA_IP_v6; + else { + WARN_ON(1); + result = -EINVAL; + goto ret; + } + + entry->ref_cnt--; + if (entry->ref_cnt == 0 && entry->rule_cnt == 0) { + IPADBG("zero ref_cnt, delete rt tbl (idx=%u)\n", + entry->idx); + if (__ipa_del_rt_tbl(entry)) + IPAERR_RL("fail to del RT tbl\n"); + /* commit for put */ + if (ipa3_ctx->ctrl->ipa3_commit_rt(ip)) + IPAERR_RL("fail to commit RT tbl\n"); + } + + result = 0; + +ret: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + + +static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) +{ + struct ipa3_rt_entry *entry; + struct ipa3_hdr_entry *hdr = NULL; + struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL; + + if (rtrule->rule.hdr_hdl) { + hdr = ipa3_id_find(rtrule->rule.hdr_hdl); + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { + IPAERR_RL("rt rule does not point to valid hdr\n"); + goto error; + } + } else if (rtrule->rule.hdr_proc_ctx_hdl) { + proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl); + if 
((proc_ctx == NULL) || + (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) { + IPAERR_RL("rt rule does not point to valid proc ctx\n"); + goto error; + } + } + + entry = ipa3_id_find(rtrule->rt_rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + goto error; + } + + if (entry->cookie != IPA_RT_RULE_COOKIE) { + IPAERR_RL("bad params\n"); + goto error; + } + + if (entry->hdr) + entry->hdr->ref_cnt--; + if (entry->proc_ctx) + entry->proc_ctx->ref_cnt--; + + entry->rule = rtrule->rule; + entry->hdr = hdr; + entry->proc_ctx = proc_ctx; + + if (entry->hdr) + entry->hdr->ref_cnt++; + if (entry->proc_ctx) + entry->proc_ctx->ref_cnt++; + + entry->hw_len = 0; + entry->prio = 0; + + return 0; + +error: + return -EPERM; +} + +/** + * ipa3_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally + * commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad param\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < hdls->num_rules; i++) { + if (__ipa_mdfy_rt_rule(&hdls->rules[i])) { + IPAERR_RL("failed to mdfy rt rule %i\n", i); + hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED; + } else { + hdls->rules[i].status = 0; + } + } + + if (hdls->commit) + if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + + return result; +} + +/** + * ipa3_set_rt_tuple_mask() - Sets the rt tuple masking for the given tbl + * table index must be for AP EP (not modem) + * updates the the routing masking values without changing the flt ones. + * + * @tbl_idx: routing table index to configure the tuple masking + * @tuple: the tuple members masking + * Returns: 0 on success, negative on failure + * + */ +int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple) +{ + struct ipahal_reg_fltrt_hash_tuple fltrt_tuple; + + if (!tuple) { + IPAERR("bad tuple\n"); + return -EINVAL; + } + + if (tbl_idx >= + max(IPA_MEM_PART(v6_rt_num_index), + IPA_MEM_PART(v4_rt_num_index)) || + tbl_idx < 0) { + IPAERR("bad table index\n"); + return -EINVAL; + } + + if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) && + tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi)) { + IPAERR("cannot configure modem v4 rt tuple by AP\n"); + return -EINVAL; + } + + if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) && + tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi)) { + IPAERR("cannot configure modem v6 rt tuple by AP\n"); + return -EINVAL; + } + + ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n, + tbl_idx, &fltrt_tuple); + fltrt_tuple.rt = *tuple; + ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n, + tbl_idx, &fltrt_tuple); + + return 0; +} + +/** + * ipa3_rt_read_tbl_from_hw() -Read routing table from IPA HW + * @tbl_idx: routing table index + * @ip_type: IPv4 or IPv6 table + * @hashable: hashable or non-hashable table + * @entry: array to fill the table entries + * @num_entry: number of entries in entry array. set by the caller to indicate + * entry array size. Then set by this function as an output parameter to + * indicate the number of entries in the array + * + * This function reads the routing table from IPA SRAM and prepares an array + * of entries. This function is mainly used for debugging purposes. 
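+ *
+ * Illustrative usage (not part of this snapshot; the array size here is an
+ * arbitrary choice):
+ *
+ *	struct ipahal_rt_rule_entry rules[64];
+ *	int num = ARRAY_SIZE(rules);
+ *
+ *	if (!ipa3_rt_read_tbl_from_hw(tbl_idx, IPA_IP_v4, true, rules, &num))
+ *		pr_info("read %d hashable IPv4 rules\n", num);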
+ * + * If empty table or Modem Apps table, zero entries will be returned. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type, + bool hashable, struct ipahal_rt_rule_entry entry[], int *num_entry) +{ + void *ipa_sram_mmio; + u64 hdr_base_ofst; + int res = 0; + u64 tbl_addr; + bool is_sys; + struct ipa_mem_buffer *sys_tbl_mem; + u8 *rule_addr; + int rule_idx; + + IPADBG_LOW("tbl_idx=%d ip_t=%d hash=%d entry=0x%pK num_entry=0x%pK\n", + tbl_idx, ip_type, hashable, entry, num_entry); + + if (ip_type == IPA_IP_v4 && tbl_idx >= IPA_MEM_PART(v4_rt_num_index)) { + IPAERR("Invalid params\n"); + return -EFAULT; + } + + if (ip_type == IPA_IP_v6 && tbl_idx >= IPA_MEM_PART(v6_rt_num_index)) { + IPAERR("Invalid params\n"); + return -EFAULT; + } + + /* map IPA SRAM */ + ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, + ipa3_ctx->smem_restricted_bytes / 4), + ipa3_ctx->smem_sz); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + + memset(entry, 0, sizeof(*entry) * (*num_entry)); + if (hashable) { + if (ip_type == IPA_IP_v4) + hdr_base_ofst = + IPA_MEM_PART(v4_rt_hash_ofst); + else + hdr_base_ofst = + IPA_MEM_PART(v6_rt_hash_ofst); + } else { + if (ip_type == IPA_IP_v4) + hdr_base_ofst = + IPA_MEM_PART(v4_rt_nhash_ofst); + else + hdr_base_ofst = + IPA_MEM_PART(v6_rt_nhash_ofst); + } + + IPADBG_LOW("hdr_base_ofst=0x%llx\n", hdr_base_ofst); + + res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst, + tbl_idx, &tbl_addr, &is_sys); + if (res) { + IPAERR("failed to read table address from header structure\n"); + goto bail; + } + IPADBG_LOW("rt tbl %d: tbl_addr=0x%llx is_sys=%d\n", + tbl_idx, tbl_addr, is_sys); + if (!tbl_addr) { + IPAERR("invalid rt tbl addr\n"); + res = -EFAULT; + goto bail; + } + + /* for tables which reside in DDR access it from the virtual memory */ + if (is_sys) { + struct ipa3_rt_tbl_set *set; + struct ipa3_rt_tbl *tbl; + + set = &ipa3_ctx->rt_tbl_set[ip_type]; + rule_addr = NULL; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + if (tbl->idx == tbl_idx) { + sys_tbl_mem = &(tbl->curr_mem[hashable ? + IPA_RULE_HASHABLE : + IPA_RULE_NON_HASHABLE]); + if (sys_tbl_mem->phys_base && + sys_tbl_mem->phys_base != tbl_addr) { + IPAERR("mismatch:parsed=%llx sw=%pad\n" + , tbl_addr, + &sys_tbl_mem->phys_base); + } + if (sys_tbl_mem->phys_base) + rule_addr = sys_tbl_mem->base; + else + rule_addr = NULL; + } + } + } else { + rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr; + } + + IPADBG_LOW("First rule addr 0x%pK\n", rule_addr); + + if (!rule_addr) { + /* Modem table in system memory or empty table */ + *num_entry = 0; + goto bail; + } + + rule_idx = 0; + while (rule_idx < *num_entry) { + res = ipahal_rt_parse_hw_rule(rule_addr, &entry[rule_idx]); + if (res) { + IPAERR("failed parsing rt rule\n"); + goto bail; + } + + IPADBG_LOW("rule_size=%d\n", entry[rule_idx].rule_size); + if (!entry[rule_idx].rule_size) + break; + + rule_addr += entry[rule_idx].rule_size; + rule_idx++; + } + *num_entry = rule_idx; +bail: + iounmap(ipa_sram_mmio); + return res; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h new file mode 100644 index 000000000000..4addc9c6f742 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h @@ -0,0 +1,153 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ipa +#define TRACE_INCLUDE_FILE ipa_trace + +#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _IPA_TRACE_H + +#include + +TRACE_EVENT( + intr_to_poll3, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + poll_to_intr3, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + idle_sleep_enter3, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + idle_sleep_exit3, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + rmnet_ipa_netifni3, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt) +); + +TRACE_EVENT( + rmnet_ipa_netifrx3, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt) +); + +TRACE_EVENT( + rmnet_ipa_netif_rcv_skb3, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt) +); + +#endif /* _IPA_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c new file mode 100644 index 000000000000..01b66391ba0a --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c @@ -0,0 +1,929 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "ipa_i.h" +#include + +#define IPA_RAM_UC_SMEM_SIZE 128 +#define IPA_HW_INTERFACE_VERSION 0x2000 +#define IPA_PKT_FLUSH_TO_US 100 +#define IPA_UC_POLL_SLEEP_USEC 100 +#define IPA_UC_POLL_MAX_RETRY 10000 + +/** + * Mailbox register to Interrupt HWP for CPU cmd + * Usage of IPA_UC_MAILBOX_m_n doorbell instead of IPA_IRQ_EE_UC_0 + * due to HW limitation. + * + */ +#define IPA_CPU_2_HW_CMD_MBOX_m 0 +#define IPA_CPU_2_HW_CMD_MBOX_n 23 + +/** + * enum ipa3_cpu_2_hw_commands - Values that represent the commands from the CPU + * IPA_CPU_2_HW_CMD_NO_OP : No operation is required. + * IPA_CPU_2_HW_CMD_UPDATE_FLAGS : Update SW flags which defines the behavior + * of HW. + * IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST : Launch predefined test over HW. + * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO : Read HW internal debug information. + * IPA_CPU_2_HW_CMD_ERR_FATAL : CPU instructs HW to perform error fatal + * handling. + * IPA_CPU_2_HW_CMD_CLK_GATE : CPU instructs HW to goto Clock Gated state. + * IPA_CPU_2_HW_CMD_CLK_UNGATE : CPU instructs HW to goto Clock Ungated state. + * IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB. + * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug. + * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness. + */ +enum ipa3_cpu_2_hw_commands { + IPA_CPU_2_HW_CMD_NO_OP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_CPU_2_HW_CMD_UPDATE_FLAGS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), + IPA_CPU_2_HW_CMD_DEBUG_GET_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3), + IPA_CPU_2_HW_CMD_ERR_FATAL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4), + IPA_CPU_2_HW_CMD_CLK_GATE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5), + IPA_CPU_2_HW_CMD_CLK_UNGATE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6), + IPA_CPU_2_HW_CMD_MEMCPY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7), + IPA_CPU_2_HW_CMD_RESET_PIPE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8), + IPA_CPU_2_HW_CMD_REG_WRITE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9), + IPA_CPU_2_HW_CMD_GSI_CH_EMPTY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10), +}; + +/** + * enum ipa3_hw_2_cpu_responses - Values that represent common HW responses + * to CPU commands. + * @IPA_HW_2_CPU_RESPONSE_NO_OP : No operation response + * @IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED : HW shall send this command once + * boot sequence is completed and HW is ready to serve commands from CPU + * @IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED: Response to CPU commands + * @IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO : Response to + * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO command + */ +enum ipa3_hw_2_cpu_responses { + IPA_HW_2_CPU_RESPONSE_NO_OP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), + IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3), +}; + +/** + * struct IpaHwMemCopyData_t - Structure holding the parameters + * for IPA_CPU_2_HW_CMD_MEMCPY command. + * + * The parameters are passed as immediate params in the shared memory + */ +struct IpaHwMemCopyData_t { + u32 destination_addr; + u32 source_addr; + u32 dest_buffer_size; + u32 source_buffer_size; +}; + +/** + * struct IpaHwRegWriteCmdData_t - holds the parameters for + * IPA_CPU_2_HW_CMD_REG_WRITE command. Parameters are + * sent as 64b immediate parameters. 
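+ *
+ * This command is used when a register write is delegated to the uC
+ * rather than performed directly by the AP, e.g. under the resource
+ * group 10 (RG10) limitation workaround referenced below.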
+ * @RegisterAddress: RG10 register address where the value needs to be written + * @RegisterValue: 32-Bit value to be written into the register + */ +struct IpaHwRegWriteCmdData_t { + u32 RegisterAddress; + u32 RegisterValue; +}; + +/** + * union IpaHwCpuCmdCompletedResponseData_t - Structure holding the parameters + * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response. + * @originalCmdOp : The original command opcode + * @status : 0 for success indication, otherwise failure + * @reserved : Reserved + * + * Parameters are sent as 32b immediate parameters. + */ +union IpaHwCpuCmdCompletedResponseData_t { + struct IpaHwCpuCmdCompletedResponseParams_t { + u32 originalCmdOp:8; + u32 status:8; + u32 reserved:16; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_UPDATE_FLAGS command + * @newFlags: SW flags defined the behavior of HW. + * This field is expected to be used as bitmask for enum ipa3_hw_flags + */ +union IpaHwUpdateFlagsCmdData_t { + struct IpaHwUpdateFlagsCmdParams_t { + u32 newFlags; + } params; + u32 raw32b; +}; + +/** + * union IpaHwChkChEmptyCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY command. Parameters are sent as 32b + * immediate parameters. + * @ee_n : EE owner of the channel + * @vir_ch_id : GSI virtual channel ID of the channel to checked of emptiness + * @reserved_02_04 : Reserved + */ +union IpaHwChkChEmptyCmdData_t { + struct IpaHwChkChEmptyCmdParams_t { + u8 ee_n; + u8 vir_ch_id; + u16 reserved_02_04; + } __packed params; + u32 raw32b; +} __packed; + +/** + * When resource group 10 limitation mitigation is enabled, uC send + * cmd should be able to run in interrupt context, so using spin lock + * instead of mutex. 
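+ *
+ * Concretely, IPA3_UC_LOCK()/IPA3_UC_UNLOCK() below select the primitive
+ * at run time: with the RG10 workaround enabled the IRQ-safe spinlock is
+ * taken (command submission may happen in atomic context), otherwise the
+ * mutex is used and callers may sleep while waiting for the uC response.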
+ */ +#define IPA3_UC_LOCK(flags) \ +do { \ + if (ipa3_ctx->apply_rg10_wa) \ + spin_lock_irqsave(&ipa3_ctx->uc_ctx.uc_spinlock, flags); \ + else \ + mutex_lock(&ipa3_ctx->uc_ctx.uc_lock); \ +} while (0) + +#define IPA3_UC_UNLOCK(flags) \ +do { \ + if (ipa3_ctx->apply_rg10_wa) \ + spin_unlock_irqrestore(&ipa3_ctx->uc_ctx.uc_spinlock, flags); \ + else \ + mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock); \ +} while (0) + +struct ipa3_uc_hdlrs ipa3_uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } }; + +const char *ipa_hw_error_str(enum ipa3_hw_errors err_type) +{ + const char *str; + + switch (err_type) { + case IPA_HW_ERROR_NONE: + str = "IPA_HW_ERROR_NONE"; + break; + case IPA_HW_INVALID_DOORBELL_ERROR: + str = "IPA_HW_INVALID_DOORBELL_ERROR"; + break; + case IPA_HW_DMA_ERROR: + str = "IPA_HW_DMA_ERROR"; + break; + case IPA_HW_FATAL_SYSTEM_ERROR: + str = "IPA_HW_FATAL_SYSTEM_ERROR"; + break; + case IPA_HW_INVALID_OPCODE: + str = "IPA_HW_INVALID_OPCODE"; + break; + case IPA_HW_INVALID_PARAMS: + str = "IPA_HW_INVALID_PARAMS"; + break; + case IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE: + str = "IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE"; + break; + case IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE: + str = "IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE"; + break; + case IPA_HW_GSI_CH_NOT_EMPTY_FAILURE: + str = "IPA_HW_GSI_CH_NOT_EMPTY_FAILURE"; + break; + default: + str = "INVALID ipa_hw_errors type"; + } + + return str; +} + +static void ipa3_log_evt_hdlr(void) +{ + int i; + + if (!ipa3_ctx->uc_ctx.uc_event_top_ofst) { + ipa3_ctx->uc_ctx.uc_event_top_ofst = + ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams; + if (ipa3_ctx->uc_ctx.uc_event_top_ofst + + sizeof(struct IpaHwEventLogInfoData_t) >= + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) + + ipa3_ctx->smem_sz) { + IPAERR("uc_top 0x%x outside SRAM\n", + ipa3_ctx->uc_ctx.uc_event_top_ofst); + goto bad_uc_top_ofst; + } + + ipa3_ctx->uc_ctx.uc_event_top_mmio = ioremap( + ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->uc_ctx.uc_event_top_ofst, + sizeof(struct IpaHwEventLogInfoData_t)); + if (!ipa3_ctx->uc_ctx.uc_event_top_mmio) { + IPAERR("fail to ioremap uc top\n"); + goto bad_uc_top_ofst; + } + + for (i = 0; i < IPA_HW_NUM_FEATURES; i++) { + if (ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr) + ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr + (ipa3_ctx->uc_ctx.uc_event_top_mmio); + } + } else { + + if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams != + ipa3_ctx->uc_ctx.uc_event_top_ofst) { + IPAERR("uc top ofst changed new=%u cur=%u\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams, + ipa3_ctx->uc_ctx.uc_event_top_ofst); + } + } + + return; + +bad_uc_top_ofst: + ipa3_ctx->uc_ctx.uc_event_top_ofst = 0; +} + +/** + * ipa3_uc_state_check() - Check the status of the uC interface + * + * Return value: 0 if the uC is loaded, interface is initialized + * and there was no recent failure in one of the commands. + * A negative value is returned otherwise. 
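+ *
+ * -EFAULT is returned when the interface was never initialized, when the
+ * uC firmware is not (yet) loaded, or when the last command sent to the
+ * uC failed.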
+ */ +int ipa3_uc_state_check(void) +{ + if (!ipa3_ctx->uc_ctx.uc_inited) { + IPAERR("uC interface not initialized\n"); + return -EFAULT; + } + + if (!ipa3_ctx->uc_ctx.uc_loaded) { + IPAERR("uC is not loaded\n"); + return -EFAULT; + } + + if (ipa3_ctx->uc_ctx.uc_failed) { + IPAERR("uC has failed its last command\n"); + return -EFAULT; + } + + return 0; +} + +/** + * ipa3_uc_loaded_check() - Check the uC has been loaded + * + * Return value: 1 if the uC is loaded, 0 otherwise + */ +int ipa3_uc_loaded_check(void) +{ + return ipa3_ctx->uc_ctx.uc_loaded; +} +EXPORT_SYMBOL(ipa3_uc_loaded_check); + +static void ipa3_uc_event_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + union IpaHwErrorEventData_t evt; + u8 feature; + + WARN_ON(private_data != ipa3_ctx); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + IPADBG("uC evt opcode=%u\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp); + + + feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp); + + if (feature >= IPA_HW_FEATURE_MAX) { + IPAERR("Invalid feature %u for event %u\n", + feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + /* Feature specific handling */ + if (ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr) + ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr + (ipa3_ctx->uc_ctx.uc_sram_mmio); + + /* General handling */ + if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_ERROR) { + evt.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams; + IPAERR("uC Error, evt errorType = %s\n", + ipa_hw_error_str(evt.params.errorType)); + ipa3_ctx->uc_ctx.uc_failed = true; + ipa3_ctx->uc_ctx.uc_error_type = evt.params.errorType; + ipa3_ctx->uc_ctx.uc_error_timestamp = + ipahal_read_reg(IPA_TAG_TIMER); + /* Unexpected UC hardware state */ + BUG(); + } else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_LOG_INFO) { + IPADBG("uC evt log info ofst=0x%x\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams); + ipa3_log_evt_hdlr(); + } else { + IPADBG("unsupported uC evt opcode=%u\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp); + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + +} + +int ipa3_uc_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + int result = 0; + struct ipa_active_client_logging_info log_info; + + IPADBG("this=%pK evt=%lu ptr=%pK\n", this, event, ptr); + + result = ipa3_uc_state_check(); + if (result) + goto fail; + + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); + if (ipa3_inc_client_enable_clks_no_block(&log_info)) + goto fail; + + ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp = + IPA_CPU_2_HW_CMD_ERR_FATAL; + ipa3_ctx->uc_ctx.pending_cmd = ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp; + /* ensure write to shared memory is done before triggering uc */ + wmb(); + + if (ipa3_ctx->apply_rg10_wa) + ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n, + IPA_CPU_2_HW_CMD_MBOX_m, + IPA_CPU_2_HW_CMD_MBOX_n, 0x1); + else + ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1); + + /* give uc enough time to save state */ + udelay(IPA_PKT_FLUSH_TO_US); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG("err_fatal issued\n"); + +fail: + return NOTIFY_DONE; +} + +static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + union IpaHwCpuCmdCompletedResponseData_t uc_rsp; + u8 feature; + int res; + int i; + + WARN_ON(private_data != ipa3_ctx); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + IPADBG("uC rsp opcode=%u\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp); + + feature = 
EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp); + + if (feature >= IPA_HW_FEATURE_MAX) { + IPAERR("Invalid feature %u for event %u\n", + feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + + /* Feature specific handling */ + if (ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr) { + res = ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr( + ipa3_ctx->uc_ctx.uc_sram_mmio, + &ipa3_ctx->uc_ctx.uc_status); + if (res == 0) { + IPADBG("feature %d specific response handler\n", + feature); + complete_all(&ipa3_ctx->uc_ctx.uc_completion); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + } + + /* General handling */ + if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp == + IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) { + ipa3_ctx->uc_ctx.uc_loaded = true; + + IPADBG("IPA uC loaded\n"); + /* + * The proxy vote is held until uC is loaded to ensure that + * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED is received. + */ + ipa3_proxy_clk_unvote(); + + for (i = 0; i < IPA_HW_NUM_FEATURES; i++) { + if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr) + ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr(); + } + } else if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp == + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) { + uc_rsp.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams; + IPADBG("uC cmd response opcode=%u status=%u\n", + uc_rsp.params.originalCmdOp, + uc_rsp.params.status); + if (uc_rsp.params.originalCmdOp == + ipa3_ctx->uc_ctx.pending_cmd) { + ipa3_ctx->uc_ctx.uc_status = uc_rsp.params.status; + complete_all(&ipa3_ctx->uc_ctx.uc_completion); + } else { + IPAERR("Expected cmd=%u rcvd cmd=%u\n", + ipa3_ctx->uc_ctx.pending_cmd, + uc_rsp.params.originalCmdOp); + } + } else { + IPAERR("Unsupported uC rsp opcode = %u\n", + ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp); + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode, + u32 expected_status, bool polling_mode, unsigned long timeout_jiffies) +{ + int index; + union IpaHwCpuCmdCompletedResponseData_t uc_rsp; + unsigned long flags = 0; + int retries = 0; + u32 uc_error_type; + +send_cmd_lock: + IPA3_UC_LOCK(flags); + + if (ipa3_uc_state_check()) { + IPADBG("uC send command aborted\n"); + IPA3_UC_UNLOCK(flags); + return -EBADF; + } +send_cmd: + if (ipa3_ctx->apply_rg10_wa) { + if (!polling_mode) + IPADBG("Overriding mode to polling mode\n"); + polling_mode = true; + } else { + init_completion(&ipa3_ctx->uc_ctx.uc_completion); + } + + ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd_lo; + ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams_hi = cmd_hi; + ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp = opcode; + ipa3_ctx->uc_ctx.pending_cmd = opcode; + ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp = 0; + ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams = 0; + + ipa3_ctx->uc_ctx.uc_status = 0; + + /* ensure write to shared memory is done before triggering uc */ + wmb(); + + if (ipa3_ctx->apply_rg10_wa) + ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n, + IPA_CPU_2_HW_CMD_MBOX_m, + IPA_CPU_2_HW_CMD_MBOX_n, 0x1); + else + ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1); + + if (polling_mode) { + struct IpaHwSharedMemCommonMapping_t *uc_sram_ptr = + ipa3_ctx->uc_ctx.uc_sram_mmio; + for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) { + if (uc_sram_ptr->responseOp == + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) { + uc_rsp.raw32b = uc_sram_ptr->responseParams; + if (uc_rsp.params.originalCmdOp == + ipa3_ctx->uc_ctx.pending_cmd) { + ipa3_ctx->uc_ctx.uc_status = + uc_rsp.params.status; + break; + } + } + if (ipa3_ctx->apply_rg10_wa) + 
udelay(IPA_UC_POLL_SLEEP_USEC); + else + usleep_range(IPA_UC_POLL_SLEEP_USEC, + IPA_UC_POLL_SLEEP_USEC); + } + + if (index == IPA_UC_POLL_MAX_RETRY) { + IPAERR("uC max polling retries reached\n"); + if (ipa3_ctx->uc_ctx.uc_failed) { + uc_error_type = ipa3_ctx->uc_ctx.uc_error_type; + IPAERR("uC reported on Error, errorType = %s\n", + ipa_hw_error_str(uc_error_type)); + } + IPA3_UC_UNLOCK(flags); + /* Unexpected UC hardware state */ + BUG(); + } + } else { + if (wait_for_completion_timeout(&ipa3_ctx->uc_ctx.uc_completion, + timeout_jiffies) == 0) { + IPAERR("uC timed out\n"); + if (ipa3_ctx->uc_ctx.uc_failed) { + uc_error_type = ipa3_ctx->uc_ctx.uc_error_type; + IPAERR("uC reported on Error, errorType = %s\n", + ipa_hw_error_str(uc_error_type)); + } + IPA3_UC_UNLOCK(flags); + /* Unexpected UC hardware state */ + BUG(); + } + } + + if (ipa3_ctx->uc_ctx.uc_status != expected_status) { + if (ipa3_ctx->uc_ctx.uc_status == + IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE || + ipa3_ctx->uc_ctx.uc_status == + IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE) { + retries++; + if (retries == IPA_GSI_CHANNEL_STOP_MAX_RETRY) { + IPAERR("Failed after %d tries\n", retries); + IPA3_UC_UNLOCK(flags); + /* Unexpected UC hardware state */ + BUG(); + } + IPA3_UC_UNLOCK(flags); + if (ipa3_ctx->uc_ctx.uc_status == + IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE) + ipa3_inject_dma_task_for_gsi(); + /* sleep for short period to flush IPA */ + usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC, + IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC); + goto send_cmd_lock; + } + + if (ipa3_ctx->uc_ctx.uc_status == + IPA_HW_GSI_CH_NOT_EMPTY_FAILURE) { + retries++; + if (retries >= IPA_GSI_CHANNEL_EMPTY_MAX_RETRY) { + IPAERR("Failed after %d tries\n", retries); + IPA3_UC_UNLOCK(flags); + return -EFAULT; + } + if (ipa3_ctx->apply_rg10_wa) + udelay( + IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC / 2 + + IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC / 2); + else + usleep_range( + IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC, + IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC); + goto send_cmd; + } + + IPAERR("Recevied status %u, Expected status %u\n", + ipa3_ctx->uc_ctx.uc_status, expected_status); + IPA3_UC_UNLOCK(flags); + return -EFAULT; + } + + IPA3_UC_UNLOCK(flags); + + IPADBG("uC cmd %u send succeeded\n", opcode); + + return 0; +} + +/** + * ipa3_uc_interface_init() - Initialize the interface with the uC + * + * Return value: 0 on success, negative value otherwise + */ +int ipa3_uc_interface_init(void) +{ + int result; + unsigned long phys_addr; + + if (ipa3_ctx->uc_ctx.uc_inited) { + IPADBG("uC interface already initialized\n"); + return 0; + } + + mutex_init(&ipa3_ctx->uc_ctx.uc_lock); + spin_lock_init(&ipa3_ctx->uc_ctx.uc_spinlock); + + phys_addr = ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0); + ipa3_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr, + IPA_RAM_UC_SMEM_SIZE); + if (!ipa3_ctx->uc_ctx.uc_sram_mmio) { + IPAERR("Fail to ioremap IPA uC SRAM\n"); + result = -ENOMEM; + goto remap_fail; + } + + if (!ipa3_ctx->apply_rg10_wa) { + result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0, + ipa3_uc_event_handler, true, + ipa3_ctx); + if (result) { + IPAERR("Fail to register for UC_IRQ0 rsp interrupt\n"); + result = -EFAULT; + goto irq_fail0; + } + + result = ipa3_add_interrupt_handler(IPA_UC_IRQ_1, + ipa3_uc_response_hdlr, true, + ipa3_ctx); + if (result) { + IPAERR("fail to register for UC_IRQ1 rsp interrupt\n"); + result = -EFAULT; + goto irq_fail1; + } + } + + ipa3_ctx->uc_ctx.uc_inited = true; + + IPADBG("IPA uC 
interface is initialized\n"); + return 0; + +irq_fail1: + ipa3_remove_interrupt_handler(IPA_UC_IRQ_0); +irq_fail0: + iounmap(ipa3_ctx->uc_ctx.uc_sram_mmio); +remap_fail: + return result; +} + +/** + * ipa3_uc_load_notify() - Notification about uC loading + * + * This function should be called when the IPA uC interface layer cannot + * determine uC loading by itself and waits for an external notification. + * An example is the resource group 10 limitation, where the IPA driver does + * not get uC interrupts. + * The function should perform actions that were not done at init due to uC + * not being loaded then. + */ +void ipa3_uc_load_notify(void) +{ + int i; + int result; + + if (!ipa3_ctx->apply_rg10_wa) + return; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipa3_ctx->uc_ctx.uc_loaded = true; + IPADBG("IPA uC loaded\n"); + + ipa3_proxy_clk_unvote(); + + ipa3_init_interrupts(); + + result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0, + ipa3_uc_event_handler, true, + ipa3_ctx); + if (result) + IPAERR("Fail to register for UC_IRQ0 rsp interrupt.\n"); + + for (i = 0; i < IPA_HW_NUM_FEATURES; i++) { + if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr) + ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr(); + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} +EXPORT_SYMBOL(ipa3_uc_load_notify); + +/** + * ipa3_uc_send_cmd() - Send a command to the uC + * + * Note1: This function sends a command with a 32-bit parameter and does not + * use the higher 32 bits of the command parameter (set to zero). + * + * Note2: In case the operation times out (no response from the uC) or + * the maximal number of polling retries has been reached, the logic + * considers it an invalid state of the uC/IPA, and + * issues a kernel panic. + * + * Returns: 0 on success. + * -EINVAL in case of invalid input. + * -EBADF in case the uC interface is not initialized + * or the uC has failed previously. + * -EFAULT in case the received status doesn't match + * the expected one. + */ +int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status, + bool polling_mode, unsigned long timeout_jiffies) +{ + return ipa3_uc_send_cmd_64b_param(cmd, 0, opcode, + expected_status, polling_mode, timeout_jiffies); +} + +/** + * ipa3_uc_register_handlers() - Registers event, response and log event + * handlers for a specific feature. Please note + * that currently only one handler can be + * registered per feature.
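 + * + * Illustrative usage sketch (not taken from this snapshot; my_feature_event_hdlr + * is a hypothetical callback): a feature module typically zeroes a + * struct ipa3_uc_hdlrs, fills only the callbacks it needs and registers it: + * + * struct ipa3_uc_hdlrs hdlrs; + * + * memset(&hdlrs, 0, sizeof(hdlrs)); + * hdlrs.ipa_uc_event_hdlr = my_feature_event_hdlr; + * ipa3_uc_register_handlers(IPA_HW_FEATURE_WDI, &hdlrs); + * + * ipa3_uc_mhi_init() below uses the same pattern for IPA_HW_FEATURE_MHI.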
 + * + * Return value: None + */ +void ipa3_uc_register_handlers(enum ipa3_hw_features feature, + struct ipa3_uc_hdlrs *hdlrs) +{ + unsigned long flags = 0; + + if (0 > feature || IPA_HW_FEATURE_MAX <= feature) { + IPAERR("Feature %u is invalid, not registering hdlrs\n", + feature); + return; + } + + IPA3_UC_LOCK(flags); + ipa3_uc_hdlrs[feature] = *hdlrs; + IPA3_UC_UNLOCK(flags); + + IPADBG("uC handlers registered for feature %u\n", feature); +} + +int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client) +{ + const struct ipa_gsi_ep_config *gsi_ep_info; + union IpaHwChkChEmptyCmdData_t cmd; + int ret; + + gsi_ep_info = ipa3_get_gsi_ep_info(ipa_client); + if (!gsi_ep_info) { + IPAERR("Failed getting GSI EP info for client=%d\n", + ipa_client); + return 0; + } + + if (ipa3_uc_state_check()) { + IPADBG("uC cannot be used to validate ch emptiness clnt=%d\n" + , ipa_client); + return 0; + } + + cmd.params.ee_n = gsi_ep_info->ee; + cmd.params.vir_ch_id = gsi_ep_info->ipa_gsi_chan_num; + + IPADBG("uC emptiness check for IPA GSI Channel %d\n", + gsi_ep_info->ipa_gsi_chan_num); + + ret = ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_GSI_CH_EMPTY, 0, + false, 10*HZ); + + return ret; +} + + +/** + * ipa3_uc_notify_clk_state() - notify the uC of clock enable / disable + * @enabled: true if clocks are enabled + * + * The function uses the uC interface in order to notify the uC before IPA + * clocks are disabled, to make sure the uC is not in the middle of an + * operation. + * Also, after clocks are enabled, the uC needs to be notified to start + * processing. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_uc_notify_clk_state(bool enabled) +{ + u32 opcode; + + /* + * If the uC interface has not been initialized yet, + * don't notify the uC on the enable/disable + */ + if (ipa3_uc_state_check()) { + IPADBG("uC interface will not notify the UC on clock state\n"); + return 0; + } + + IPADBG("uC clock %s notification\n", (enabled) ? "UNGATE" : "GATE"); + + opcode = (enabled) ? IPA_CPU_2_HW_CMD_CLK_UNGATE : + IPA_CPU_2_HW_CMD_CLK_GATE; + + return ipa3_uc_send_cmd(0, opcode, 0, true, 0); +} + +/** + * ipa3_uc_update_hw_flags() - send uC the HW flags to be used + * @flags: This field is expected to be used as bitmask for enum ipa3_hw_flags + * + * Returns: 0 on success, negative on failure + */ +int ipa3_uc_update_hw_flags(u32 flags) +{ + union IpaHwUpdateFlagsCmdData_t cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.newFlags = flags; + return ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_UPDATE_FLAGS, 0, + false, HZ); +} + +/** + * ipa3_uc_rg10_write_reg() - write to register possibly via uC + * + * If the RG10 limitation workaround is enabled, then writing + * to a register will be proxied by the uC due to a H/W limitation.
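 + * In that case the call is effectively translated (see the body below) into + * + * ipa3_uc_send_cmd_64b_param(paddr, val, + * IPA_CPU_2_HW_CMD_REG_WRITE, 0, true, 0); + * + * where paddr is the physical address of register (reg, n); otherwise it + * simply falls through to ipahal_write_reg_n(reg, n, val).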
+ * This func should be called for RG10 registers only + * + * @Parameters: Like ipahal_write_reg_n() parameters + * + */ +void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val) +{ + int ret; + u32 paddr; + + if (!ipa3_ctx->apply_rg10_wa) + return ipahal_write_reg_n(reg, n, val); + + + /* calculate register physical address */ + paddr = ipa3_ctx->ipa_wrapper_base + ipa3_ctx->ctrl->ipa_reg_base_ofst; + paddr += ipahal_get_reg_n_ofst(reg, n); + + IPADBG("Sending uC cmd to reg write: addr=0x%x val=0x%x\n", + paddr, val); + ret = ipa3_uc_send_cmd_64b_param(paddr, val, + IPA_CPU_2_HW_CMD_REG_WRITE, 0, true, 0); + if (ret) { + IPAERR("failed to send cmd to uC for reg write\n"); + /* Unexpected UC hardware state */ + BUG(); + } +} + +/** + * ipa3_uc_memcpy() - Perform a memcpy action using IPA uC + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. + * + * Returns: 0 on success, negative on failure + */ +int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) +{ + int res; + struct ipa_mem_buffer mem; + struct IpaHwMemCopyData_t *cmd; + + IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len); + mem.size = sizeof(cmd); + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + cmd = (struct IpaHwMemCopyData_t *)mem.base; + memset(cmd, 0, sizeof(*cmd)); + cmd->destination_addr = dest; + cmd->dest_buffer_size = len; + cmd->source_addr = src; + cmd->source_buffer_size = len; + res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MEMCPY, 0, + true, 10 * HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto free_coherent; + } + + res = 0; +free_coherent: + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + return res; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c new file mode 100644 index 000000000000..a65dbdc6c0f2 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c @@ -0,0 +1,964 @@ +/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "ipa_i.h" + +/* MHI uC interface definitions */ +#define IPA_HW_INTERFACE_MHI_VERSION 0x0004 + +#define IPA_HW_MAX_NUMBER_OF_CHANNELS 2 +#define IPA_HW_MAX_NUMBER_OF_EVENTRINGS 2 +#define IPA_HW_MAX_CHANNEL_HANDLE (IPA_HW_MAX_NUMBER_OF_CHANNELS-1) + +/** + * Values that represent the MHI commands from CPU to IPA HW. + * @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing. + * Once operation was completed HW shall respond with + * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED. + * @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize specific channel to be ready + * to serve MHI transfers. Once initialization was completed HW shall + * respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE. + * IPA_HW_MHI_CHANNEL_STATE_ENABLE + * @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupts data. 
+ * Once operation was completed HW shall respond with + * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED. + * @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change specific channel + * processing state following host request. Once operation was completed + * HW shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE. + * @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL UL syncronization. + * @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing. + */ +enum ipa_cpu_2_hw_mhi_commands { + IPA_CPU_2_HW_CMD_MHI_INIT + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), + IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), + IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2), + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3), + IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4), + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5) +}; + +/** + * Values that represent MHI related HW responses to CPU commands. + * @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to + * IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or + * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands. + */ +enum ipa_hw_2_cpu_mhi_responses { + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), +}; + +/** + * Values that represent MHI related HW event to be sent to CPU. + * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specify the device detected an + * error in an element from the transfer ring associated with the channel + * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specify a transport + * interrupt was asserted when MHI engine is suspended + */ +enum ipa_hw_2_cpu_mhi_events { + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), +}; + +/** + * Channel error types. + * @IPA_HW_CHANNEL_ERROR_NONE: No error persists. + * @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected + */ +enum ipa_hw_channel_errors { + IPA_HW_CHANNEL_ERROR_NONE, + IPA_HW_CHANNEL_INVALID_RE_ERROR +}; + +/** + * MHI error types. + * @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space + * @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array + * @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array + * @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on + * secondary event ring + * @IPA_HW_LINK_ERROR: Link error + */ +enum ipa_hw_mhi_errors { + IPA_HW_INVALID_MMIO_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), + IPA_HW_INVALID_CHANNEL_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), + IPA_HW_INVALID_EVENT_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2), + IPA_HW_NO_ED_IN_RING_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4), + IPA_HW_LINK_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5), +}; + + +/** + * Structure referring to the common and MHI section of 128B shared memory + * located in offset zero of SW Partition in IPA SRAM. + * The shared memory is used for communication between IPA HW and CPU. + * @common: common section in IPA SRAM + * @interfaceVersionMhi: The MHI interface version as reported by HW + * @mhiState: Overall MHI state + * @reserved_2B: reserved + * @mhiCnl0State: State of MHI channel 0. + * The state carries information regarding the error type. + * See IPA_HW_MHI_CHANNEL_STATES. 
 + * @mhiCnl1State: State of MHI channel 1. + * @mhiCnl2State: State of MHI channel 2. + * @mhiCnl3State: State of MHI channel 3. + * @mhiCnl4State: State of MHI channel 4. + * @mhiCnl5State: State of MHI channel 5. + * @mhiCnl6State: State of MHI channel 6. + * @mhiCnl7State: State of MHI channel 7. + * @reserved_37_34: reserved + * @reserved_3B_38: reserved + * @reserved_3F_3C: reserved + */ +struct IpaHwSharedMemMhiMapping_t { + struct IpaHwSharedMemCommonMapping_t common; + u16 interfaceVersionMhi; + u8 mhiState; + u8 reserved_2B; + u8 mhiCnl0State; + u8 mhiCnl1State; + u8 mhiCnl2State; + u8 mhiCnl3State; + u8 mhiCnl4State; + u8 mhiCnl5State; + u8 mhiCnl6State; + u8 mhiCnl7State; + u32 reserved_37_34; + u32 reserved_3B_38; + u32 reserved_3F_3C; +}; + + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command. + * Parameters are sent as a pointer and thus should reside in an address + * accessible to the HW. + * @msiAddress: The MSI base (in device space) used for asserting the interrupt + * (MSI) associated with the event ring + * mmioBaseAddress: The address (in device space) of MMIO structure in + * host space + * deviceMhiCtrlBaseAddress: Base address of the memory region in the device + * address space where the MHI control data structures are allocated by + * the host, including channel context array, event context array, + * and rings. This value is used for host/device address translation. + * deviceMhiDataBaseAddress: Base address of the memory region in the device + * address space where the MHI data buffers are allocated by the host. + * This value is used for host/device address translation. + * firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel + * firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this + * event ring. + */ +struct IpaHwMhiInitCmdData_t { + u32 msiAddress; + u32 mmioBaseAddress; + u32 deviceMhiCtrlBaseAddress; + u32 deviceMhiDataBaseAddress; + u32 firstChannelIndex; + u32 firstEventRingIndex; +}; + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL + * command. Parameters are sent as 32b immediate parameters. + * @channelHandle: The channel identifier as allocated by driver. + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @contexArrayIndex: Unique index for channels, between 0 and 255. The index is + * used as an index in channel context array structures. + * @bamPipeId: The IPA pipe number for pipe dedicated for this channel + * @channelDirection: The direction of the channel as defined in the channel + * type field (CHTYPE) in the channel context data structure. + * @reserved: reserved. + */ +union IpaHwMhiInitChannelCmdData_t { + struct IpaHwMhiInitChannelCmdParams_t { + u32 channelHandle:8; + u32 contexArrayIndex:8; + u32 bamPipeId:6; + u32 channelDirection:2; + u32 reserved:8; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command. + * @msiAddress_low: The MSI lower base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring. + * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device + * @msiData: Data Pattern to use when generating the MSI + */ +struct IpaHwMhiMsiCmdData_t { + u32 msiAddress_low; + u32 msiAddress_hi; + u32 msiMask; + u32 msiData; +}; + +/** + * Structure holding the parameters for + * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command. + * Parameters are sent as 32b immediate parameters. + * @requestedState: The requested channel state as was indicated from Host. + * Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state + * @channelHandle: The channel identifier as allocated by driver. + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @LPTransitionRejected: Indication that low power state transition was + * rejected + * @reserved: reserved + */ +union IpaHwMhiChangeChannelStateCmdData_t { + struct IpaHwMhiChangeChannelStateCmdParams_t { + u32 requestedState:8; + u32 channelHandle:8; + u32 LPTransitionRejected:8; + u32 reserved:8; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command. + * Parameters are sent as 32b immediate parameters. + * @channelHandle: The channel identifier as allocated by driver. + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @reserved: reserved + */ +union IpaHwMhiStopEventUpdateData_t { + struct IpaHwMhiStopEventUpdateDataParams_t { + u32 channelHandle:8; + u32 reserved:24; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response. + * Parameters are sent as 32b immediate parameters. + * @state: The new channel state. In case state is not as requested this is + * error indication for the last command + * @channelHandle: The channel identifier + * @additonalParams: For stop: the number of pending transport descriptors + * currently queued + */ +union IpaHwMhiChangeChannelStateResponseData_t { + struct IpaHwMhiChangeChannelStateResponseParams_t { + u32 state:8; + u32 channelHandle:8; + u32 additonalParams:16; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event. + * Parameters are sent as 32b immediate parameters. + * @errorType: Type of error - IPA_HW_CHANNEL_ERRORS + * @channelHandle: The channel identifier as allocated by driver. + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @reserved: reserved + */ +union IpaHwMhiChannelErrorEventData_t { + struct IpaHwMhiChannelErrorEventParams_t { + u32 errorType:8; + u32 channelHandle:8; + u32 reserved:16; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event. + * Parameters are sent as 32b immediate parameters. + * @channelHandle: The channel identifier as allocated by driver. 
 + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @reserved: reserved + */ +union IpaHwMhiChannelWakeupEventData_t { + struct IpaHwMhiChannelWakeupEventParams_t { + u32 channelHandle:8; + u32 reserved:24; + } params; + u32 raw32b; +}; + +/** + * Structure holding the MHI Common statistics + * @numULDLSync: Number of times UL activity triggered due to DL activity + * @numULTimerExpired: Number of times UL Accm Timer expired + */ +struct IpaHwStatsMhiCmnInfoData_t { + u32 numULDLSync; + u32 numULTimerExpired; + u32 numChEvCtxWpRead; + u32 reserved; +}; + +/** + * Structure holding the MHI Channel statistics + * @doorbellInt: The number of doorbell interrupts + * @reProccesed: The number of ring elements processed + * @bamFifoFull: Number of times Bam Fifo got full + * @bamFifoEmpty: Number of times Bam Fifo got empty + * @bamFifoUsageHigh: Number of times Bam fifo usage went above 75% + * @bamFifoUsageLow: Number of times Bam fifo usage went below 25% + * @bamInt: Number of BAM Interrupts + * @ringFull: Number of times Transfer Ring got full + * @ringEmpty: Number of times Transfer Ring got empty + * @ringUsageHigh: Number of times Transfer Ring usage went above 75% + * @ringUsageLow: Number of times Transfer Ring usage went below 25% + * @delayedMsi: Number of times device triggered MSI to host after + * Interrupt Moderation Timer expiry + * @immediateMsi: Number of times device triggered MSI to host immediately + * @thresholdMsi: Number of times device triggered MSI due to max pending + * events threshold reached + * @numSuspend: Number of times channel was suspended + * @numResume: Number of times channel was resumed + * @num_OOB: Number of times we indicated that we are OOB + * @num_OOB_timer_expiry: Number of times we indicated that we are OOB + * after timer expiry + * @num_OOB_moderation_timer_start: Number of times we started timer after + * sending OOB and hitting OOB again before we processed threshold + * number of packets + * @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode + */ +struct IpaHwStatsMhiCnlInfoData_t { + u32 doorbellInt; + u32 reProccesed; + u32 bamFifoFull; + u32 bamFifoEmpty; + u32 bamFifoUsageHigh; + u32 bamFifoUsageLow; + u32 bamInt; + u32 ringFull; + u32 ringEmpty; + u32 ringUsageHigh; + u32 ringUsageLow; + u32 delayedMsi; + u32 immediateMsi; + u32 thresholdMsi; + u32 numSuspend; + u32 numResume; + u32 num_OOB; + u32 num_OOB_timer_expiry; + u32 num_OOB_moderation_timer_start; + u32 num_db_mode_evt; +}; + +/** + * Structure holding the MHI statistics + * @mhiCmnStats: Stats pertaining to MHI + * @mhiCnlStats: Stats pertaining to each channel + */ +struct IpaHwStatsMhiInfoData_t { + struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats; + struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[ + IPA_HW_MAX_NUMBER_OF_CHANNELS]; +}; + +/** + * Structure holding the MHI Common Config info + * @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled + * @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is + * enabled + * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events + * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events + */ +struct IpaHwConfigMhiCmnInfoData_t { + u8 isDlUlSyncEnabled; + u8 UlAccmVal; + u8 ulMsiEventThreshold; + u8 dlMsiEventThreshold; +}; + +/** + * Structure holding the parameters for MSI info data + * @msiAddress_low: The MSI lower base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring. + * @msiMask: Mask indicating number of messages assigned by the host to device + * @msiData: Data Pattern to use when generating the MSI + */ +struct IpaHwConfigMhiMsiInfoData_t { + u32 msiAddress_low; + u32 msiAddress_hi; + u32 msiMask; + u32 msiData; +}; + +/** + * Structure holding the MHI Channel Config info + * @transferRingSize: The Transfer Ring size in terms of Ring Elements + * @transferRingIndex: The Transfer Ring channel number as defined by host + * @eventRingIndex: The Event Ring Index associated with this Transfer Ring + * @bamPipeIndex: The BAM Pipe associated with this channel + * @isOutChannel: Indication for the direction of channel + * @reserved_0: Reserved byte for maintaining 4byte alignment + * @reserved_1: Reserved byte for maintaining 4byte alignment + */ +struct IpaHwConfigMhiCnlInfoData_t { + u16 transferRingSize; + u8 transferRingIndex; + u8 eventRingIndex; + u8 bamPipeIndex; + u8 isOutChannel; + u8 reserved_0; + u8 reserved_1; +}; + +/** + * Structure holding the MHI Event Config info + * @msiVec: msi vector to invoke MSI interrupt + * @intmodtValue: Interrupt moderation timer (in milliseconds) + * @eventRingSize: The Event Ring size in terms of Ring Elements + * @eventRingIndex: The Event Ring number as defined by host + * @reserved_0: Reserved byte for maintaining 4byte alignment + * @reserved_1: Reserved byte for maintaining 4byte alignment + * @reserved_2: Reserved byte for maintaining 4byte alignment + */ +struct IpaHwConfigMhiEventInfoData_t { + u32 msiVec; + u16 intmodtValue; + u16 eventRingSize; + u8 eventRingIndex; + u8 reserved_0; + u8 reserved_1; + u8 reserved_2; +}; + +/** + * Structure holding the MHI Config info + * @mhiCmnCfg: Common Config pertaining to MHI + * @mhiMsiCfg: Config pertaining to MSI config + * @mhiCnlCfg: Config pertaining to each channel + * @mhiEvtCfg: Config pertaining to each event Ring + */ +struct IpaHwConfigMhiInfoData_t { + struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg; + struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg; + struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[ + IPA_HW_MAX_NUMBER_OF_CHANNELS]; + struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[ + IPA_HW_MAX_NUMBER_OF_EVENTRINGS]; +}; + + +struct ipa3_uc_mhi_ctx { + u8 expected_responseOp; + u32 expected_responseParams; + void (*ready_cb)(void); + void (*wakeup_request_cb)(void); + u32 mhi_uc_stats_ofst; + struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio; +}; + +#define PRINT_COMMON_STATS(x) \ + (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \ + #x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x)) + +#define PRINT_CHANNEL_STATS(ch, x) \ + (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \ + #x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x)) + +struct ipa3_uc_mhi_ctx *ipa3_uc_mhi_ctx; + +static int ipa3_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio, u32 *uc_status) +{ + IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp); + if (uc_sram_mmio->responseOp == ipa3_uc_mhi_ctx->expected_responseOp && + uc_sram_mmio->responseParams == + ipa3_uc_mhi_ctx->expected_responseParams) { + *uc_status = 0; + return 0; + } + return -EINVAL; +} + +static void ipa3_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio) +{ + if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) { + union IpaHwMhiChannelErrorEventData_t evt; + + 
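 + /* log the channel error and decode the 32-bit event payload */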
IPAERR("Channel error\n"); + evt.raw32b = uc_sram_mmio->eventParams; + IPAERR("errorType=%d channelHandle=%d reserved=%d\n", + evt.params.errorType, evt.params.channelHandle, + evt.params.reserved); + } else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) { + union IpaHwMhiChannelWakeupEventData_t evt; + + IPADBG("WakeUp channel request\n"); + evt.raw32b = uc_sram_mmio->eventParams; + IPADBG("channelHandle=%d reserved=%d\n", + evt.params.channelHandle, evt.params.reserved); + ipa3_uc_mhi_ctx->wakeup_request_cb(); + } +} + +static void ipa3_uc_mhi_event_log_info_hdlr( + struct IpaHwEventLogInfoData_t *uc_event_top_mmio) +{ + struct Ipa3HwEventInfoData_t *evt_info_ptr; + u32 size; + + if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_MHI)) == 0) { + IPAERR("MHI feature missing 0x%x\n", + uc_event_top_mmio->featureMask); + return; + } + + evt_info_ptr = &uc_event_top_mmio->statsInfo; + size = evt_info_ptr->featureInfo[IPA_HW_FEATURE_MHI].params.size; + if (size != sizeof(struct IpaHwStatsMhiInfoData_t)) { + IPAERR("mhi stats sz invalid exp=%zu is=%u\n", + sizeof(struct IpaHwStatsMhiInfoData_t), + size); + return; + } + + ipa3_uc_mhi_ctx->mhi_uc_stats_ofst = + evt_info_ptr->baseAddrOffset + + evt_info_ptr->featureInfo[IPA_HW_FEATURE_MHI].params.offset; + IPAERR("MHI stats ofst=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_ofst); + if (ipa3_uc_mhi_ctx->mhi_uc_stats_ofst + + sizeof(struct IpaHwStatsMhiInfoData_t) >= + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) + + ipa3_ctx->smem_sz) { + IPAERR("uc_mhi_stats 0x%x outside SRAM\n", + ipa3_uc_mhi_ctx->mhi_uc_stats_ofst); + return; + } + + ipa3_uc_mhi_ctx->mhi_uc_stats_mmio = + ioremap(ipa3_ctx->ipa_wrapper_base + + ipa3_uc_mhi_ctx->mhi_uc_stats_ofst, + sizeof(struct IpaHwStatsMhiInfoData_t)); + if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) { + IPAERR("fail to ioremap uc mhi stats\n"); + return; + } +} + +int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void)) +{ + struct ipa3_uc_hdlrs hdlrs; + + if (ipa3_uc_mhi_ctx) { + IPAERR("Already initialized\n"); + return -EFAULT; + } + + ipa3_uc_mhi_ctx = kzalloc(sizeof(*ipa3_uc_mhi_ctx), GFP_KERNEL); + if (!ipa3_uc_mhi_ctx) { + IPAERR("no mem\n"); + return -ENOMEM; + } + + ipa3_uc_mhi_ctx->ready_cb = ready_cb; + ipa3_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb; + + memset(&hdlrs, 0, sizeof(hdlrs)); + hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_mhi_ctx->ready_cb; + hdlrs.ipa3_uc_response_hdlr = ipa3_uc_mhi_response_hdlr; + hdlrs.ipa_uc_event_hdlr = ipa3_uc_mhi_event_hdlr; + hdlrs.ipa_uc_event_log_info_hdlr = ipa3_uc_mhi_event_log_info_hdlr; + ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs); + + IPADBG("Done\n"); + return 0; +} + +void ipa3_uc_mhi_cleanup(void) +{ + struct ipa3_uc_hdlrs null_hdlrs = { 0 }; + + IPADBG("Enter\n"); + + if (!ipa3_uc_mhi_ctx) { + IPAERR("ipa3_uc_mhi_ctx is not initialized\n"); + return; + } + ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs); + kfree(ipa3_uc_mhi_ctx); + ipa3_uc_mhi_ctx = NULL; + + IPADBG("Done\n"); +} + +int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr, + u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx, + u32 first_evt_idx) +{ + int res; + struct ipa_mem_buffer mem; + struct IpaHwMhiInitCmdData_t *init_cmd_data; + struct IpaHwMhiMsiCmdData_t *msi_cmd; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + res = ipa3_uc_update_hw_flags(0); + if 
(res) { + IPAERR("ipa3_uc_update_hw_flags failed %d\n", res); + goto disable_clks; + } + + mem.size = sizeof(*init_cmd_data); + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + res = -ENOMEM; + goto disable_clks; + } + memset(mem.base, 0, mem.size); + init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base; + init_cmd_data->msiAddress = msi->addr_low; + init_cmd_data->mmioBaseAddress = mmio_addr; + init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr; + init_cmd_data->deviceMhiDataBaseAddress = host_data_addr; + init_cmd_data->firstChannelIndex = first_ch_idx; + init_cmd_data->firstEventRingIndex = first_evt_idx; + res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0, + false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, + mem.phys_base); + goto disable_clks; + } + + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + + mem.size = sizeof(*msi_cmd); + mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + res = -ENOMEM; + goto disable_clks; + } + + msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base; + msi_cmd->msiAddress_hi = msi->addr_hi; + msi_cmd->msiAddress_low = msi->addr_low; + msi_cmd->msiData = msi->data; + msi_cmd->msiMask = msi->mask; + res = ipa3_uc_send_cmd((u32)mem.phys_base, + IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, + mem.phys_base); + goto disable_clks; + } + + dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base); + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; + +} + +int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle, + int contexArrayIndex, int channelDirection) + +{ + int res; + union IpaHwMhiInitChannelCmdData_t init_cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + if (ipa_ep_idx < 0 || ipa_ep_idx >= ipa3_ctx->ipa_num_pipes) { + IPAERR("Invalid ipa_ep_idx.\n"); + return -EINVAL; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; + uc_rsp.params.channelHandle = channelHandle; + ipa3_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&init_cmd, 0, sizeof(init_cmd)); + init_cmd.params.channelHandle = channelHandle; + init_cmd.params.contexArrayIndex = contexArrayIndex; + init_cmd.params.bamPipeId = ipa_ep_idx; + init_cmd.params.channelDirection = channelDirection; + + res = ipa3_uc_send_cmd(init_cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + + +int ipa3_uc_mhi_reset_channel(int channelHandle) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = 
IPA_HW_MHI_CHANNEL_STATE_DISABLE; + uc_rsp.params.channelHandle = channelHandle; + ipa3_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE; + cmd.params.channelHandle = channelHandle; + res = ipa3_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa3_uc_mhi_suspend_channel(int channelHandle) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND; + uc_rsp.params.channelHandle = channelHandle; + ipa3_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND; + cmd.params.channelHandle = channelHandle; + res = ipa3_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; + uc_rsp.params.channelHandle = channelHandle; + ipa3_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN; + cmd.params.channelHandle = channelHandle; + cmd.params.LPTransitionRejected = LPTransitionRejected; + res = ipa3_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa3_uc_mhi_stop_event_update_channel(int channelHandle) +{ + union IpaHwMhiStopEventUpdateData_t cmd; + int res; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.channelHandle = channelHandle; + + ipa3_uc_mhi_ctx->expected_responseOp = + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE; + ipa3_uc_mhi_ctx->expected_responseParams = cmd.raw32b; + + res = ipa3_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd) +{ + int res; + + if (!ipa3_uc_mhi_ctx) { + IPAERR("Not 
initialized\n"); + return -EFAULT; + } + + IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n", + cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal); + IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n", + cmd->params.ulMsiEventThreshold, + cmd->params.dlMsiEventThreshold); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + res = ipa3_uc_send_cmd(cmd->raw32b, + IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ); + if (res) { + IPAERR("ipa3_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa3_uc_mhi_print_stats(char *dbg_buff, int size) +{ + int nBytes = 0; + int i; + + if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) { + IPAERR("MHI uc stats is not valid\n"); + return 0; + } + + nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, + "Common Stats:\n"); + PRINT_COMMON_STATS(numULDLSync); + PRINT_COMMON_STATS(numULTimerExpired); + PRINT_COMMON_STATS(numChEvCtxWpRead); + + for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) { + nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, + "Channel %d Stats:\n", i); + PRINT_CHANNEL_STATS(i, doorbellInt); + PRINT_CHANNEL_STATS(i, reProccesed); + PRINT_CHANNEL_STATS(i, bamFifoFull); + PRINT_CHANNEL_STATS(i, bamFifoEmpty); + PRINT_CHANNEL_STATS(i, bamFifoUsageHigh); + PRINT_CHANNEL_STATS(i, bamFifoUsageLow); + PRINT_CHANNEL_STATS(i, bamInt); + PRINT_CHANNEL_STATS(i, ringFull); + PRINT_CHANNEL_STATS(i, ringEmpty); + PRINT_CHANNEL_STATS(i, ringUsageHigh); + PRINT_CHANNEL_STATS(i, ringUsageLow); + PRINT_CHANNEL_STATS(i, delayedMsi); + PRINT_CHANNEL_STATS(i, immediateMsi); + PRINT_CHANNEL_STATS(i, thresholdMsi); + PRINT_CHANNEL_STATS(i, numSuspend); + PRINT_CHANNEL_STATS(i, numResume); + PRINT_CHANNEL_STATS(i, num_OOB); + PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry); + PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start); + PRINT_CHANNEL_STATS(i, num_db_mode_evt); + } + + return nBytes; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c new file mode 100644 index 000000000000..51cf8270dc21 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c @@ -0,0 +1,443 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "ipa_i.h" + +#define IPA_UC_NTN_DB_PA_TX 0x79620DC +#define IPA_UC_NTN_DB_PA_RX 0x79620D8 + +static void ipa3_uc_ntn_event_handler(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio) + +{ + union Ipa3HwNTNErrorEventData_t ntn_evt; + + if (uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_NTN_ERROR) { + ntn_evt.raw32b = uc_sram_mmio->eventParams; + IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n", + ntn_evt.params.ntn_error_type, + ntn_evt.params.ipa_pipe_number, + ntn_evt.params.ntn_ch_err_type); + } +} + +static void ipa3_uc_ntn_event_log_info_handler( +struct IpaHwEventLogInfoData_t *uc_event_top_mmio) +{ + struct Ipa3HwEventInfoData_t *statsPtr = &uc_event_top_mmio->statsInfo; + + if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) { + IPAERR("NTN feature missing 0x%x\n", + uc_event_top_mmio->featureMask); + return; + } + + if (statsPtr->featureInfo[IPA_HW_FEATURE_NTN].params.size != + sizeof(struct Ipa3HwStatsNTNInfoData_t)) { + IPAERR("NTN stats sz invalid exp=%zu is=%u\n", + sizeof(struct Ipa3HwStatsNTNInfoData_t), + statsPtr->featureInfo[IPA_HW_FEATURE_NTN].params.size); + return; + } + + ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = + uc_event_top_mmio->statsInfo.baseAddrOffset + + statsPtr->featureInfo[IPA_HW_FEATURE_NTN].params.offset; + IPAERR("NTN stats ofst=0x%x\n", ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst); + if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst + + sizeof(struct Ipa3HwStatsNTNInfoData_t) >= + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) + + ipa3_ctx->smem_sz) { + IPAERR("uc_ntn_stats 0x%x outside SRAM\n", + ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst); + return; + } + + ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio = + ioremap(ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst, + sizeof(struct Ipa3HwStatsNTNInfoData_t)); + if (!ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) { + IPAERR("fail to ioremap uc ntn stats\n"); + return; + } +} + +/** + * ipa2_get_wdi_stats() - Query WDI statistics from uc + * @stats: [inout] stats blob from client populated by driver + * + * Returns: 0 on success, negative on failure + * + * @note Cannot be called from atomic context + * + */ +int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats) +{ +#define TX_STATS(y) stats->tx_ch_stats[0].y = \ + ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y +#define RX_STATS(y) stats->rx_ch_stats[0].y = \ + ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y + + if (unlikely(!ipa3_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (!stats || !ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) { + IPAERR("bad parms stats=%pK ntn_stats=%pK\n", + stats, + ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio); + return -EINVAL; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + TX_STATS(num_pkts_processed); + TX_STATS(ring_stats.ringFull); + TX_STATS(ring_stats.ringEmpty); + TX_STATS(ring_stats.ringUsageHigh); + TX_STATS(ring_stats.ringUsageLow); + TX_STATS(ring_stats.RingUtilCount); + TX_STATS(gsi_stats.bamFifoFull); + TX_STATS(gsi_stats.bamFifoEmpty); + TX_STATS(gsi_stats.bamFifoUsageHigh); + TX_STATS(gsi_stats.bamFifoUsageLow); + TX_STATS(gsi_stats.bamUtilCount); + TX_STATS(num_db); + TX_STATS(num_qmb_int_handled); + TX_STATS(ipa_pipe_number); + + RX_STATS(num_pkts_processed); + RX_STATS(ring_stats.ringFull); + RX_STATS(ring_stats.ringEmpty); + RX_STATS(ring_stats.ringUsageHigh); + RX_STATS(ring_stats.ringUsageLow); + RX_STATS(ring_stats.RingUtilCount); + RX_STATS(gsi_stats.bamFifoFull); + 
RX_STATS(gsi_stats.bamFifoEmpty); + RX_STATS(gsi_stats.bamFifoUsageHigh); + RX_STATS(gsi_stats.bamFifoUsageLow); + RX_STATS(gsi_stats.bamUtilCount); + RX_STATS(num_db); + RX_STATS(num_qmb_int_handled); + RX_STATS(ipa_pipe_number); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + + +int ipa3_ntn_uc_reg_rdyCB(void (*ipa_ready_cb)(void *), void *user_data) +{ + int ret; + + if (!ipa3_ctx) { + IPAERR("IPA ctx is null\n"); + return -ENXIO; + } + + ret = ipa3_uc_state_check(); + if (ret) { + ipa3_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb; + ipa3_ctx->uc_ntn_ctx.priv = user_data; + return 0; + } + + return -EEXIST; +} + +void ipa3_ntn_uc_dereg_rdyCB(void) +{ + ipa3_ctx->uc_ntn_ctx.uc_ready_cb = NULL; + ipa3_ctx->uc_ntn_ctx.priv = NULL; +} + +static void ipa3_uc_ntn_loaded_handler(void) +{ + if (!ipa3_ctx) { + IPAERR("IPA ctx is null\n"); + return; + } + + if (ipa3_ctx->uc_ntn_ctx.uc_ready_cb) { + ipa3_ctx->uc_ntn_ctx.uc_ready_cb( + ipa3_ctx->uc_ntn_ctx.priv); + + ipa3_ctx->uc_ntn_ctx.uc_ready_cb = + NULL; + ipa3_ctx->uc_ntn_ctx.priv = NULL; + } +} + +int ipa3_ntn_init(void) +{ + struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 }; + + uc_ntn_cbs.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler; + uc_ntn_cbs.ipa_uc_event_log_info_hdlr = + ipa3_uc_ntn_event_log_info_handler; + uc_ntn_cbs.ipa_uc_loaded_hdlr = + ipa3_uc_ntn_loaded_handler; + + ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs); + + return 0; +} + +static int ipa3_uc_send_ntn_setup_pipe_cmd( + struct ipa_ntn_setup_info *ntn_info, u8 dir) +{ + int ipa_ep_idx; + int result = 0; + struct ipa_mem_buffer cmd; + struct Ipa3HwNtnSetUpCmdData_t *Ntn_params; + struct IpaHwOffloadSetUpCmdData_t *cmd_data; + + if (ntn_info == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to get ep idx.\n"); + return -EFAULT; + } + + IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx); + + IPADBG("ring_base_pa = 0x%pa\n", + &ntn_info->ring_base_pa); + IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size); + IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa); + IPADBG("num_buffers = %d\n", ntn_info->num_buffers); + IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size); + IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa); + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_NTN; + + Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params; + Ntn_params->ring_base_pa = ntn_info->ring_base_pa; + Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa; + Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size; + Ntn_params->num_buffers = ntn_info->num_buffers; + Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa; + Ntn_params->data_buff_size = ntn_info->data_buff_size; + Ntn_params->ipa_pipe_number = ipa_ep_idx; + Ntn_params->dir = dir; + + result = ipa3_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) + result = -EFAULT; + + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + return result; +} + +/** + * ipa3_setup_uc_ntn_pipes() - setup uc offload pipes + */ +int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, 
+ ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp) +{ + struct ipa3_ep_context *ep_ul; + struct ipa3_ep_context *ep_dl; + int ipa_ep_idx_ul; + int ipa_ep_idx_dl; + int result = 0; + + if (in == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client); + ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client); + if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) { + IPAERR("fail to alloc EP.\n"); + return -EFAULT; + } + + ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul]; + ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl]; + + if (ep_ul->valid || ep_dl->valid) { + IPAERR("EP already allocated ul:%d dl:%d\n", + ep_ul->valid, ep_dl->valid); + return -EFAULT; + } + + memset(ep_ul, 0, offsetof(struct ipa3_ep_context, sys)); + memset(ep_dl, 0, offsetof(struct ipa3_ep_context, sys)); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + /* setup ul ep cfg */ + ep_ul->valid = 1; + ep_ul->client = in->ul.client; + ep_ul->client_notify = notify; + ep_ul->priv = priv; + + memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg)); + ep_ul->cfg.nat.nat_en = IPA_SRC_NAT; + ep_ul->cfg.hdr.hdr_len = hdr_len; + ep_ul->cfg.mode.mode = IPA_BASIC; + + if (ipa3_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) { + IPAERR("fail to setup ul pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) { + IPAERR("fail to send cmd to uc for ul pipe\n"); + result = -EFAULT; + goto fail; + } + ipa3_install_dflt_flt_rules(ipa_ep_idx_ul); + outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX; + ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED; + IPADBG("client %d (ep: %d) connected\n", in->ul.client, + ipa_ep_idx_ul); + + /* setup dl ep cfg */ + ep_dl->valid = 1; + ep_dl->client = in->dl.client; + memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg)); + ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT; + ep_dl->cfg.hdr.hdr_len = hdr_len; + ep_dl->cfg.mode.mode = IPA_BASIC; + + if (ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) { + IPAERR("fail to setup dl pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) { + IPAERR("fail to send cmd to uc for dl pipe\n"); + result = -EFAULT; + goto fail; + } + outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX; + ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED; + + result = ipa3_enable_data_path(ipa_ep_idx_dl); + if (result) { + IPAERR("Enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_dl); + result = -EFAULT; + goto fail; + } + IPADBG("client %d (ep: %d) connected\n", in->dl.client, + ipa_ep_idx_dl); + +fail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +/** + * ipa3_tear_down_uc_offload_pipes() - tear down uc offload pipes + */ + +int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, + int ipa_ep_idx_dl) +{ + struct ipa_mem_buffer cmd; + struct ipa3_ep_context *ep_ul, *ep_dl; + struct IpaHwOffloadCommonChCmdData_t *cmd_data; + union Ipa3HwNtnCommonChCmdData_t *tear; + int result = 0; + + IPADBG("ep_ul = %d\n", ipa_ep_idx_ul); + IPADBG("ep_dl = %d\n", ipa_ep_idx_dl); + + ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul]; + ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl]; + + if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED || + ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) { + IPAERR("channel bad state: ul %d dl %d\n", + ep_ul->uc_offload_state, ep_dl->uc_offload_state); + return -EFAULT; + } + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA 
memory.\n"); + return -ENOMEM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_NTN; + tear = &cmd_data->CommonCh_params.NtnCommonCh_params; + + /* teardown the DL pipe */ + ipa3_disable_data_path(ipa_ep_idx_dl); + /* + * Reset ep before sending cmd otherwise disconnect + * during data transfer will result into + * enormous suspend interrupts + */ + memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context)); + IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl); + tear->params.ipa_pipe_number = ipa_ep_idx_dl; + result = ipa3_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + IPAERR("fail to tear down dl pipe\n"); + result = -EFAULT; + goto fail; + } + + /* teardown the UL pipe */ + tear->params.ipa_pipe_number = ipa_ep_idx_ul; + result = ipa3_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + IPAERR("fail to tear down ul pipe\n"); + result = -EFAULT; + goto fail; + } + ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul); + memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context)); + IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul); + +fail: + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h new file mode 100644 index 000000000000..cd57e785ac3c --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h @@ -0,0 +1,553 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _IPA_UC_OFFLOAD_I_H_ +#define _IPA_UC_OFFLOAD_I_H_ + +#include +#include "ipa_i.h" + +/* + * Neutrino protocol related data structures + */ + +#define IPA_UC_MAX_NTN_TX_CHANNELS 1 +#define IPA_UC_MAX_NTN_RX_CHANNELS 1 + +#define IPA_NTN_TX_DIR 1 +#define IPA_NTN_RX_DIR 2 + +/** + * @brief Enum value determined based on the feature it + * corresponds to + * +----------------+----------------+ + * | 3 bits | 5 bits | + * +----------------+----------------+ + * | HW_FEATURE | OPCODE | + * +----------------+----------------+ + * + */ +#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode) +#define EXTRACT_UC_FEATURE(value) (value >> 5) + +#define IPA_HW_NUM_FEATURES 0x8 + +/** + * enum ipa3_hw_features - Values that represent the features supported + * in IPA HW + * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW + * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW + * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse + * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW + * @IPA_HW_FEATURE_ZIP: Feature related to CMP/DCMP operation in IPA HW + * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW + * @IPA_HW_FEATURE_OFFLOAD : Feature related to NTN operation in IPA HW + */ +enum ipa3_hw_features { + IPA_HW_FEATURE_COMMON = 0x0, + IPA_HW_FEATURE_MHI = 0x1, + IPA_HW_FEATURE_POWER_COLLAPSE = 0x2, + IPA_HW_FEATURE_WDI = 0x3, + IPA_HW_FEATURE_ZIP = 0x4, + IPA_HW_FEATURE_NTN = 0x5, + IPA_HW_FEATURE_OFFLOAD = 0x6, + IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES +}; + +/** + * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU. + * @IPA_HW_2_CPU_EVENT_NO_OP : No event present + * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the + * device + * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information + */ +enum ipa3_hw_2_cpu_events { + IPA_HW_2_CPU_EVENT_NO_OP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_HW_2_CPU_EVENT_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_2_CPU_EVENT_LOG_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), +}; + +/** + * enum ipa3_hw_errors - Common error types. + * @IPA_HW_ERROR_NONE : No error persists + * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell + * @IPA_HW_DMA_ERROR : Unexpected DMA error + * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset. + * @IPA_HW_INVALID_OPCODE : Invalid opcode sent + * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command + * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed + */ +enum ipa3_hw_errors { + IPA_HW_ERROR_NONE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_HW_INVALID_DOORBELL_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_DMA_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), + IPA_HW_FATAL_SYSTEM_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3), + IPA_HW_INVALID_OPCODE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4), + IPA_HW_INVALID_PARAMS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5), + IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6), + IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7), + IPA_HW_GSI_CH_NOT_EMPTY_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8) +}; + +/** + * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common + * section in 128B shared memory located in offset zero of SW Partition in IPA + * SRAM. 
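 + * + * As a rough sketch of the handshake implemented by + * ipa3_uc_send_cmd_64b_param() earlier in this patch: the CPU fills + * cmdParams/cmdParams_hi, writes cmdOp, issues a wmb(), rings the uC doorbell + * (IPA_IRQ_EE_UC_n, or the IPA_UC_MAILBOX_m_n mailbox under the RG10 + * workaround) and then either polls responseOp/responseParams or waits for + * uc_completion to be signalled from ipa3_uc_response_hdlr().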
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS + * @cmdParams : CPU->HW command parameter lower 32bit. + * @cmdParams_hi : CPU->HW command parameter higher 32bit. + * of parameters (immediate parameters) and point on structure in system memory + * (in such case the address must be accessible for HW) + * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES + * @responseParams : HW->CPU response parameter. The parameter filed can hold 32 + * bits of parameters (immediate parameters) and point on structure in system + * memory + * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS + * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 + * bits of parameters (immediate parameters) and point on + * structure in system memory + * @firstErrorAddress : Contains the address of first error-source on SNOC + * @hwState : State of HW. The state carries information regarding the + * error type. + * @warningCounter : The warnings counter. The counter carries information + * regarding non fatal errors in HW + * @interfaceVersionCommon : The Common interface version as reported by HW + * + * The shared memory is used for communication between IPA HW and CPU. + */ +struct IpaHwSharedMemCommonMapping_t { + u8 cmdOp; + u8 reserved_01; + u16 reserved_03_02; + u32 cmdParams; + u32 cmdParams_hi; + u8 responseOp; + u8 reserved_0D; + u16 reserved_0F_0E; + u32 responseParams; + u8 eventOp; + u8 reserved_15; + u16 reserved_17_16; + u32 eventParams; + u32 firstErrorAddress; + u8 hwState; + u8 warningCounter; + u16 reserved_23_22; + u16 interfaceVersionCommon; + u16 reserved_27_26; +} __packed; + +/** + * union Ipa3HwFeatureInfoData_t - parameters for stats/config blob + * + * @offset : Location of a feature within the EventInfoData + * @size : Size of the feature + */ +union Ipa3HwFeatureInfoData_t { + struct IpaHwFeatureInfoParams_t { + u32 offset:16; + u32 size:16; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwErrorEventData_t - HW->CPU Common Events + * @errorType : Entered when a system error is detected by the HW. Type of + * error is specified by IPA_HW_ERRORS + * @reserved : Reserved + */ +union IpaHwErrorEventData_t { + struct IpaHwErrorEventParams_t { + u32 errorType:8; + u32 reserved:24; + } __packed params; + u32 raw32b; +} __packed; + +/** + * struct Ipa3HwEventInfoData_t - Structure holding the parameters for + * statistics and config info + * + * @baseAddrOffset : Base Address Offset of the statistics or config + * structure from IPA_WRAPPER_BASE + * @Ipa3HwFeatureInfoData_t : Location and size of each feature within + * the statistics or config structure + * + * @note Information about each feature in the featureInfo[] + * array is populated at predefined indices per the IPA_HW_FEATURES + * enum definition + */ +struct Ipa3HwEventInfoData_t { + u32 baseAddrOffset; + union Ipa3HwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES]; +} __packed; + +/** + * struct IpaHwEventLogInfoData_t - Structure holding the parameters for + * IPA_HW_2_CPU_EVENT_LOG_INFO Event + * + * @featureMask : Mask indicating the features enabled in HW. 
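As an illustrative sketch (hypothetical helper, not driver code), a HW->CPU error event is consumed by reading eventOp from the shared-memory mapping above and re-interpreting the 32-bit eventParams through the matching union; the WDI event handler later in this patch follows the same pattern with its own union:

	static void example_handle_common_event(
		struct IpaHwSharedMemCommonMapping_t *mmio)
	{
		union IpaHwErrorEventData_t evt;

		if (mmio->eventOp == IPA_HW_2_CPU_EVENT_ERROR) {
			evt.raw32b = mmio->eventParams;
			pr_err("uC reported error type %u\n", evt.params.errorType);
		}
	}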
+ * Refer IPA_HW_FEATURE_MASK + * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event + * Log Buffer structure + * @statsInfo : Statistics related information + * @configInfo : Configuration related information + * + * @note The offset location of this structure from IPA_WRAPPER_BASE + * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO + * Event + */ +struct IpaHwEventLogInfoData_t { + u32 featureMask; + u32 circBuffBaseAddrOffset; + struct Ipa3HwEventInfoData_t statsInfo; + struct Ipa3HwEventInfoData_t configInfo; + +} __packed; + +/** + * struct ipa3_uc_ntn_ctx + * @ntn_uc_stats_ofst: Neutrino stats offset + * @ntn_uc_stats_mmio: Neutrino stats + * @priv: private data of client + * @uc_ready_cb: uc Ready cb + */ +struct ipa3_uc_ntn_ctx { + u32 ntn_uc_stats_ofst; + struct Ipa3HwStatsNTNInfoData_t *ntn_uc_stats_mmio; + void *priv; + ipa_uc_ready_cb uc_ready_cb; +}; + +/** + * enum ipa3_hw_2_cpu_ntn_events - Values that represent HW event + * to be sent to CPU + * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW + * detected an error in NTN + * + */ +enum ipa3_hw_2_cpu_ntn_events { + IPA_HW_2_CPU_EVENT_NTN_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0), +}; + + +/** + * enum ipa3_hw_ntn_errors - NTN specific error types. + * @IPA_HW_NTN_ERROR_NONE : No error persists + * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel + */ +enum ipa3_hw_ntn_errors { + IPA_HW_NTN_ERROR_NONE = 0, + IPA_HW_NTN_CHANNEL_ERROR = 1 +}; + +/** + * enum ipa3_hw_ntn_channel_states - Values that represent NTN + * channel state machine. + * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is + * initialized but disabled + * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running. + * Entered after SET_UP_COMMAND is processed successfully + * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not + * be in use in operational scenario + * + * These states apply to both Tx and Rx paths. These do not reflect the + * sub-state the state machine may be in. + */ +enum ipa3_hw_ntn_channel_states { + IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1, + IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2, + IPA_HW_NTN_CHANNEL_STATE_ERROR = 3, + IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF +}; + +/** + * enum ipa3_hw_ntn_channel_errors - List of NTN Channel error + * types. This is present in the event param + * @IPA_HW_NTN_CH_ERR_NONE: No error persists + * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine + * transition + * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating + * num RE to bring + * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update + * failed in Rx ring + * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine + * transition + * @IPA_HW_NTN_RX_CACHE_NON_EMPTY: + * @IPA_HW_NTN_CH_ERR_RESERVED: + * + * These states apply to both Tx and Rx paths. These do not + * reflect the sub-state the state machine may be in. 
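A minimal sketch (hypothetical helper) of how a feature locates its statistics block from the LOG_INFO payload described above; ipa3_uc_wdi_event_log_info_handler() further down performs exactly this computation for IPA_HW_FEATURE_WDI before ioremapping the block:

	static u32 example_feature_stats_ofst(struct IpaHwEventLogInfoData_t *top,
		enum ipa3_hw_features feature)
	{
		if (!(top->featureMask & (1 << feature)))
			return 0;	/* feature not enabled by the uC */

		return top->statsInfo.baseAddrOffset +
			top->statsInfo.featureInfo[feature].params.offset;
	}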
+ */ +enum ipa3_hw_ntn_channel_errors { + IPA_HW_NTN_CH_ERR_NONE = 0, + IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1, + IPA_HW_NTN_TX_FSM_ERROR = 2, + IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3, + IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4, + IPA_HW_NTN_RX_FSM_ERROR = 5, + IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6, + IPA_HW_NTN_CH_ERR_RESERVED = 0xFF +}; + + +/** + * struct Ipa3HwNtnSetUpCmdData_t - Ntn setup command data + * @ring_base_pa: physical address of the base of the Tx/Rx NTN + * ring + * @buff_pool_base_pa: physical address of the base of the Tx/Rx + * buffer pool + * @ntn_ring_size: size of the Tx/Rx NTN ring + * @num_buffers: Rx/tx buffer pool size + * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN + * Ring's tail pointer + * @ipa_pipe_number: IPA pipe number that has to be used for the + * Tx/Rx path + * @dir: Tx/Rx Direction + * @data_buff_size: size of the each data buffer allocated in + * DDR + */ +struct Ipa3HwNtnSetUpCmdData_t { + u32 ring_base_pa; + u32 buff_pool_base_pa; + u16 ntn_ring_size; + u16 num_buffers; + u32 ntn_reg_base_ptr_pa; + u8 ipa_pipe_number; + u8 dir; + u16 data_buff_size; + +} __packed; + +/** + * struct Ipa3HwNtnCommonChCmdData_t - Structure holding the + * parameters for Ntn Tear down command data params + * + *@ipa_pipe_number: IPA pipe number. This could be Tx or an Rx pipe + */ +union Ipa3HwNtnCommonChCmdData_t { + struct IpaHwNtnCommonChCmdParams_t { + u32 ipa_pipe_number :8; + u32 reserved :24; + } __packed params; + uint32_t raw32b; +} __packed; + + +/** + * struct Ipa3HwNTNErrorEventData_t - Structure holding the + * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed + * as immediate params in the shared memory + * + *@ntn_error_type: type of NTN error (ipa3_hw_ntn_errors) + *@ipa_pipe_number: IPA pipe number on which error has happened + * Applicable only if error type indicates channel error + *@ntn_ch_err_type: Information about the channel error (if + * available) + */ +union Ipa3HwNTNErrorEventData_t { + struct IpaHwNTNErrorEventParams_t { + u32 ntn_error_type :8; + u32 reserved :8; + u32 ipa_pipe_number :8; + u32 ntn_ch_err_type :8; + } __packed params; + uint32_t raw32b; +} __packed; + +/** + * struct NTN3RxInfoData_t - NTN Structure holding the Rx pipe + * information + * + *@num_pkts_processed: Number of packets processed - cumulative + * + *@ring_stats: + *@gsi_stats: + *@num_db: Number of times the doorbell was rung + *@num_qmb_int_handled: Number of QMB interrupts handled + *@ipa_pipe_number: The IPA Rx/Tx pipe number. 
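Illustrative sketch (hypothetical helper): an IPA_HW_2_CPU_EVENT_NTN_ERROR event carries its details as immediate parameters, so decoding is just a reinterpretation of the 32-bit event word through the union above:

	static void example_decode_ntn_error(u32 event_params)
	{
		union Ipa3HwNTNErrorEventData_t evt;

		evt.raw32b = event_params;
		pr_err("NTN error %u on pipe %u (ch err %u)\n",
			evt.params.ntn_error_type,
			evt.params.ipa_pipe_number,
			evt.params.ntn_ch_err_type);
	}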
+ */ +struct NTN3RxInfoData_t { + u32 num_pkts_processed; + struct IpaHwRingStats_t ring_stats; + struct IpaHwBamStats_t gsi_stats; + u32 num_db; + u32 num_qmb_int_handled; + u32 ipa_pipe_number; +} __packed; + + +/** + * struct NTN3TxInfoData_t - Structure holding the NTN Tx channel + * Ensure that this is always word aligned + * + *@num_pkts_processed: Number of packets processed - cumulative + *@tail_ptr_val: Latest value of doorbell written to copy engine + *@num_db_fired: Number of DB from uC FW to Copy engine + * + *@tx_comp_ring_stats: + *@bam_stats: + *@num_db: Number of times the doorbell was rung + *@num_qmb_int_handled: Number of QMB interrupts handled + */ +struct NTN3TxInfoData_t { + u32 num_pkts_processed; + struct IpaHwRingStats_t ring_stats; + struct IpaHwBamStats_t gsi_stats; + u32 num_db; + u32 num_qmb_int_handled; + u32 ipa_pipe_number; +} __packed; + + +/** + * struct Ipa3HwStatsNTNInfoData_t - Structure holding the NTN Tx + * channel Ensure that this is always word aligned + * + */ +struct Ipa3HwStatsNTNInfoData_t { + struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS]; + struct NTN3TxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS]; +} __packed; + + +/* + * uC offload related data structures + */ +#define IPA_UC_OFFLOAD_CONNECTED BIT(0) +#define IPA_UC_OFFLOAD_ENABLED BIT(1) +#define IPA_UC_OFFLOAD_RESUMED BIT(2) + +/** + * enum ipa_cpu_2_hw_offload_commands - Values that represent + * the offload commands from CPU + * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up + * Offload protocol's Tx/Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_RX_SET_UP : Command to tear down + * Offload protocol's Tx/ Rx Path + */ +enum ipa_cpu_2_hw_offload_commands { + IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN, +}; + + +/** + * enum ipa3_hw_offload_channel_states - Values that represent + * offload channel state machine. + * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is + * initialized but disabled + * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running. + * Entered after SET_UP_COMMAND is processed successfully + * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not + * be in use in operational scenario + * + * These states apply to both Tx and Rx paths. These do not + * reflect the sub-state the state machine may be in + */ +enum ipa3_hw_offload_channel_states { + IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1, + IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2, + IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3, + IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF +}; + + +/** + * enum ipa3_hw_2_cpu_cmd_resp_status - Values that represent + * offload related command response status to be sent to CPU. 
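For context, a hedged sketch of how the NTN teardown earlier in this patch fills its tear-down payload; it relies on the IpaHwOffloadCommonChCmdData_t wrapper defined a little further down in this header, and the buffer is expected to live in DMA-coherent memory whose physical address is then handed to ipa3_uc_send_cmd():

	static void example_fill_ntn_teardown(void *dma_coherent_buf, u8 pipe)
	{
		struct IpaHwOffloadCommonChCmdData_t *cmd = dma_coherent_buf;

		cmd->protocol = IPA_HW_FEATURE_NTN;
		cmd->CommonCh_params.NtnCommonCh_params.params.ipa_pipe_number = pipe;
		/*
		 * The buffer's physical address is then sent with
		 * IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN and the caller waits for
		 * IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS.
		 */
	}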
+ */ +enum ipa3_hw_2_cpu_offload_cmd_resp_status { + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0), + IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1), + IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2), + IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3), + IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4), + IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5), + IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6), + IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7), + IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8), + IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9), + IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10), + IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11), + IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12), + IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13), + IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14), +}; + +/** + * struct IpaHwSetUpCmd - + * + * + */ +union IpaHwSetUpCmd { + struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params; +} __packed; + +/** + * struct IpaHwOffloadSetUpCmdData_t - + * + * + */ +struct IpaHwOffloadSetUpCmdData_t { + u8 protocol; + union IpaHwSetUpCmd SetupCh_params; +} __packed; + +/** + * struct IpaHwCommonChCmd - Structure holding the parameters + * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN + * + * + */ +union IpaHwCommonChCmd { + union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params; +} __packed; + +struct IpaHwOffloadCommonChCmdData_t { + u8 protocol; + union IpaHwCommonChCmd CommonCh_params; +} __packed; + +#endif /* _IPA_UC_OFFLOAD_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c new file mode 100644 index 000000000000..9fa5d2723fb9 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c @@ -0,0 +1,1871 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "ipa_i.h" +#include +#include +#include +#include "ipa_qmi_service.h" + +#define IPA_HOLB_TMR_DIS 0x0 + +#define IPA_HW_INTERFACE_WDI_VERSION 0x0001 +#define IPA_HW_WDI_RX_MBOX_START_INDEX 48 +#define IPA_HW_WDI_TX_MBOX_START_INDEX 50 +#define IPA_WDI_RING_ALIGNMENT 8 + +#define IPA_WDI_CONNECTED BIT(0) +#define IPA_WDI_ENABLED BIT(1) +#define IPA_WDI_RESUMED BIT(2) +#define IPA_UC_POLL_SLEEP_USEC 100 + +#define IPA_WDI_RX_RING_RES 0 +#define IPA_WDI_RX_RING_RP_RES 1 +#define IPA_WDI_RX_COMP_RING_RES 2 +#define IPA_WDI_RX_COMP_RING_WP_RES 3 +#define IPA_WDI_TX_RING_RES 4 +#define IPA_WDI_CE_RING_RES 5 +#define IPA_WDI_CE_DB_RES 6 +#define IPA_WDI_MAX_RES 7 + +struct ipa_wdi_res { + struct ipa_wdi_buffer_info *res; + unsigned int nents; + bool valid; +}; + +static struct ipa_wdi_res wdi_res[IPA_WDI_MAX_RES]; + +static void ipa3_uc_wdi_loaded_handler(void); + +/** + * enum ipa_hw_2_cpu_wdi_events - Values that represent HW event to be sent to + * CPU. + * @IPA_HW_2_CPU_EVENT_WDI_ERROR : Event to specify that HW detected an error + * in WDI + */ +enum ipa_hw_2_cpu_wdi_events { + IPA_HW_2_CPU_EVENT_WDI_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0), +}; + +/** + * enum ipa_hw_wdi_channel_states - Values that represent WDI channel state + * machine. + * @IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED : Channel is initialized but + * disabled + * @IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND : Channel is enabled but in + * suspended state + * @IPA_HW_WDI_CHANNEL_STATE_RUNNING : Channel is running. Entered after + * SET_UP_COMMAND is processed successfully + * @IPA_HW_WDI_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_WDI_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use in + * operational scenario + * + * These states apply to both Tx and Rx paths. These do not reflect the + * sub-state the state machine may be in. 
+ */ +enum ipa_hw_wdi_channel_states { + IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED = 1, + IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND = 2, + IPA_HW_WDI_CHANNEL_STATE_RUNNING = 3, + IPA_HW_WDI_CHANNEL_STATE_ERROR = 4, + IPA_HW_WDI_CHANNEL_STATE_INVALID = 0xFF +}; + +/** + * enum ipa3_cpu_2_hw_commands - Values that represent the WDI commands from + * CPU + * @IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : Command to set up WDI Tx Path + * @IPA_CPU_2_HW_CMD_WDI_RX_SET_UP : Command to set up WDI Rx Path + * @IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG : Provide extended config info for Rx path + * @IPA_CPU_2_HW_CMD_WDI_CH_ENABLE : Command to enable a channel + * @IPA_CPU_2_HW_CMD_WDI_CH_DISABLE : Command to disable a channel + * @IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND : Command to suspend a channel + * @IPA_CPU_2_HW_CMD_WDI_CH_RESUME : Command to resume a channel + * @IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN : Command to tear down WDI Tx/ Rx Path + */ +enum ipa_cpu_2_hw_wdi_commands { + IPA_CPU_2_HW_CMD_WDI_TX_SET_UP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0), + IPA_CPU_2_HW_CMD_WDI_RX_SET_UP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1), + IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2), + IPA_CPU_2_HW_CMD_WDI_CH_ENABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3), + IPA_CPU_2_HW_CMD_WDI_CH_DISABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4), + IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5), + IPA_CPU_2_HW_CMD_WDI_CH_RESUME = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6), + IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7), +}; + +/** + * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent WDI related + * command response status to be sent to CPU. + */ +enum ipa_hw_2_cpu_cmd_resp_status { + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0), + IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1), + IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2), + IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3), + IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4), + IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5), + IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6), + IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7), + IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8), + IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9), + IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10), + IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11), + IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12), + IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13), + IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14), + IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15), + IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16), +}; + +/** + * enum ipa_hw_wdi_errors - WDI specific error types. 
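A hedged note with a sketch: the TX/RX set-up commands above are sent with the physical address of a DMA-coherent IpaHwWdi*SetUpCmdData_t blob, whereas the enable/disable/suspend/resume/tear-down commands pass a 32-bit immediate built from the IpaHwWdiCommonChCmdData_t union defined below. The helper name is hypothetical; ipa3_enable_wdi_pipe() further down does essentially this:

	static int example_enable_wdi_ch(u32 clnt_hdl)
	{
		union IpaHwWdiCommonChCmdData_t enable;

		enable.raw32b = 0;
		enable.params.ipa_pipe_number = clnt_hdl;
		return ipa3_uc_send_cmd(enable.raw32b, IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
					IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
					false, 10 * HZ);
	}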
+ * @IPA_HW_WDI_ERROR_NONE : No error persists + * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel + */ +enum ipa_hw_wdi_errors { + IPA_HW_WDI_ERROR_NONE = 0, + IPA_HW_WDI_CHANNEL_ERROR = 1 +}; + +/** + * enum ipa_hw_wdi_ch_errors = List of WDI Channel error types. This is present + * in the event param. + * @IPA_HW_WDI_CH_ERR_NONE : No error persists + * @IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL : Write pointer update failed in Tx + * Completion ring + * @IPA_HW_WDI_TX_FSM_ERROR : Error in the state machine transition + * @IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL : Error while calculating num RE to bring + * @IPA_HW_WDI_CH_ERR_RESERVED : Reserved - Not available for CPU to use + */ +enum ipa_hw_wdi_ch_errors { + IPA_HW_WDI_CH_ERR_NONE = 0, + IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL = 1, + IPA_HW_WDI_TX_FSM_ERROR = 2, + IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL = 3, + IPA_HW_WDI_CH_ERR_RESERVED = 0xFF +}; + +/** + * struct IpaHwSharedMemWdiMapping_t - Structure referring to the common and + * WDI section of 128B shared memory located in offset zero of SW Partition in + * IPA SRAM. + * + * The shared memory is used for communication between IPA HW and CPU. + */ +struct IpaHwSharedMemWdiMapping_t { + struct IpaHwSharedMemCommonMapping_t common; + u32 reserved_2B_28; + u32 reserved_2F_2C; + u32 reserved_33_30; + u32 reserved_37_34; + u32 reserved_3B_38; + u32 reserved_3F_3C; + u16 interfaceVersionWdi; + u16 reserved_43_42; + u8 wdi_tx_ch_0_state; + u8 wdi_rx_ch_0_state; + u16 reserved_47_46; +} __packed; + +/** + * struct IpaHwWdiTxSetUpCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_WDI_TX_SET_UP command. + * @comp_ring_base_pa : This is the physical address of the base of the Tx + * completion ring + * @comp_ring_size : This is the size of the Tx completion ring + * @reserved_comp_ring : Reserved field for expansion of Completion ring params + * @ce_ring_base_pa : This is the physical address of the base of the Copy + * Engine Source Ring + * @ce_ring_size : Copy Engine Ring size + * @reserved_ce_ring : Reserved field for expansion of CE ring params + * @ce_ring_doorbell_pa : This is the physical address of the doorbell that the + * IPA uC has to write into to trigger the copy engine + * @num_tx_buffers : Number of pkt buffers allocated. The size of the CE ring + * and the Tx completion ring has to be atleast ( num_tx_buffers + 1) + * @ipa_pipe_number : This is the IPA pipe number that has to be used for the + * Tx path + * @reserved : Reserved field + * + * Parameters are sent as pointer thus should be reside in address accessible + * to HW + */ +struct IpaHwWdiTxSetUpCmdData_t { + u32 comp_ring_base_pa; + u16 comp_ring_size; + u16 reserved_comp_ring; + u32 ce_ring_base_pa; + u16 ce_ring_size; + u16 reserved_ce_ring; + u32 ce_ring_doorbell_pa; + u16 num_tx_buffers; + u8 ipa_pipe_number; + u8 reserved; +} __packed; + +struct IpaHwWdi2TxSetUpCmdData_t { + u32 comp_ring_base_pa; + u32 comp_ring_base_pa_hi; + u16 comp_ring_size; + u16 reserved_comp_ring; + u32 ce_ring_base_pa; + u32 ce_ring_base_pa_hi; + u16 ce_ring_size; + u16 reserved_ce_ring; + u32 ce_ring_doorbell_pa; + u32 ce_ring_doorbell_pa_hi; + u16 num_tx_buffers; + u8 ipa_pipe_number; + u8 reserved; +} __packed; +/** + * struct IpaHwWdiRxSetUpCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command. 
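Sketch (hypothetical helper): the WDI 2.0 variants of the set-up structures carry ring and doorbell addresses as lo/hi 32-bit pairs so IOVAs above 4 GB can be handed to the uC; ipa3_connect_wdi_pipe() below splits every mapped address this way before filling the *_pa and *_pa_hi fields:

	static void example_split_iova(u64 iova, u32 *lo, u32 *hi)
	{
		*lo = (u32)(iova & 0xFFFFFFFF);
		*hi = (u32)(iova >> 32);
	}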
+ * @rx_ring_base_pa : This is the physical address of the base of the Rx ring + * (containing Rx buffers) + * @rx_ring_size : This is the size of the Rx ring + * @rx_ring_rp_pa : This is the physical address of the location through which + * IPA uc is expected to communicate about the Read pointer into the Rx Ring + * @ipa_pipe_number : This is the IPA pipe number that has to be used for the + * Rx path + * + * Parameters are sent as pointer thus should be reside in address accessible + * to HW + */ +struct IpaHwWdiRxSetUpCmdData_t { + u32 rx_ring_base_pa; + u32 rx_ring_size; + u32 rx_ring_rp_pa; + u8 ipa_pipe_number; +} __packed; + +struct IpaHwWdi2RxSetUpCmdData_t { + u32 rx_ring_base_pa; + u32 rx_ring_base_pa_hi; + u32 rx_ring_size; + u32 rx_ring_rp_pa; + u32 rx_ring_rp_pa_hi; + u32 rx_comp_ring_base_pa; + u32 rx_comp_ring_base_pa_hi; + u32 rx_comp_ring_size; + u32 rx_comp_ring_wp_pa; + u32 rx_comp_ring_wp_pa_hi; + u8 ipa_pipe_number; +} __packed; +/** + * union IpaHwWdiRxExtCfgCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command. + * @ipa_pipe_number : The IPA pipe number for which this config is passed + * @qmap_id : QMAP ID to be set in the metadata register + * @reserved : Reserved + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwWdiRxExtCfgCmdData_t { + struct IpaHwWdiRxExtCfgCmdParams_t { + u32 ipa_pipe_number:8; + u32 qmap_id:8; + u32 reserved:16; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwWdiCommonChCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN, + * IPA_CPU_2_HW_CMD_WDI_CH_ENABLE, + * IPA_CPU_2_HW_CMD_WDI_CH_DISABLE, + * IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND, + * IPA_CPU_2_HW_CMD_WDI_CH_RESUME command. + * @ipa_pipe_number : The IPA pipe number. This could be Tx or an Rx pipe + * @reserved : Reserved + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwWdiCommonChCmdData_t { + struct IpaHwWdiCommonChCmdParams_t { + u32 ipa_pipe_number:8; + u32 reserved:24; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwWdiErrorEventData_t - parameters for IPA_HW_2_CPU_EVENT_WDI_ERROR + * event. + * @wdi_error_type : The IPA pipe number to be torn down. This could be Tx or + * an Rx pipe + * @reserved : Reserved + * @ipa_pipe_number : IPA pipe number on which error has happened. 
Applicable + * only if error type indicates channel error + * @wdi_ch_err_type : Information about the channel error (if available) + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwWdiErrorEventData_t { + struct IpaHwWdiErrorEventParams_t { + u32 wdi_error_type:8; + u32 reserved:8; + u32 ipa_pipe_number:8; + u32 wdi_ch_err_type:8; + } __packed params; + u32 raw32b; +} __packed; + +static void ipa3_uc_wdi_event_log_info_handler( +struct IpaHwEventLogInfoData_t *uc_event_top_mmio) + +{ + struct Ipa3HwEventInfoData_t *stats_ptr = &uc_event_top_mmio->statsInfo; + + if ((uc_event_top_mmio->featureMask & + (1 << IPA_HW_FEATURE_WDI)) == 0) { + IPAERR("WDI feature missing 0x%x\n", + uc_event_top_mmio->featureMask); + return; + } + + if (stats_ptr->featureInfo[IPA_HW_FEATURE_WDI].params.size != + sizeof(struct IpaHwStatsWDIInfoData_t)) { + IPAERR("wdi stats sz invalid exp=%zu is=%u\n", + sizeof(struct IpaHwStatsWDIInfoData_t), + stats_ptr->featureInfo[IPA_HW_FEATURE_WDI].params.size); + return; + } + + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst = + stats_ptr->baseAddrOffset + + stats_ptr->featureInfo[IPA_HW_FEATURE_WDI].params.offset; + IPAERR("WDI stats ofst=0x%x\n", ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst); + if (ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst + + sizeof(struct IpaHwStatsWDIInfoData_t) >= + ipa3_ctx->ctrl->ipa_reg_base_ofst + + ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) + + ipa3_ctx->smem_sz) { + IPAERR("uc_wdi_stats 0x%x outside SRAM\n", + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst); + return; + } + + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio = + ioremap(ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst, + sizeof(struct IpaHwStatsWDIInfoData_t)); + if (!ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) { + IPAERR("fail to ioremap uc wdi stats\n"); + return; + } +} + +static void ipa3_uc_wdi_event_handler(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio) + +{ + union IpaHwWdiErrorEventData_t wdi_evt; + struct IpaHwSharedMemWdiMapping_t *wdi_sram_mmio_ext; + + if (uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_WDI_ERROR) { + wdi_evt.raw32b = uc_sram_mmio->eventParams; + IPADBG("uC WDI evt errType=%u pipe=%d cherrType=%u\n", + wdi_evt.params.wdi_error_type, + wdi_evt.params.ipa_pipe_number, + wdi_evt.params.wdi_ch_err_type); + wdi_sram_mmio_ext = + (struct IpaHwSharedMemWdiMapping_t *) + uc_sram_mmio; + IPADBG("tx_ch_state=%u rx_ch_state=%u\n", + wdi_sram_mmio_ext->wdi_tx_ch_0_state, + wdi_sram_mmio_ext->wdi_rx_ch_0_state); + } +} + +/** + * ipa3_get_wdi_stats() - Query WDI statistics from uc + * @stats: [inout] stats blob from client populated by driver + * + * Returns: 0 on success, negative on failure + * + * @note Cannot be called from atomic context + * + */ +int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ +#define TX_STATS(y) stats->tx_ch_stats.y = \ + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y +#define RX_STATS(y) stats->rx_ch_stats.y = \ + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->rx_ch_stats.y + + if (!stats || !ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) { + IPAERR("bad parms stats=%pK wdi_stats=%pK\n", + stats, + ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio); + return -EINVAL; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + TX_STATS(num_pkts_processed); + TX_STATS(copy_engine_doorbell_value); + TX_STATS(num_db_fired); + TX_STATS(tx_comp_ring_stats.ringFull); + TX_STATS(tx_comp_ring_stats.ringEmpty); + TX_STATS(tx_comp_ring_stats.ringUsageHigh); + TX_STATS(tx_comp_ring_stats.ringUsageLow); + 
TX_STATS(tx_comp_ring_stats.RingUtilCount); + TX_STATS(bam_stats.bamFifoFull); + TX_STATS(bam_stats.bamFifoEmpty); + TX_STATS(bam_stats.bamFifoUsageHigh); + TX_STATS(bam_stats.bamFifoUsageLow); + TX_STATS(bam_stats.bamUtilCount); + TX_STATS(num_db); + TX_STATS(num_unexpected_db); + TX_STATS(num_bam_int_handled); + TX_STATS(num_bam_int_in_non_running_state); + TX_STATS(num_qmb_int_handled); + TX_STATS(num_bam_int_handled_while_wait_for_bam); + + RX_STATS(max_outstanding_pkts); + RX_STATS(num_pkts_processed); + RX_STATS(rx_ring_rp_value); + RX_STATS(rx_ind_ring_stats.ringFull); + RX_STATS(rx_ind_ring_stats.ringEmpty); + RX_STATS(rx_ind_ring_stats.ringUsageHigh); + RX_STATS(rx_ind_ring_stats.ringUsageLow); + RX_STATS(rx_ind_ring_stats.RingUtilCount); + RX_STATS(bam_stats.bamFifoFull); + RX_STATS(bam_stats.bamFifoEmpty); + RX_STATS(bam_stats.bamFifoUsageHigh); + RX_STATS(bam_stats.bamFifoUsageLow); + RX_STATS(bam_stats.bamUtilCount); + RX_STATS(num_bam_int_handled); + RX_STATS(num_db); + RX_STATS(num_unexpected_db); + RX_STATS(num_pkts_in_dis_uninit_state); + RX_STATS(num_ic_inj_vdev_change); + RX_STATS(num_ic_inj_fw_desc_change); + RX_STATS(num_qmb_int_handled); + RX_STATS(reserved1); + RX_STATS(reserved2); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +int ipa3_wdi_init(void) +{ + struct ipa3_uc_hdlrs uc_wdi_cbs = { 0 }; + + uc_wdi_cbs.ipa_uc_event_hdlr = ipa3_uc_wdi_event_handler; + uc_wdi_cbs.ipa_uc_event_log_info_hdlr = + ipa3_uc_wdi_event_log_info_handler; + uc_wdi_cbs.ipa_uc_loaded_hdlr = + ipa3_uc_wdi_loaded_handler; + + ipa3_uc_register_handlers(IPA_HW_FEATURE_WDI, &uc_wdi_cbs); + + return 0; +} + +static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len, + bool device, unsigned long *iova) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx(); + unsigned long va = roundup(cb->next_addr, PAGE_SIZE); + int prot = IOMMU_READ | IOMMU_WRITE; + size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE), + PAGE_SIZE); + int ret; + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE), + true_len, + device ? 
(prot | IOMMU_MMIO) : prot); + if (ret) { + IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len); + return -EINVAL; + } + + ipa3_ctx->wdi_map_cnt++; + cb->next_addr = va + true_len; + *iova = va + pa - rounddown(pa, PAGE_SIZE); + return 0; +} + +static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt, + unsigned long *iova) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx(); + unsigned long va = roundup(cb->next_addr, PAGE_SIZE); + int prot = IOMMU_READ | IOMMU_WRITE; + int ret; + int i; + struct scatterlist *sg; + unsigned long start_iova = va; + phys_addr_t phys; + size_t len; + int count = 0; + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + if (!sgt) { + IPAERR("Bad parameters, scatter / gather list is NULL\n"); + return -EINVAL; + } + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + /* directly get sg_tbl PA from wlan-driver */ + phys = sg->dma_address; + len = PAGE_ALIGN(sg->offset + sg->length); + + ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot); + if (ret) { + IPAERR("iommu map failed for pa=%pa len=%zu\n", + &phys, len); + goto bad_mapping; + } + va += len; + ipa3_ctx->wdi_map_cnt++; + count++; + } + cb->next_addr = va; + *iova = start_iova; + + return 0; + +bad_mapping: + for_each_sg(sgt->sgl, sg, count, i) + iommu_unmap(cb->mapping->domain, sg_dma_address(sg), + sg_dma_len(sg)); + return -EINVAL; +} + +static void ipa_release_uc_smmu_mappings(enum ipa_client_type client) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx(); + int i; + int j; + int start; + int end; + + if (IPA_CLIENT_IS_CONS(client)) { + start = IPA_WDI_TX_RING_RES; + end = IPA_WDI_CE_DB_RES; + } else { + start = IPA_WDI_RX_RING_RES; + if (ipa3_ctx->ipa_wdi2) + end = IPA_WDI_RX_COMP_RING_WP_RES; + else + end = IPA_WDI_RX_RING_RP_RES; + } + + for (i = start; i <= end; i++) { + if (wdi_res[i].valid) { + for (j = 0; j < wdi_res[i].nents; j++) { + iommu_unmap(cb->mapping->domain, + wdi_res[i].res[j].iova, + wdi_res[i].res[j].size); + ipa3_ctx->wdi_map_cnt--; + } + kfree(wdi_res[i].res); + wdi_res[i].valid = false; + } + } + + if (ipa3_ctx->wdi_map_cnt == 0) + cb->next_addr = cb->va_end; + +} + +static void ipa_save_uc_smmu_mapping_pa(int res_idx, phys_addr_t pa, + unsigned long iova, size_t len) +{ + IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &pa, iova, len); + wdi_res[res_idx].res = kzalloc(sizeof(struct ipa_wdi_res), GFP_KERNEL); + if (!wdi_res[res_idx].res) { + WARN_ON(1); + return; + } + wdi_res[res_idx].nents = 1; + wdi_res[res_idx].valid = true; + wdi_res[res_idx].res->pa = rounddown(pa, PAGE_SIZE); + wdi_res[res_idx].res->iova = rounddown(iova, PAGE_SIZE); + wdi_res[res_idx].res->size = roundup(len + pa - rounddown(pa, + PAGE_SIZE), PAGE_SIZE); + IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &wdi_res[res_idx].res->pa, wdi_res[res_idx].res->iova, + wdi_res[res_idx].res->size); +} + +static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt, + unsigned long iova) +{ + int i; + struct scatterlist *sg; + unsigned long curr_iova = iova; + + if (!sgt) { + IPAERR("Bad parameters, scatter / gather list is NULL\n"); + return; + } + + wdi_res[res_idx].res = kcalloc(sgt->nents, sizeof(struct ipa_wdi_res), + GFP_KERNEL); + if (!wdi_res[res_idx].res) { + WARN_ON(1); + return; + } + wdi_res[res_idx].nents = sgt->nents; + wdi_res[res_idx].valid = true; + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + /* directly get sg_tbl PA from wlan */ + wdi_res[res_idx].res[i].pa = sg->dma_address; + 
wdi_res[res_idx].res[i].iova = curr_iova; + wdi_res[res_idx].res[i].size = PAGE_ALIGN(sg->offset + + sg->length); + IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &wdi_res[res_idx].res[i].pa, + wdi_res[res_idx].res[i].iova, + wdi_res[res_idx].res[i].size); + curr_iova += wdi_res[res_idx].res[i].size; + } +} + +static int ipa_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en, + phys_addr_t pa, struct sg_table *sgt, size_t len, bool device, + unsigned long *iova) +{ + /* support for SMMU on WLAN but no SMMU on IPA */ + if (wlan_smmu_en && ipa3_ctx->smmu_s1_bypass) { + IPAERR("Unsupported SMMU pairing\n"); + return -EINVAL; + } + + /* legacy: no SMMUs on either end */ + if (!wlan_smmu_en && ipa3_ctx->smmu_s1_bypass) { + *iova = pa; + return 0; + } + + /* no SMMU on WLAN but SMMU on IPA */ + if (!wlan_smmu_en && !ipa3_ctx->smmu_s1_bypass) { + if (ipa_create_uc_smmu_mapping_pa(pa, len, + (res_idx == IPA_WDI_CE_DB_RES) ? true : false, iova)) { + IPAERR("Fail to create mapping res %d\n", res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len); + return 0; + } + + /* SMMU on WLAN and SMMU on IPA */ + if (wlan_smmu_en && !ipa3_ctx->smmu_s1_bypass) { + switch (res_idx) { + case IPA_WDI_RX_RING_RP_RES: + case IPA_WDI_RX_COMP_RING_WP_RES: + case IPA_WDI_CE_DB_RES: + if (ipa_create_uc_smmu_mapping_pa(pa, len, + (res_idx == IPA_WDI_CE_DB_RES) ? true : false, + iova)) { + IPAERR("Fail to create mapping res %d\n", + res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len); + break; + case IPA_WDI_RX_RING_RES: + case IPA_WDI_RX_COMP_RING_RES: + case IPA_WDI_TX_RING_RES: + case IPA_WDI_CE_RING_RES: + if (ipa_create_uc_smmu_mapping_sgt(sgt, iova)) { + IPAERR("Fail to create mapping res %d\n", + res_idx); + WARN_ON(1); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova); + break; + default: + WARN_ON(1); + } + } + + return 0; +} + +/** + * ipa3_connect_wdi_pipe() - WDI client connect + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out) +{ + int ipa_ep_idx; + int result = -EFAULT; + struct ipa3_ep_context *ep; + struct ipa_mem_buffer cmd; + struct IpaHwWdiTxSetUpCmdData_t *tx; + struct IpaHwWdiRxSetUpCmdData_t *rx; + struct IpaHwWdi2TxSetUpCmdData_t *tx_2; + struct IpaHwWdi2RxSetUpCmdData_t *rx_2; + + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + unsigned long va; + phys_addr_t pa; + u32 len; + + if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) { + IPAERR("bad parm. 
in=%pK out=%pK\n", in, out); + if (in) + IPAERR("client = %d\n", in->sys.client); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (in->u.dl.comp_ring_base_pa % IPA_WDI_RING_ALIGNMENT || + in->u.dl.ce_ring_base_pa % IPA_WDI_RING_ALIGNMENT) { + IPAERR("alignment failure on TX\n"); + return -EINVAL; + } + } else { + if (in->u.ul.rdy_ring_base_pa % IPA_WDI_RING_ALIGNMENT) { + IPAERR("alignment failure on RX\n"); + return -EINVAL; + } + } + + result = ipa3_uc_state_check(); + if (result) + return result; + + ipa_ep_idx = ipa3_get_ep_mapping(in->sys.client); + if (ipa_ep_idx == -1) { + IPAERR("fail to alloc EP.\n"); + goto fail; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (ep->valid) { + IPAERR("EP already allocated.\n"); + goto fail; + } + + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); + IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client); + + IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx); + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (ipa3_ctx->ipa_wdi2) + cmd.size = sizeof(*tx_2); + else + cmd.size = sizeof(*tx); + IPADBG("comp_ring_base_pa=0x%pa\n", + &in->u.dl.comp_ring_base_pa); + IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size); + IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa); + IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size); + IPADBG("ce_ring_doorbell_pa=0x%pa\n", + &in->u.dl.ce_door_bell_pa); + IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers); + } else { + if (ipa3_ctx->ipa_wdi2) + cmd.size = sizeof(*rx_2); + else + cmd.size = sizeof(*rx); + IPADBG("rx_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_ring_base_pa); + IPADBG("rx_ring_size=%d\n", + in->u.ul.rdy_ring_size); + IPADBG("rx_ring_rp_pa=0x%pa\n", + &in->u.ul.rdy_ring_rp_pa); + IPADBG("rx_comp_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_base_pa); + IPADBG("rx_comp_ring_size=%d\n", + in->u.ul.rdy_comp_ring_size); + IPADBG("rx_comp_ring_wp_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_wp_pa); + ipa3_ctx->uc_ctx.rdy_ring_base_pa = + in->u.ul.rdy_ring_base_pa; + ipa3_ctx->uc_ctx.rdy_ring_rp_pa = + in->u.ul.rdy_ring_rp_pa; + ipa3_ctx->uc_ctx.rdy_ring_size = + in->u.ul.rdy_ring_size; + ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa = + in->u.ul.rdy_comp_ring_base_pa; + ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa = + in->u.ul.rdy_comp_ring_wp_pa; + ipa3_ctx->uc_ctx.rdy_comp_ring_size = + in->u.ul.rdy_comp_ring_size; + + /* check if the VA is empty */ + if (ipa3_ctx->ipa_wdi2) { + if (in->smmu_enabled) { + if (!in->u.ul_smmu.rdy_ring_rp_va || + !in->u.ul_smmu.rdy_comp_ring_wp_va) + goto dma_alloc_fail; + } else { + if (!in->u.ul.rdy_ring_rp_va || + !in->u.ul.rdy_comp_ring_wp_va) + goto dma_alloc_fail; + } + IPADBG("rdy_ring_rp value =%d\n", + in->smmu_enabled ? + *in->u.ul_smmu.rdy_ring_rp_va : + *in->u.ul.rdy_ring_rp_va); + IPADBG("rx_comp_ring_wp value=%d\n", + in->smmu_enabled ? + *in->u.ul_smmu.rdy_comp_ring_wp_va : + *in->u.ul.rdy_comp_ring_wp_va); + ipa3_ctx->uc_ctx.rdy_ring_rp_va = + in->smmu_enabled ? + in->u.ul_smmu.rdy_ring_rp_va : + in->u.ul.rdy_ring_rp_va; + ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va = + in->smmu_enabled ? + in->u.ul_smmu.rdy_comp_ring_wp_va : + in->u.ul.rdy_comp_ring_wp_va; + } + } + + cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + result = -ENOMEM; + goto dma_alloc_fail; + } + + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (ipa3_ctx->ipa_wdi2) { + tx_2 = (struct IpaHwWdi2TxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? 
in->u.dl_smmu.comp_ring_size : + in->u.dl.comp_ring_size; + IPADBG("TX_2 ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.comp_ring_size, + in->u.dl.comp_ring_size); + if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES, + in->smmu_enabled, + in->u.dl.comp_ring_base_pa, + &in->u.dl_smmu.comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping TX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->comp_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->comp_ring_base_pa = (u32) (va & 0xFFFFFFFF); + tx_2->comp_ring_size = len; + IPADBG("TX_2 comp_ring_base_pa_hi=0x%08x :0x%08x\n", + tx_2->comp_ring_base_pa_hi, + tx_2->comp_ring_base_pa); + + len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size : + in->u.dl.ce_ring_size; + IPADBG("TX_2 CE ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.ce_ring_size, + in->u.dl.ce_ring_size); + /* WA: wlan passed ce_ring sg_table PA directly */ + if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, + in->smmu_enabled, + in->u.dl.ce_ring_base_pa, + &in->u.dl_smmu.ce_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping CE ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->ce_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->ce_ring_base_pa = (u32) (va & 0xFFFFFFFF); + tx_2->ce_ring_size = len; + IPADBG("TX_2 ce_ring_base_pa_hi=0x%08x :0x%08x\n", + tx_2->ce_ring_base_pa_hi, + tx_2->ce_ring_base_pa); + + pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa : + in->u.dl.ce_door_bell_pa; + if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, + in->smmu_enabled, + pa, + NULL, + 4, + true, + &va)) { + IPAERR("fail to create uc mapping CE DB.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->ce_ring_doorbell_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->ce_ring_doorbell_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("TX_2 ce_ring_doorbell_pa_hi=0x%08x :0x%08x\n", + tx_2->ce_ring_doorbell_pa_hi, + tx_2->ce_ring_doorbell_pa); + + tx_2->num_tx_buffers = in->smmu_enabled ? + in->u.dl_smmu.num_tx_buffers : + in->u.dl.num_tx_buffers; + tx_2->ipa_pipe_number = ipa_ep_idx; + } else { + tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size : + in->u.dl.comp_ring_size; + IPADBG("TX ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.comp_ring_size, + in->u.dl.comp_ring_size); + if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES, + in->smmu_enabled, + in->u.dl.comp_ring_base_pa, + &in->u.dl_smmu.comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping TX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx->comp_ring_base_pa = va; + tx->comp_ring_size = len; + len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size : + in->u.dl.ce_ring_size; + IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.ce_ring_size, + in->u.dl.ce_ring_size); + if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, + in->smmu_enabled, + in->u.dl.ce_ring_base_pa, + &in->u.dl_smmu.ce_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping CE ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx->ce_ring_base_pa = va; + tx->ce_ring_size = len; + pa = in->smmu_enabled ? 
in->u.dl_smmu.ce_door_bell_pa : + in->u.dl.ce_door_bell_pa; + if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, + in->smmu_enabled, + pa, + NULL, + 4, + true, + &va)) { + IPAERR("fail to create uc mapping CE DB.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx->ce_ring_doorbell_pa = va; + tx->num_tx_buffers = in->u.dl.num_tx_buffers; + tx->ipa_pipe_number = ipa_ep_idx; + } + out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } else { + if (ipa3_ctx->ipa_wdi2) { + rx_2 = (struct IpaHwWdi2RxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size : + in->u.ul.rdy_ring_size; + IPADBG("RX_2 ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_ring_size, + in->u.ul.rdy_ring_size); + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_ring_base_pa, + &in->u.ul_smmu.rdy_ring, + len, + false, + &va)) { + IPAERR("fail to create uc RX_2 ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_ring_base_pa = (u32) (va & 0xFFFFFFFF); + rx_2->rx_ring_size = len; + IPADBG("RX_2 rx_ring_base_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_ring_base_pa_hi, + rx_2->rx_ring_base_pa); + + pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa : + in->u.ul.rdy_ring_rp_pa; + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc RX_2 rng RP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_ring_rp_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_ring_rp_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("RX_2 rx_ring_rp_pa_hi=0x%08x :0x%08x\n", + rx_2->rx_ring_rp_pa_hi, + rx_2->rx_ring_rp_pa); + len = in->smmu_enabled ? + in->u.ul_smmu.rdy_comp_ring_size : + in->u.ul.rdy_comp_ring_size; + IPADBG("RX_2 ring smmu_en=%d comp_ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_comp_ring_size, + in->u.ul.rdy_comp_ring_size); + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_COMP_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_comp_ring_base_pa, + &in->u.ul_smmu.rdy_comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc RX_2 comp_ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_comp_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_comp_ring_base_pa = (u32) (va & 0xFFFFFFFF); + rx_2->rx_comp_ring_size = len; + IPADBG("RX_2 rx_comp_ring_base_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_comp_ring_base_pa_hi, + rx_2->rx_comp_ring_base_pa); + + pa = in->smmu_enabled ? + in->u.ul_smmu.rdy_comp_ring_wp_pa : + in->u.ul.rdy_comp_ring_wp_pa; + if (ipa_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_WP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc RX_2 comp_rng WP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_comp_ring_wp_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_comp_ring_wp_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("RX_2 rx_comp_ring_wp_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_comp_ring_wp_pa_hi, + rx_2->rx_comp_ring_wp_pa); + rx_2->ipa_pipe_number = ipa_ep_idx; + } else { + rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? 
in->u.ul_smmu.rdy_ring_size : + in->u.ul.rdy_ring_size; + IPADBG("RX ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_ring_size, + in->u.ul.rdy_ring_size); + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_ring_base_pa, + &in->u.ul_smmu.rdy_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping RX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx->rx_ring_base_pa = va; + rx->rx_ring_size = len; + + pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa : + in->u.ul.rdy_ring_rp_pa; + if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc mapping RX rng RP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx->rx_ring_rp_pa = va; + rx->ipa_pipe_number = ipa_ep_idx; + } + out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } + + ep->valid = 1; + ep->client = in->sys.client; + ep->keep_ipa_awake = in->sys.keep_ipa_awake; + result = ipa3_disable_data_path(ipa_ep_idx); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx); + goto uc_timeout; + } + if (IPA_CLIENT_IS_PROD(in->sys.client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl); + } + + result = ipa3_uc_send_cmd((u32)(cmd.phys_base), + IPA_CLIENT_IS_CONS(in->sys.client) ? + IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : + IPA_CPU_2_HW_CMD_WDI_RX_SET_UP, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + ep->skip_ep_cfg = in->sys.skip_ep_cfg; + ep->client_notify = in->sys.notify; + ep->priv = in->sys.priv; + + /* for AP+STA stats update */ + if (in->wdi_notify) + ipa3_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify; + else + IPADBG("in->wdi_notify is null\n"); + + if (!ep->skip_ep_cfg) { + if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto ipa_cfg_ep_fail; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("Skipping endpoint configuration.\n"); + } + + ipa3_enable_data_path(ipa_ep_idx); + + out->clnt_hdl = ipa_ep_idx; + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client)) + ipa3_install_dflt_flt_rules(ipa_ep_idx); + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + ep->uc_offload_state |= IPA_WDI_CONNECTED; + IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx); + + return 0; + +ipa_cfg_ep_fail: + memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context)); +uc_timeout: + ipa_release_uc_smmu_mappings(in->sys.client); + dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); +dma_alloc_fail: + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); +fail: + return result; +} + +/** + * ipa3_disconnect_wdi_pipe() - WDI client disconnect + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_disconnect_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + union IpaHwWdiCommonChCmdData_t tear; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, 
%d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != IPA_WDI_CONNECTED) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + tear.params.ipa_pipe_number = clnt_hdl; + + result = ipa3_uc_send_cmd(tear.raw32b, + IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + ipa3_delete_dflt_flt_rules(clnt_hdl); + ipa_release_uc_smmu_mappings(ep->client); + + memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + /* for AP+STA stats update */ + if (ipa3_ctx->uc_wdi_ctx.stats_notify) + ipa3_ctx->uc_wdi_ctx.stats_notify = NULL; + else + IPADBG("uc_wdi_ctx.stats_notify already null\n"); + +uc_timeout: + return result; +} + +/** + * ipa3_enable_wdi_pipe() - WDI client enable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_enable_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + union IpaHwWdiCommonChCmdData_t enable; + struct ipa_ep_cfg_holb holb_cfg; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != IPA_WDI_CONNECTED) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + enable.params.ipa_pipe_number = clnt_hdl; + + result = ipa3_uc_send_cmd(enable.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_ENABLE, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + if (IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_DIS; + holb_cfg.tmr_val = 0; + result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + ep->uc_offload_state |= IPA_WDI_ENABLED; + IPADBG("client (ep: %d) enabled\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa3_disable_wdi_pipe() - WDI client disable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_disable_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + union IpaHwWdiCommonChCmdData_t disable; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + u32 prod_hdl; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + 
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + result = ipa3_disable_data_path(clnt_hdl); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + clnt_hdl); + result = -EPERM; + goto uc_timeout; + } + + /** + * To avoid data stall during continuous SAP on/off before + * setting delay to IPA Consumer pipe, remove delay and enable + * holb on IPA Producer pipe + */ + if (IPA_CLIENT_IS_PROD(ep->client)) { + IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + /* remove delay on wlan-prod pipe*/ + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + + prod_hdl = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_CONS); + if (ipa3_ctx->ep[prod_hdl].valid == 1) { + result = ipa3_disable_data_path(prod_hdl); + if (result) { + IPAERR("disable data path failed\n"); + IPAERR("res=%d clnt=%d\n", + result, prod_hdl); + result = -EPERM; + goto uc_timeout; + } + } + usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC, + IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC); + + } + + disable.params.ipa_pipe_number = clnt_hdl; + result = ipa3_uc_send_cmd(disable.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_DISABLE, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + /* Set the delay after disabling IPA Producer pipe */ + if (IPA_CLIENT_IS_PROD(ep->client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + ep->uc_offload_state &= ~IPA_WDI_ENABLED; + IPADBG("client (ep: %d) disabled\n", clnt_hdl); + + +uc_timeout: + return result; +} + +/** + * ipa3_resume_wdi_pipe() - WDI client resume + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_resume_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + union IpaHwWdiCommonChCmdData_t resume; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + resume.params.ipa_pipe_number = clnt_hdl; + + result = ipa3_uc_send_cmd(resume.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_RESUME, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) fail un-susp/delay result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl); + + ep->uc_offload_state |= IPA_WDI_RESUMED; + IPADBG("client (ep: %d) resumed\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa3_suspend_wdi_pipe() - WDI client suspend + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int 
ipa3_suspend_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa3_ep_context *ep; + union IpaHwWdiCommonChCmdData_t suspend; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + u32 source_pipe_bitmask = 0; + bool disable_force_clear = false; + struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 }; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED | + IPA_WDI_RESUMED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + suspend.params.ipa_pipe_number = clnt_hdl; + + if (IPA_CLIENT_IS_PROD(ep->client)) { + /* + * For WDI 2.0 need to ensure pipe will be empty before suspend + * as IPA uC will fail to suspend the pipe otherwise. + */ + if (ipa3_ctx->ipa_wdi2) { + source_pipe_bitmask = 1 << + ipa3_get_ep_mapping(ep->client); + result = ipa3_enable_force_clear(clnt_hdl, + false, source_pipe_bitmask); + if (result) { + /* + * assuming here modem SSR, AP can remove + * the delay in this case + */ + IPAERR("failed to force clear %d\n", result); + IPAERR("remove delay from SCND reg\n"); + ep_ctrl_scnd.endp_delay = false; + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl, + &ep_ctrl_scnd); + } else { + disable_force_clear = true; + } + } + + IPADBG("Post suspend event first for IPA Producer\n"); + IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl); + result = ipa3_uc_send_cmd(suspend.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + } + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + if (IPA_CLIENT_IS_CONS(ep->client)) { + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + ep_cfg_ctrl.ipa_ep_suspend = true; + result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("(ep: %d) failed to suspend result=%d\n", + clnt_hdl, result); + else + IPADBG("(ep: %d) suspended\n", clnt_hdl); + } + } else { + ep_cfg_ctrl.ipa_ep_delay = true; + result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) failed to delay result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) delayed\n", clnt_hdl); + } + + if (IPA_CLIENT_IS_CONS(ep->client)) { + result = ipa3_uc_send_cmd(suspend.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + } + + if (disable_force_clear) + ipa3_disable_force_clear(clnt_hdl); + + ipa3_ctx->tag_process_before_gating = true; + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + ep->uc_offload_state &= ~IPA_WDI_RESUMED; + IPADBG("client (ep: %d) suspended\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa_broadcast_wdi_quota_reach_ind() - quota reach + * @uint32_t fid: [in] input netdev ID + * @uint64_t num_bytes: [in] used bytes + * + * Returns: 0 on success, negative on failure + */ +int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, + uint64_t num_bytes) +{ + IPAERR("Quota reached indication on fid(%d) Mbytes(%lu)\n", + fid, + (unsigned long int) num_bytes); + ipa3_broadcast_quota_reach_ind(0, IPA_UPSTEAM_WLAN); + return 0; +} + +int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id) +{ + int result = 0; + struct ipa3_ep_context *ep; + 
union IpaHwWdiRxExtCfgCmdData_t qmap; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR_RL("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa3_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) { + IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + qmap.params.ipa_pipe_number = clnt_hdl; + qmap.params.qmap_id = qmap_id; + + result = ipa3_uc_send_cmd(qmap.raw32b, + IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id); + +uc_timeout: + return result; +} + +/** + * ipa3_uc_reg_rdyCB() - To register uC + * ready CB if uC not ready + * @inout: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa3_uc_reg_rdyCB( + struct ipa_wdi_uc_ready_params *inout) +{ + int result = 0; + + if (inout == NULL) { + IPAERR("bad parm. inout=%pK ", inout); + return -EINVAL; + } + + result = ipa3_uc_state_check(); + if (result) { + inout->is_uC_ready = false; + ipa3_ctx->uc_wdi_ctx.uc_ready_cb = inout->notify; + ipa3_ctx->uc_wdi_ctx.priv = inout->priv; + } else { + inout->is_uC_ready = true; + } + + return 0; +} + +/** + * ipa3_uc_dereg_rdyCB() - To de-register uC ready CB + * + * Returns: 0 on success, negative on failure + * + */ +int ipa3_uc_dereg_rdyCB(void) +{ + ipa3_ctx->uc_wdi_ctx.uc_ready_cb = NULL; + ipa3_ctx->uc_wdi_ctx.priv = NULL; + + return 0; +} + + +/** + * ipa3_uc_wdi_get_dbpa() - To retrieve + * doorbell physical address of wlan pipes + * @param: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa3_uc_wdi_get_dbpa( + struct ipa_wdi_db_params *param) +{ + if (param == NULL || param->client >= IPA_CLIENT_MAX) { + IPAERR("bad parm. 
param=%pK ", param); + if (param) + IPAERR("client = %d\n", param->client); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(param->client)) { + param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } else { + param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base + + ipahal_get_reg_base() + + ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n, + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } + + return 0; +} + +static void ipa3_uc_wdi_loaded_handler(void) +{ + if (!ipa3_ctx) { + IPAERR("IPA ctx is null\n"); + return; + } + + if (ipa3_ctx->uc_wdi_ctx.uc_ready_cb) { + ipa3_ctx->uc_wdi_ctx.uc_ready_cb( + ipa3_ctx->uc_wdi_ctx.priv); + + ipa3_ctx->uc_wdi_ctx.uc_ready_cb = + NULL; + ipa3_ctx->uc_wdi_ctx.priv = NULL; + } +} + +int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx(); + int i; + int ret = 0; + int prot = IOMMU_READ | IOMMU_WRITE; + + if (!info) { + IPAERR("info = %pK\n", info); + return -EINVAL; + } + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + for (i = 0; i < num_buffers; i++) { + IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i, + &info[i].pa, info[i].iova, info[i].size); + info[i].result = ipa3_iommu_map(cb->iommu, + rounddown(info[i].iova, PAGE_SIZE), + rounddown(info[i].pa, PAGE_SIZE), + roundup(info[i].size + info[i].pa - + rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE), + prot); + } + + return ret; +} + +int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx(); + int i; + int ret = 0; + + if (!info) { + IPAERR("info = %pK\n", info); + return -EINVAL; + } + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + for (i = 0; i < num_buffers; i++) { + IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i, + &info[i].pa, info[i].iova, info[i].size); + info[i].result = iommu_unmap(cb->iommu, + rounddown(info[i].iova, PAGE_SIZE), + roundup(info[i].size + info[i].pa - + rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE)); + } + + return ret; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c new file mode 100644 index 000000000000..1af2e4f540fb --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -0,0 +1,5313 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include /* gen_pool_alloc() */ +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipahal/ipahal.h" +#include "ipahal/ipahal_fltrt.h" +#include "ipahal/ipahal_hw_stats.h" +#include "../ipa_rm_i.h" + +#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL) +#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL) +#define IPA_V3_0_CLK_RATE_TURBO (200 * 1000 * 1000UL) + +#define IPA_V3_5_CLK_RATE_SVS (200 * 1000 * 1000UL) +#define IPA_V3_5_CLK_RATE_NOMINAL (400 * 1000 * 1000UL) +#define IPA_V3_5_CLK_RATE_TURBO (42640 * 10 * 1000UL) + +#define IPA_V4_0_CLK_RATE_SVS (125 * 1000 * 1000UL) +#define IPA_V4_0_CLK_RATE_NOMINAL (220 * 1000 * 1000UL) +#define IPA_V4_0_CLK_RATE_TURBO (250 * 1000 * 1000UL) + +#define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967296 - 1) + +#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000) +#define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600) + +#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000 +#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10 + +/* Max pipes + ICs for TAG process */ +#define IPA_TAG_MAX_DESC (IPA3_MAX_NUM_PIPES + 6) + +#define IPA_TAG_SLEEP_MIN_USEC (1000) +#define IPA_TAG_SLEEP_MAX_USEC (2000) +#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ) +#define IPA_BCR_REG_VAL_v3_0 (0x00000001) +#define IPA_BCR_REG_VAL_v3_5 (0x0000003B) +#define IPA_BCR_REG_VAL_v4_0 (0x00000039) +#define IPA_CLKON_CFG_v4_0 (0x30000000) +#define IPA_AGGR_GRAN_MIN (1) +#define IPA_AGGR_GRAN_MAX (32) +#define IPA_EOT_COAL_GRAN_MIN (1) +#define IPA_EOT_COAL_GRAN_MAX (16) + +#define IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC (15) + +#define IPA_AGGR_BYTE_LIMIT (\ + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \ + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT) +#define IPA_AGGR_PKT_LIMIT (\ + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \ + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT) + +/* In IPAv3 only endpoints 0-3 can be configured to deaggregation */ +#define IPA_EP_SUPPORTS_DEAGGR(idx) ((idx) >= 0 && (idx) <= 3) + +/* configure IPA spare register 1 in order to have correct IPA version + * set bits 0,2,3 and 4. 
see SpareBits documentation.xlsx + */ + +/* HPS, DPS sequencers Types*/ +#define IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY 0x00000000 +/* DMA + DECIPHER/CIPHER */ +#define IPA_DPS_HPS_SEQ_TYPE_DMA_DEC 0x00000011 +/* Packet Processing + no decipher + uCP (for Ethernet Bridging) */ +#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP 0x00000002 +/* Packet Processing + decipher + uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_UCP 0x00000013 +/* 2 Packet Processing pass + no decipher + uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP 0x00000004 +/* 2 Packet Processing pass + decipher + uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP 0x00000015 +/* Packet Processing + no decipher + no uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP 0x00000006 +/* Packet Processing + no decipher + no uCP */ +#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_NO_UCP 0x00000017 +/* COMP/DECOMP */ +#define IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP 0x00000020 +/* Invalid sequencer type */ +#define IPA_DPS_HPS_SEQ_TYPE_INVALID 0xFFFFFFFF + +#define IPA_DPS_HPS_SEQ_TYPE_IS_DMA(seq_type) \ + (seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY || \ + seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_DEC || \ + seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP) + +#define QMB_MASTER_SELECT_DDR (0) +#define QMB_MASTER_SELECT_PCIE (1) + +/* Resource Group index*/ +#define IPA_v3_0_GROUP_UL (0) +#define IPA_v3_0_GROUP_DL (1) +#define IPA_v3_0_GROUP_DPL IPA_v3_0_GROUP_DL +#define IPA_v3_0_GROUP_DIAG (2) +#define IPA_v3_0_GROUP_DMA (3) +#define IPA_v3_0_GROUP_IMM_CMD IPA_v3_0_GROUP_UL +#define IPA_v3_0_GROUP_Q6ZIP (4) +#define IPA_v3_0_GROUP_Q6ZIP_GENERAL IPA_v3_0_GROUP_Q6ZIP +#define IPA_v3_0_GROUP_UC_RX_Q (5) +#define IPA_v3_0_GROUP_Q6ZIP_ENGINE IPA_v3_0_GROUP_UC_RX_Q +#define IPA_v3_0_GROUP_MAX (6) + +#define IPA_v3_5_GROUP_LWA_DL (0) /* currently not used */ +#define IPA_v3_5_MHI_GROUP_PCIE IPA_v3_5_GROUP_LWA_DL +#define IPA_v3_5_GROUP_UL_DL (1) +#define IPA_v3_5_MHI_GROUP_DDR IPA_v3_5_GROUP_UL_DL +#define IPA_v3_5_MHI_GROUP_DMA (2) +#define IPA_v3_5_GROUP_UC_RX_Q (3) /* currently not used */ +#define IPA_v3_5_SRC_GROUP_MAX (4) +#define IPA_v3_5_DST_GROUP_MAX (3) + +#define IPA_v4_0_GROUP_LWA_DL (0) +#define IPA_v4_0_MHI_GROUP_PCIE (0) +#define IPA_v4_0_ETHERNET (0) +#define IPA_v4_0_GROUP_UL_DL (1) +#define IPA_v4_0_MHI_GROUP_DDR (1) +#define IPA_v4_0_MHI_GROUP_DMA (2) +#define IPA_v4_0_GROUP_UC_RX_Q (3) +#define IPA_v4_0_SRC_GROUP_MAX (4) +#define IPA_v4_0_DST_GROUP_MAX (4) + +#define IPA_GROUP_MAX IPA_v3_0_GROUP_MAX + +enum ipa_rsrc_grp_type_src { + IPA_v3_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS, + IPA_v3_0_RSRC_GRP_TYPE_SRC_HDR_SECTORS, + IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER, + IPA_v3_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS, + IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS, + IPA_v3_0_RSRC_GRP_TYPE_SRC_HPS_DMARS, + IPA_v3_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES, + IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX, + + IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS, + IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS, + IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES, + IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX, + + IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0, + IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS, + IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF, + IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS, + IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES, + IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX +}; + +#define IPA_RSRC_GRP_TYPE_SRC_MAX IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX + +enum 
ipa_rsrc_grp_type_dst { + IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS, + IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS, + IPA_v3_0_RSRC_GRP_TYPE_DST_DPS_DMARS, + IPA_v3_0_RSRC_GRP_TYPE_DST_MAX, + + IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0, + IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS, + IPA_v3_5_RSRC_GRP_TYPE_DST_MAX, + + IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0, + IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS, + IPA_v4_0_RSRC_GRP_TYPE_DST_MAX, +}; +#define IPA_RSRC_GRP_TYPE_DST_MAX IPA_v3_0_RSRC_GRP_TYPE_DST_MAX + +enum ipa_rsrc_grp_type_rx { + IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ, + IPA_RSRC_GRP_TYPE_RX_MAX +}; + +enum ipa_rsrc_grp_rx_hps_weight_config { + IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG, + IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_MAX +}; + +struct rsrc_min_max { + u32 min; + u32 max; +}; + +enum ipa_ver { + IPA_3_0, + IPA_3_5, + IPA_3_5_MHI, + IPA_3_5_1, + IPA_4_0, + IPA_4_0_MHI, + IPA_VER_MAX, +}; + +static const struct rsrc_min_max ipa3_rsrc_src_grp_config + [IPA_VER_MAX][IPA_RSRC_GRP_TYPE_SRC_MAX][IPA_GROUP_MAX] = { + [IPA_3_0] = { + /*UL DL DIAG DMA Not Used uC Rx*/ + [IPA_v3_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {3, 255}, {3, 255}, {1, 255}, {1, 255}, {1, 255}, {2, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_HDR_SECTORS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {19, 19}, {26, 26}, {3, 3}, {7, 7}, {0, 0}, {8, 8} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} }, + }, + [IPA_3_5] = { + /* LWA_DL UL_DL not used UC_RX_Q, other are invalid */ + [IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {0, 0}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {0, 0}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {0, 0}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {0, 0}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} }, + }, + [IPA_3_5_MHI] = { + /* PCIE DDR DMA not used, other are invalid */ + [IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {12, 12}, {12, 12}, {8, 8}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {14, 14}, {14, 14}, {14, 14}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_3_5_1] = { + /* LWA_DL UL_DL not used UC_RX_Q, other are invalid */ + [IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {1, 255}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + 
[IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} }, + }, + [IPA_4_0] = { + /* LWA_DL UL_DL not used UC_RX_Q, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {1, 255}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} }, + }, + [IPA_4_0_MHI] = { + /* PCIE DDR DMA not used, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {12, 12}, {12, 12}, {8, 8}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {14, 14}, {14, 14}, {14, 14}, {0, 0}, {0, 0}, {0, 0} }, + }, +}; + +static const struct rsrc_min_max ipa3_rsrc_dst_grp_config + [IPA_VER_MAX][IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = { + [IPA_3_0] = { + /* UL DL/DPL DIAG DMA Q6zip_gen Q6zip_eng */ + [IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} }, + [IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = { + {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} }, + [IPA_v3_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} }, + }, + [IPA_3_5] = { + /* unused UL/DL/DPL unused N/A N/A N/A */ + [IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_3_5_MHI] = { + /* PCIE DDR DMA N/A N/A N/A */ + [IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_3_5_1] = { + /* LWA_DL UL/DL/DPL unused N/A N/A N/A */ + [IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_4_0] = { + /*LWA_DL UL/DL/DPL uC, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 255}, {1, 255}, {1, 2}, {0, 2}, {0, 0}, {0, 0} }, + }, + [IPA_4_0_MHI] = { + /*LWA_DL UL/DL/DPL uC, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 255}, {1, 255}, {1, 2}, {0, 2}, {0, 0}, {0, 0} }, + }, +}; + +static const struct rsrc_min_max ipa3_rsrc_rx_grp_config + [IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = { + [IPA_3_0] = { + /* UL DL DIAG DMA Unused uC Rx */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} }, + }, + [IPA_3_5] = { 
+ /* unused UL_DL unused UC_RX_Q N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {0, 0}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} }, + }, + [IPA_3_5_MHI] = { + /* PCIE DDR DMA unused N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + { 3, 3 }, { 7, 7 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, + }, + [IPA_3_5_1] = { + /* LWA_DL UL_DL unused UC_RX_Q N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} }, + }, + [IPA_4_0] = { + /* LWA_DL UL_DL not used UC_RX_Q, other are invalid */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} }, + }, + [IPA_4_0_MHI] = { + /* PCIE DDR DMA unused N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + { 3, 3 }, { 7, 7 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, + }, +}; + +static const u32 ipa3_rsrc_rx_grp_hps_weight_config + [IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_MAX][IPA_GROUP_MAX] = { + [IPA_3_0] = { + /* UL DL DIAG DMA Unused uC Rx */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 0, 0, 0, 0, 0, 0 }, + }, + [IPA_3_5] = { + /* unused UL_DL unused UC_RX_Q N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 }, + }, + [IPA_3_5_MHI] = { + /* PCIE DDR DMA unused N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 3, 5, 1, 1, 0, 0 }, + }, + [IPA_3_5_1] = { + /* LWA_DL UL_DL unused UC_RX_Q N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 }, + }, + [IPA_4_0] = { + /* LWA_DL UL_DL not used UC_RX_Q, other are invalid */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 }, + }, + [IPA_4_0_MHI] = { + /* PCIE DDR DMA unused N/A N/A */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 3, 5, 1, 1, 0, 0 }, + }, +}; + +enum ipa_ees { + IPA_EE_AP = 0, + IPA_EE_Q6 = 1, + IPA_EE_UC = 3, +}; + +struct ipa_ep_configuration { + bool valid; + int group_num; + bool support_flt; + int sequencer_type; + u8 qmb_master_sel; + struct ipa_gsi_ep_config ipa_gsi_ep_info; +}; + +/* clients not included in the list below are considered as invalid */ +static const struct ipa_ep_configuration ipa3_ep_mapping + [IPA_VER_MAX][IPA_CLIENT_MAX] = { + [IPA_3_0][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 10, 1, 8, 16, IPA_EE_UC } }, + [IPA_3_0][IPA_CLIENT_USB_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 3, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 14, 11, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 5, 16, 32, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v3_0_GROUP_IMM_CMD, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 22, 6, 18, 28, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_ODU_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 12, 9, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_MHI_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_PCIE, + { 0, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_Q6_LAN_PROD] = { + true, IPA_v3_0_GROUP_UL, false, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + 
QMB_MASTER_SELECT_DDR, + { 9, 4, 8, 12, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v3_0_GROUP_DL, true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 5, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v3_0_GROUP_IMM_CMD, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 1, 18, 28, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD] = { + true, IPA_v3_0_GROUP_Q6ZIP, + false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 7, 2, 0, 0, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD] = { + true, IPA_v3_0_GROUP_Q6ZIP, + false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 3, 0, 0, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = { + true, IPA_v3_0_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_PCIE, + { 12, 9, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = { + true, IPA_v3_0_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_PCIE, + { 13, 10, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_ETHERNET_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {2, 0, 8, 16, IPA_EE_UC} }, + /* Only for test purpose */ + [IPA_3_0][IPA_CLIENT_TEST_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 3, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 3, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 5, 16, 32, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 12, 9, 8, 16, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v3_0_GROUP_UL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 13, 10, 8, 16, IPA_EE_AP } }, + + [IPA_3_0][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 25, 4, 8, 8, IPA_EE_UC } }, + [IPA_3_0][IPA_CLIENT_WLAN2_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 27, 4, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_WLAN3_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 28, 13, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_WLAN4_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 29, 14, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_USB_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 26, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v3_0_GROUP_DPL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 2, 8, 12, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v3_0_GROUP_UL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 7, 8, 12, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + 
QMB_MASTER_SELECT_DDR, + { 16, 8, 8, 12, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_ODU_EMB_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 23, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_MHI_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 23, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 6, 8, 12, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v3_0_GROUP_UL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 5, 8, 12, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_DUN_CONS] = { + true, IPA_v3_0_GROUP_DIAG, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 30, 7, 4, 4, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS] = { + true, IPA_v3_0_GROUP_Q6ZIP, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 21, 8, 4, 4, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS] = { + true, IPA_v3_0_GROUP_Q6ZIP, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 4, 9, 4, 4, IPA_EE_Q6 } }, + [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = { + true, IPA_v3_0_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 28, 13, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = { + true, IPA_v3_0_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 29, 14, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_ETHERNET_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + {24, 3, 8, 8, IPA_EE_UC} }, + /* Only for test purpose */ + [IPA_3_0][IPA_CLIENT_TEST_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 26, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 26, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 27, 4, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 28, 13, 8, 8, IPA_EE_AP } }, + [IPA_3_0][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 29, 14, 8, 8, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_0][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_3_5 */ + [IPA_3_5][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 1, 8, 16, IPA_EE_UC } }, + [IPA_3_5][IPA_CLIENT_USB_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 7, 8, 16, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 9, 8, 16, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } 
}, + [IPA_3_5][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 5, 4, 20, 23, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_ODU_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_UC } }, + [IPA_3_5][IPA_CLIENT_Q6_LAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_3_5][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 23, IPA_EE_Q6 } }, + /* Only for test purpose */ + [IPA_3_5][IPA_CLIENT_TEST_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 7, 8, 16, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 7, 8, 16, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {7, 8, 8, 16, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 9, 8, 16, IPA_EE_AP } }, + + [IPA_3_5][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 3, 8, 8, IPA_EE_UC } }, + [IPA_3_5][IPA_CLIENT_WLAN2_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_WLAN3_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 13, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_USB_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 17, 11, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 10, 4, 6, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 9, 5, 8, 12, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 6, 8, 12, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_ODU_EMB_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 3, 8, 12, IPA_EE_Q6 } }, + [IPA_3_5][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 8, 12, IPA_EE_Q6 } }, + /* Only for test purpose */ + /* MBIM aggregation test pipes should have the same QMB as USB_CONS */ + [IPA_3_5][IPA_CLIENT_TEST_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + 
IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 15, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 17, 11, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_5][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 19, 13, 8, 8, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_5][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_3_5_MHI */ + [IPA_3_5_MHI][IPA_CLIENT_USB_PROD] = { + true, IPA_v3_5_MHI_GROUP_DDR, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 7, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v3_5_MHI_GROUP_DDR, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 5, 4, 20, 23, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_MHI_PROD] = { + true, IPA_v3_5_MHI_GROUP_PCIE, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_PCIE, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_PROD] = { + true, IPA_v3_5_MHI_GROUP_DDR, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v3_5_MHI_GROUP_DDR, true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 4, 10, 30, IPA_EE_Q6 } }, + [IPA_3_5_MHI][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v3_5_MHI_GROUP_PCIE, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 23, IPA_EE_Q6 } }, + [IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 7, 8, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 8, 9, 8, 16, IPA_EE_AP } }, + /* Only for test purpose */ + [IPA_3_5_MHI][IPA_CLIENT_TEST_PROD] = { + true, IPA_v3_5_MHI_GROUP_DDR, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 7, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST1_PROD] = { + 0, IPA_v3_5_MHI_GROUP_DDR, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 7, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v3_5_MHI_GROUP_PCIE, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_PCIE, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v3_5_MHI_GROUP_DMA, true, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + {7, 8, 8, 16, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v3_5_MHI_GROUP_DMA, true, 
+ IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 8, 9, 8, 16, IPA_EE_AP } }, + + [IPA_3_5_MHI][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 3, 8, 8, IPA_EE_UC } }, + [IPA_3_5_MHI][IPA_CLIENT_USB_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 11, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 10, 4, 6, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 9, 5, 8, 12, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 6, 8, 12, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_MHI_CONS] = { + true, IPA_v3_5_MHI_GROUP_PCIE, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 15, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 3, 8, 12, IPA_EE_Q6 } }, + [IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 8, 12, IPA_EE_Q6 } }, + [IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 18, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 19, 13, 8, 8, IPA_EE_AP } }, + /* Only for test purpose */ + [IPA_3_5_MHI][IPA_CLIENT_TEST_CONS] = { + true, IPA_v3_5_MHI_GROUP_PCIE, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 15, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v3_5_MHI_GROUP_PCIE, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 15, 1, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v3_5_MHI_GROUP_DDR, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 11, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 18, 12, 8, 8, IPA_EE_AP } }, + [IPA_3_5_MHI][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 19, 13, 8, 8, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_5_MHI][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_3_5_1 */ + [IPA_3_5_1][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 7, 1, 8, 16, IPA_EE_UC } }, + [IPA_3_5_1][IPA_CLIENT_USB_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 7, 8, 16, IPA_EE_AP } }, + 
[IPA_3_5_1][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 5, 4, 20, 23, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_Q6_LAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_3_5_1][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 4, 12, 30, IPA_EE_Q6 } }, + [IPA_3_5_1][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 23, IPA_EE_Q6 } }, + /* Only for test purpose */ + [IPA_3_5_1][IPA_CLIENT_TEST_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 0, 8, 16, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 23, IPA_EE_Q6 } }, + [IPA_3_5_1][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v3_5_GROUP_UL_DL, true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_UC } }, + + [IPA_3_5_1][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 3, 8, 8, IPA_EE_UC } }, + [IPA_3_5_1][IPA_CLIENT_WLAN2_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 9, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_WLAN3_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 10, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_USB_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 8, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 2, 4, 6, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 9, 5, 8, 12, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 6, 8, 12, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 3, 8, 12, IPA_EE_Q6 } }, + [IPA_3_5_1][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 8, 12, IPA_EE_Q6 } }, + /* Only for test purpose */ + [IPA_3_5_1][IPA_CLIENT_TEST_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + 
QMB_MASTER_SELECT_DDR, + { 17, 8, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 8, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 9, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 10, 8, 8, IPA_EE_AP } }, + [IPA_3_5_1][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 2, 4, 6, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_5_1][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_4_0 */ + [IPA_4_0][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 7, 9, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_USB_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 10, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 5, 4, 20, 24, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_ODU_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_ETHERNET_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 9, 0, 8, 16, IPA_EE_UC } }, + [IPA_4_0][IPA_CLIENT_Q6_LAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 2, 12, 24, IPA_EE_Q6 } }, + [IPA_4_0][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_4_0][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 24, IPA_EE_Q6 } }, + /* Only for test purpose */ + [IPA_4_0][IPA_CLIENT_TEST_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + 
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {7, 9, 8, 16, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 10, 8, 16, IPA_EE_AP } }, + + + [IPA_4_0][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 12, 6, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_WLAN2_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 20, 14, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_WLAN3_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 21, 15, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_USB_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 19, 13, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 7, 5, 5, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 5, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 6, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_ODU_EMB_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 17, 1, 17, 17, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_ETHERNET_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 22, 1, 17, 17, IPA_EE_UC } }, + [IPA_4_0][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 4, 9, 9, IPA_EE_Q6 } }, + [IPA_4_0][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 3, 9, 9, IPA_EE_Q6 } }, + [IPA_4_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 5, 9, 9, IPA_EE_Q6 } }, + /* Only for test purpose */ + /* MBIM aggregation test pipes should have the same QMB as USB_CONS */ + [IPA_4_0][IPA_CLIENT_TEST_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 12, 2, 5, 5, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 5, 5, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 18, 12, 6, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 20, 14, 9, 9, IPA_EE_AP } }, + [IPA_4_0][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 21, 15, 9, 9, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_4_0][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, + + /* IPA_4_0_MHI */ + 
[IPA_4_0_MHI][IPA_CLIENT_USB_PROD] = { + true, IPA_v4_0_MHI_GROUP_DDR, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v4_0_MHI_GROUP_DDR, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 3, 16, 32, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 5, 4, 20, 24, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_MHI_PROD] = { + true, IPA_v4_0_MHI_GROUP_PCIE, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_PCIE, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_PROD] = { + true, IPA_v4_0_MHI_GROUP_DDR, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 0, 16, 32, IPA_EE_Q6 } }, + [IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 6, 2, 12, 24, IPA_EE_Q6 } }, + [IPA_4_0_MHI][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v4_0_MHI_GROUP_PCIE, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 1, 20, 24, IPA_EE_Q6 } }, + [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = { + true, IPA_v4_0_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 7, 9, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = { + true, IPA_v4_0_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 8, 10, 8, 16, IPA_EE_AP } }, + /* Only for test purpose */ + [IPA_4_0_MHI][IPA_CLIENT_TEST_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {0, 8, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + {7, 9, 8, 16, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + QMB_MASTER_SELECT_DDR, + { 8, 10, 8, 16, IPA_EE_AP } }, + + [IPA_4_0_MHI][IPA_CLIENT_USB_CONS] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 19, 13, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 7, 5, 5, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 5, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 6, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_MHI_CONS] = { + true, IPA_v4_0_MHI_GROUP_PCIE, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 17,
1, 17, 17, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_CONS] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 4, 9, 9, IPA_EE_Q6 } }, + [IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v4_0_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 3, 9, 9, IPA_EE_Q6 } }, + [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = { + true, IPA_v4_0_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 20, 14, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = { + true, IPA_v4_0_MHI_GROUP_DMA, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 21, 15, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 5, 9, 9, IPA_EE_Q6 } }, + /* Only for test purpose */ + [IPA_4_0_MHI][IPA_CLIENT_TEST_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 12, 2, 5, 5, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 2, 5, 5, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 18, 12, 6, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 20, 14, 9, 9, IPA_EE_AP } }, + [IPA_4_0_MHI][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 21, 15, 9, 9, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_4_0_MHI][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, +}; + +static struct msm_bus_vectors ipa_init_vectors_v3_0[] = { + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 0, + .ib = 0, + }, + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_OCIMEM, + .ab = 0, + .ib = 0, + }, +}; + +static struct msm_bus_vectors ipa_nominal_perf_vectors_v3_0[] = { + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 100000000, + .ib = 1300000000, + }, + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_OCIMEM, + .ab = 100000000, + .ib = 1300000000, + }, +}; + +static struct msm_bus_paths ipa_usecases_v3_0[] = { + { + .num_paths = ARRAY_SIZE(ipa_init_vectors_v3_0), + .vectors = ipa_init_vectors_v3_0, + }, + { + .num_paths = ARRAY_SIZE(ipa_nominal_perf_vectors_v3_0), + .vectors = ipa_nominal_perf_vectors_v3_0, + }, +}; + +static struct msm_bus_scale_pdata ipa_bus_client_pdata_v3_0 = { + .usecase = ipa_usecases_v3_0, + .num_usecases = ARRAY_SIZE(ipa_usecases_v3_0), + .name = "ipa", +}; + +/** + * ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an + * IPA_RM resource + * + * @resource: [IN] IPA Resource Manager resource + * @clients: [OUT] Empty array which will contain the list of clients. The + * caller must initialize this array. + * + * Return codes: 0 on success, negative on failure. 
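+ *
+ * A brief usage sketch (hypothetical caller; the resource chosen and the
+ * local names are illustrative only, not part of this driver):
+ *
+ *    struct ipa3_client_names clients;
+ *    int i;
+ *
+ *    memset(&clients, 0, sizeof(clients));
+ *    if (!ipa3_get_clients_from_rm_resource(IPA_RM_RESOURCE_WLAN_CONS,
+ *        &clients))
+ *        for (i = 0; i < clients.length; i++)
+ *            IPADBG("client %d\n", clients.names[i]);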
+ */ +int ipa3_get_clients_from_rm_resource( + enum ipa_rm_resource_name resource, + struct ipa3_client_names *clients) +{ + int i = 0; + + if (resource < 0 || + resource >= IPA_RM_RESOURCE_MAX || + !clients) { + IPAERR("Bad parameters\n"); + return -EINVAL; + } + + switch (resource) { + case IPA_RM_RESOURCE_USB_CONS: + clients->names[i++] = IPA_CLIENT_USB_CONS; + break; + case IPA_RM_RESOURCE_USB_DPL_CONS: + clients->names[i++] = IPA_CLIENT_USB_DPL_CONS; + break; + case IPA_RM_RESOURCE_HSIC_CONS: + clients->names[i++] = IPA_CLIENT_HSIC1_CONS; + break; + case IPA_RM_RESOURCE_WLAN_CONS: + clients->names[i++] = IPA_CLIENT_WLAN1_CONS; + clients->names[i++] = IPA_CLIENT_WLAN2_CONS; + clients->names[i++] = IPA_CLIENT_WLAN3_CONS; + clients->names[i++] = IPA_CLIENT_WLAN4_CONS; + break; + case IPA_RM_RESOURCE_MHI_CONS: + clients->names[i++] = IPA_CLIENT_MHI_CONS; + break; + case IPA_RM_RESOURCE_ODU_ADAPT_CONS: + clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS; + clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS; + break; + case IPA_RM_RESOURCE_ETHERNET_CONS: + clients->names[i++] = IPA_CLIENT_ETHERNET_CONS; + break; + case IPA_RM_RESOURCE_USB_PROD: + clients->names[i++] = IPA_CLIENT_USB_PROD; + break; + case IPA_RM_RESOURCE_HSIC_PROD: + clients->names[i++] = IPA_CLIENT_HSIC1_PROD; + break; + case IPA_RM_RESOURCE_MHI_PROD: + clients->names[i++] = IPA_CLIENT_MHI_PROD; + break; + case IPA_RM_RESOURCE_ODU_ADAPT_PROD: + clients->names[i++] = IPA_CLIENT_ODU_PROD; + break; + case IPA_RM_RESOURCE_ETHERNET_PROD: + clients->names[i++] = IPA_CLIENT_ETHERNET_PROD; + break; + default: + break; + } + clients->length = i; + + return 0; +} + +/** + * ipa3_should_pipe_be_suspended() - returns true when the client's pipe should + * be suspended during a power save scenario. False otherwise. + * + * @client: [IN] IPA client + */ +bool ipa3_should_pipe_be_suspended(enum ipa_client_type client) +{ + struct ipa3_ep_context *ep; + int ipa_ep_idx; + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + WARN_ON(1); + return false; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + /* + * starting IPA 4.0 pipe no longer can be suspended. Instead, + * the corresponding GSI channel should be stopped. Usually client + * driver will take care of stopping the channel. For client drivers + * that are not stopping the channel, IPA RM will do that based on + * ipa3_should_pipe_channel_be_stopped(). + */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + return false; + + if (ep->keep_ipa_awake) + return false; + + if (client == IPA_CLIENT_USB_CONS || + client == IPA_CLIENT_USB_DPL_CONS || + client == IPA_CLIENT_MHI_CONS || + client == IPA_CLIENT_HSIC1_CONS || + client == IPA_CLIENT_WLAN1_CONS || + client == IPA_CLIENT_WLAN2_CONS || + client == IPA_CLIENT_WLAN3_CONS || + client == IPA_CLIENT_WLAN4_CONS || + client == IPA_CLIENT_ODU_EMB_CONS || + client == IPA_CLIENT_ODU_TETH_CONS || + client == IPA_CLIENT_ETHERNET_CONS) + return true; + + return false; +} + +/** + * ipa3_should_pipe_channel_be_stopped() - returns true when the client's + * channel should be stopped during a power save scenario. False otherwise. + * Most client already stops the GSI channel on suspend, and are not included + * in the list below. 
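+ *
+ * Rough sketch of how the two helpers split the power-save work in
+ * ipa3_suspend_resource_sync() below (pseudo-code, for orientation only):
+ *
+ *    if (ipa3_should_pipe_be_suspended(client))
+ *        program the suspend bit via ipa3_cfg_ep_ctrl();   (IPA < v4.0)
+ *    if (ipa3_should_pipe_channel_be_stopped(client))
+ *        stop the channel via ipa3_stop_gsi_channel();     (IPA >= v4.0)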
+ * + * @client: [IN] IPA client + */ +static bool ipa3_should_pipe_channel_be_stopped(enum ipa_client_type client) +{ + struct ipa3_ep_context *ep; + int ipa_ep_idx; + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) + return false; + + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + WARN_ON(1); + return false; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (ep->keep_ipa_awake) + return false; + + if (client == IPA_CLIENT_ODU_EMB_CONS || + client == IPA_CLIENT_ODU_TETH_CONS) + return true; + + return false; +} + +/** + * ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM + * resource and decrement active clients counter, which may result in clock + * gating of IPA clocks. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. + */ +int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource) +{ + struct ipa3_client_names clients; + int res; + int index; + struct ipa_ep_cfg_ctrl suspend; + enum ipa_client_type client; + int ipa_ep_idx; + bool pipe_suspended = false; + + memset(&clients, 0, sizeof(clients)); + res = ipa3_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR("Bad params.\n"); + return res; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + ipa3_ctx->resume_on_connect[client] = false; + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_be_suspended(client)) { + if (ipa3_ctx->ep[ipa_ep_idx].valid) { + /* suspend endpoint */ + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = true; + ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend); + pipe_suspended = true; + } + } + + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_channel_be_stopped(client)) { + if (ipa3_ctx->ep[ipa_ep_idx].valid) { + /* Stop GSI channel */ + res = ipa3_stop_gsi_channel(ipa_ep_idx); + if (res) { + IPAERR("failed stop gsi ch %lu\n", + ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl); + return res; + } + } + } + } + /* Sleep ~1 msec */ + if (pipe_suspended) + usleep_range(1000, 2000); + + /* before gating IPA clocks do TAG process */ + ipa3_ctx->tag_process_before_gating = true; + IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource)); + + return 0; +} + +/** + * ipa3_suspend_resource_no_block() - suspend client endpoints related to the + * IPA_RM resource and decrement active clients counter. This function is + * guaranteed to avoid sleeping. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. 
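+ *
+ * Hedged caller-side sketch (illustrative resource name): this variant is
+ * intended for atomic context; when it fails, e.g. with -EPERM because a
+ * client's GSI channel would have to be stopped, a sleepable caller may
+ * fall back to the blocking variant above:
+ *
+ *    if (ipa3_suspend_resource_no_block(IPA_RM_RESOURCE_WLAN_CONS))
+ *        ipa3_suspend_resource_sync(IPA_RM_RESOURCE_WLAN_CONS);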
+ */ +int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource) +{ + int res; + struct ipa3_client_names clients; + int index; + enum ipa_client_type client; + struct ipa_ep_cfg_ctrl suspend; + int ipa_ep_idx; + struct ipa_active_client_logging_info log_info; + + memset(&clients, 0, sizeof(clients)); + res = ipa3_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR( + "ipa3_get_clients_from_rm_resource() failed, name = %d.\n", + resource); + goto bail; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + ipa3_ctx->resume_on_connect[client] = false; + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_be_suspended(client)) { + if (ipa3_ctx->ep[ipa_ep_idx].valid) { + /* suspend endpoint */ + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = true; + ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend); + } + } + + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_channel_be_stopped(client)) { + res = -EPERM; + goto bail; + } + } + + if (res == 0) { + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, + ipa_rm_resource_str(resource)); + /* before gating IPA clocks do TAG process */ + ipa3_ctx->tag_process_before_gating = true; + ipa3_dec_client_disable_clks_no_block(&log_info); + } +bail: + return res; +} + +/** + * ipa3_resume_resource() - resume client endpoints related to the IPA_RM + * resource. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. + */ +int ipa3_resume_resource(enum ipa_rm_resource_name resource) +{ + + struct ipa3_client_names clients; + int res; + int index; + struct ipa_ep_cfg_ctrl suspend; + enum ipa_client_type client; + int ipa_ep_idx; + + memset(&clients, 0, sizeof(clients)); + res = ipa3_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR("ipa3_get_clients_from_rm_resource() failed.\n"); + return res; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa3_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + /* + * The related ep, will be resumed on connect + * while its resource is granted + */ + ipa3_ctx->resume_on_connect[client] = true; + IPADBG("%d will be resumed on connect.\n", client); + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_be_suspended(client)) { + if (ipa3_ctx->ep[ipa_ep_idx].valid) { + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = false; + ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend); + } + } + + if (ipa3_ctx->ep[ipa_ep_idx].client == client && + ipa3_should_pipe_channel_be_stopped(client)) { + if (ipa3_ctx->ep[ipa_ep_idx].valid) { + res = gsi_start_channel( + ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl); + if (res) { + IPAERR("failed to start gsi ch %lu\n", + ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl); + return res; + } + } + } + } + + return res; +} + +/** + * _ipa_sram_settings_read_v3_0() - Read SRAM settings from HW + * + * Returns: None + */ +void _ipa_sram_settings_read_v3_0(void) +{ + struct ipahal_reg_shared_mem_size smem_sz; + + memset(&smem_sz, 0, sizeof(smem_sz)); + + ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz); + + ipa3_ctx->smem_restricted_bytes = smem_sz.shared_mem_baddr; + ipa3_ctx->smem_sz = smem_sz.shared_mem_sz; + + /* reg fields are in 8B units */ + 
ipa3_ctx->smem_restricted_bytes *= 8; + ipa3_ctx->smem_sz *= 8; + ipa3_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst); + ipa3_ctx->hdr_tbl_lcl = 0; + ipa3_ctx->hdr_proc_ctx_tbl_lcl = 1; + + /* + * when proc ctx table is located in internal memory, + * modem entries resides first. + */ + if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) { + ipa3_ctx->hdr_proc_ctx_tbl.start_offset = + IPA_MEM_PART(modem_hdr_proc_ctx_size); + } + ipa3_ctx->ip4_rt_tbl_hash_lcl = 0; + ipa3_ctx->ip4_rt_tbl_nhash_lcl = 0; + ipa3_ctx->ip6_rt_tbl_hash_lcl = 0; + ipa3_ctx->ip6_rt_tbl_nhash_lcl = 0; + ipa3_ctx->ip4_flt_tbl_hash_lcl = 0; + ipa3_ctx->ip4_flt_tbl_nhash_lcl = 0; + ipa3_ctx->ip6_flt_tbl_hash_lcl = 0; + ipa3_ctx->ip6_flt_tbl_nhash_lcl = 0; +} + +/** + * ipa3_cfg_route() - configure IPA route + * @route: IPA route + * + * Return codes: + * 0: success + */ +int ipa3_cfg_route(struct ipahal_reg_route *route) +{ + + IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n", + route->route_dis, + route->route_def_pipe, + route->route_def_hdr_table); + IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n", + route->route_def_hdr_ofst, + route->route_frag_def_pipe); + + IPADBG("default_retain_hdr=%d\n", + route->route_def_retain_hdr); + + if (route->route_dis) { + IPAERR("Route disable is not supported!\n"); + return -EPERM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + ipahal_write_reg_fields(IPA_ROUTE, route); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa3_cfg_filter() - configure filter + * @disable: disable value + * + * Return codes: + * 0: success + */ +int ipa3_cfg_filter(u32 disable) +{ + IPAERR_RL("Filter disable is not supported!\n"); + return -EPERM; +} + +/** + * ipa3_cfg_qsb() - Configure IPA QSB maximal reads and writes + * + * Returns: None + */ +void ipa3_cfg_qsb(void) +{ + struct ipahal_reg_qsb_max_reads max_reads = { 0 }; + struct ipahal_reg_qsb_max_writes max_writes = { 0 }; + + max_reads.qmb_0_max_reads = 8, + max_reads.qmb_1_max_reads = 8, + + max_writes.qmb_0_max_writes = 8; + max_writes.qmb_1_max_writes = 2; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) { + max_writes.qmb_1_max_writes = 4; + max_reads.qmb_1_max_reads = 12; + } + + ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, &max_writes); + ipahal_write_reg_fields(IPA_QSB_MAX_READS, &max_reads); +} + +/** + * ipa3_init_hw() - initialize HW + * + * Return codes: + * 0: success + */ +int ipa3_init_hw(void) +{ + u32 ipa_version = 0; + u32 val; + + /* Read IPA version and make sure we have access to the registers */ + ipa_version = ipahal_read_reg(IPA_VERSION); + if (ipa_version == 0) + return -EFAULT; + + switch (ipa3_ctx->ipa_hw_type) { + case IPA_HW_v3_0: + case IPA_HW_v3_1: + val = IPA_BCR_REG_VAL_v3_0; + break; + case IPA_HW_v3_5: + case IPA_HW_v3_5_1: + val = IPA_BCR_REG_VAL_v3_5; + break; + case IPA_HW_v4_0: + val = IPA_BCR_REG_VAL_v4_0; + break; + default: + IPAERR("unknown HW type in dts\n"); + return -EFAULT; + } + + ipahal_write_reg(IPA_BCR, val); + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + struct ipahal_reg_tx_cfg cfg; + + ipahal_write_reg(IPA_CLKON_CFG, IPA_CLKON_CFG_v4_0); + ipahal_read_reg_fields(IPA_TX_CFG, &cfg); + /* disable PA_MASK_EN to allow holb drop */ + cfg.pa_mask_en = 0; + ipahal_write_reg_fields(IPA_TX_CFG, &cfg); + } + + ipa3_cfg_qsb(); + + return 0; +} + +/** + * ipa3_get_hw_type_index() - Get HW type index which is used as the entry index + * for ep\resource groups related arrays . 
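+ *
+ * A small sketch of how the returned index is typically consumed (this
+ * mirrors ipa3_get_ep_mapping()/ipa3_get_gsi_ep_info() further down; the
+ * client used here is just an example):
+ *
+ *    u8 hw = ipa3_get_hw_type_index();
+ *    int ep_num = -1;
+ *
+ *    if (ipa3_ep_mapping[hw][IPA_CLIENT_APPS_CMD_PROD].valid)
+ *        ep_num = ipa3_ep_mapping[hw][IPA_CLIENT_APPS_CMD_PROD]
+ *            .ipa_gsi_ep_info.ipa_ep_num;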
+ * + * Return value: HW type index + */ +u8 ipa3_get_hw_type_index(void) +{ + u8 hw_type_index; + + switch (ipa3_ctx->ipa_hw_type) { + case IPA_HW_v3_0: + case IPA_HW_v3_1: + hw_type_index = IPA_3_0; + break; + case IPA_HW_v3_5: + hw_type_index = IPA_3_5; + /* + *this flag is initialized only after fw load trigger from + * user space (ipa3_write) + */ + if (ipa3_ctx->ipa_config_is_mhi) + hw_type_index = IPA_3_5_MHI; + break; + case IPA_HW_v3_5_1: + hw_type_index = IPA_3_5_1; + break; + case IPA_HW_v4_0: + hw_type_index = IPA_4_0; + /* + *this flag is initialized only after fw load trigger from + * user space (ipa3_write) + */ + if (ipa3_ctx->ipa_config_is_mhi) + hw_type_index = IPA_4_0_MHI; + break; + default: + IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type); + hw_type_index = IPA_3_0; + break; + } + + return hw_type_index; +} + +/** + * ipa3_get_ep_mapping() - provide endpoint mapping + * @client: client type + * + * Return value: endpoint mapping + */ +int ipa3_get_ep_mapping(enum ipa_client_type client) +{ + int ipa_ep_idx; + u8 hw_idx = ipa3_get_hw_type_index(); + + if (client >= IPA_CLIENT_MAX || client < 0) { + IPAERR_RL("Bad client number! client =%d\n", client); + return IPA_EP_NOT_ALLOCATED; + } + + if (!ipa3_ep_mapping[hw_idx][client].valid) + return IPA_EP_NOT_ALLOCATED; + + ipa_ep_idx = + ipa3_ep_mapping[hw_idx][client].ipa_gsi_ep_info.ipa_ep_num; + if (ipa_ep_idx < 0 || (ipa_ep_idx >= IPA3_MAX_NUM_PIPES + && client != IPA_CLIENT_DUMMY_CONS)) + return IPA_EP_NOT_ALLOCATED; + + return ipa_ep_idx; +} + +/** + * ipa3_get_gsi_ep_info() - provide gsi ep information + * @client: IPA client value + * + * Return value: pointer to ipa_gsi_ep_info + */ +const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info + (enum ipa_client_type client) +{ + int ep_idx; + + ep_idx = ipa3_get_ep_mapping(client); + if (ep_idx == IPA_EP_NOT_ALLOCATED) + return NULL; + + if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid) + return NULL; + + return &(ipa3_ep_mapping[ipa3_get_hw_type_index()] + [client].ipa_gsi_ep_info); +} + +/** + * ipa_get_ep_group() - provide endpoint group by client + * @client: client type + * + * Return value: endpoint group + */ +int ipa_get_ep_group(enum ipa_client_type client) +{ + if (client >= IPA_CLIENT_MAX || client < 0) { + IPAERR("Bad client number! client =%d\n", client); + return -EINVAL; + } + + if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid) + return -EINVAL; + + return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].group_num; +} + +/** + * ipa3_get_qmb_master_sel() - provide QMB master selection for the client + * @client: client type + * + * Return value: QMB master index + */ +u8 ipa3_get_qmb_master_sel(enum ipa_client_type client) +{ + if (client >= IPA_CLIENT_MAX || client < 0) { + IPAERR("Bad client number! client =%d\n", client); + return -EINVAL; + } + + if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid) + return -EINVAL; + + return ipa3_ep_mapping[ipa3_get_hw_type_index()] + [client].qmb_master_sel; +} + +/* ipa3_set_client() - provide client mapping + * @client: client type + * + * Return value: none + */ + +void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink) +{ + if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) { + IPAERR("Bad client number! client =%d\n", client); + } else if (index >= IPA3_MAX_NUM_PIPES || index < 0) { + IPAERR("Bad pipe index! 
index =%d\n", index); + } else { + ipa3_ctx->ipacm_client[index].client_enum = client; + ipa3_ctx->ipacm_client[index].uplink = uplink; + } +} + +/* ipa3_get_wlan_stats() - get ipa wifi stats + * + * Return value: success or failure + */ +int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats) +{ + if (ipa3_ctx->uc_wdi_ctx.stats_notify) { + ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS, + wdi_sap_stats); + } else { + IPAERR("uc_wdi_ctx.stats_notify NULL\n"); + return -EFAULT; + } + return 0; +} + +int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota) +{ + if (ipa3_ctx->uc_wdi_ctx.stats_notify) { + ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA, + wdi_quota); + } else { + IPAERR("uc_wdi_ctx.stats_notify NULL\n"); + return -EFAULT; + } + return 0; +} + +/** + * ipa3_get_client() - provide client mapping + * @client: client type + * + * Return value: client mapping enum + */ +enum ipacm_client_enum ipa3_get_client(int pipe_idx) +{ + if (pipe_idx >= IPA3_MAX_NUM_PIPES || pipe_idx < 0) { + IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx); + return IPACM_CLIENT_MAX; + } else { + return ipa3_ctx->ipacm_client[pipe_idx].client_enum; + } +} + +/** + * ipa2_get_client_uplink() - provide client mapping + * @client: client type + * + * Return value: none + */ +bool ipa3_get_client_uplink(int pipe_idx) +{ + if (pipe_idx < 0 || pipe_idx >= IPA3_MAX_NUM_PIPES) { + IPAERR("invalid pipe idx %d\n", pipe_idx); + return false; + } + + return ipa3_ctx->ipacm_client[pipe_idx].uplink; +} + +/** + * ipa3_get_rm_resource_from_ep() - get the IPA_RM resource which is related to + * the supplied pipe index. + * + * @pipe_idx: + * + * Return value: IPA_RM resource related to the pipe, -1 if a resource was not + * found. + */ +enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx) +{ + int i; + int j; + enum ipa_client_type client; + struct ipa3_client_names clients; + bool found = false; + + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("Bad pipe index!\n"); + return -EINVAL; + } + + client = ipa3_ctx->ep[pipe_idx].client; + + for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { + memset(&clients, 0, sizeof(clients)); + ipa3_get_clients_from_rm_resource(i, &clients); + for (j = 0; j < clients.length; j++) { + if (clients.names[j] == client) { + found = true; + break; + } + } + if (found) + break; + } + + if (!found) + return -EFAULT; + + return i; +} + +/** + * ipa3_get_client_mapping() - provide client mapping + * @pipe_idx: IPA end-point number + * + * Return value: client mapping + */ +enum ipa_client_type ipa3_get_client_mapping(int pipe_idx) +{ + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("Bad pipe index!\n"); + WARN_ON(1); + return -EINVAL; + } + + return ipa3_ctx->ep[pipe_idx].client; +} + +/** + * ipa_init_ep_flt_bitmap() - Initialize the bitmap + * that represents the End-points that supports filtering + */ +void ipa_init_ep_flt_bitmap(void) +{ + enum ipa_client_type cl; + u8 hw_idx = ipa3_get_hw_type_index(); + u32 bitmap; + u32 pipe_num; + const struct ipa_gsi_ep_config *gsi_ep_ptr; + + bitmap = 0; + if (!ipa3_ctx->ep_flt_bitmap) { + WARN_ON(1); + return; + } + + for (cl = 0; cl < IPA_CLIENT_MAX ; cl++) { + if (ipa3_ep_mapping[hw_idx][cl].support_flt) { + gsi_ep_ptr = + &ipa3_ep_mapping[hw_idx][cl].ipa_gsi_ep_info; + pipe_num = + gsi_ep_ptr->ipa_ep_num; + bitmap |= (1U << pipe_num); + if (bitmap != ipa3_ctx->ep_flt_bitmap) { + ipa3_ctx->ep_flt_bitmap = bitmap; + ipa3_ctx->ep_flt_num++; + } + } + } +} + +/** + * 
ipa_is_ep_support_flt() - Given an end-point, check + whether it supports filtering or not. + * + * @pipe_idx: + * + * Return values: + * true if supported, false if not + */ +bool ipa_is_ep_support_flt(int pipe_idx) +{ + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("Bad pipe index!\n"); + return false; + } + + return ipa3_ctx->ep_flt_bitmap & (1U << pipe_idx); +} + +/** + * ipa3_cfg_ep_seq() - IPA end-point HPS/DPS sequencer type configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @seq_cfg: [in] IPA end-point sequencer configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_seq(u32 clnt_hdl, const struct ipa_ep_cfg_seq *seq_cfg) +{ + int type; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad param, clnt_hdl = %d", clnt_hdl); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) { + IPAERR("SEQ does not apply to IPA consumer EP %d\n", clnt_hdl); + return -EINVAL; + } + + /* + * Skip configuring sequencer type for test clients. + * These are configured dynamically in ipa3_cfg_ep_mode + */ + if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) { + IPADBG("Skip sequencers configuration for test clients\n"); + return 0; + } + + if (seq_cfg->set_dynamic) + type = seq_cfg->seq_type; + else + type = ipa3_ep_mapping[ipa3_get_hw_type_index()] + [ipa3_ctx->ep[clnt_hdl].client].sequencer_type; + + if (type != IPA_DPS_HPS_SEQ_TYPE_INVALID) { + if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA && + !IPA_DPS_HPS_SEQ_TYPE_IS_DMA(type)) { + IPAERR("Configuring non-DMA SEQ type to DMA pipe\n"); + WARN_ON(1); + return -EINVAL; + } + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + /* Configure sequencer type */ + + IPADBG("set sequencers to sequence 0x%x, ep = %d\n", type, + clnt_hdl); + ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + } else { + IPADBG("should not set sequencer type of ep = %d\n", clnt_hdl); + } + + return 0; +} + +/** + * ipa3_cfg_ep - IPA end-point configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * This includes nat, IPv6CT, header, mode, aggregation and route settings and + * is a one shot API to configure the IPA end-point fully + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg) +{ + int result = -EINVAL; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + result = ipa3_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr); + if (result) + return result; + + result = ipa3_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext); + if (result) + return result; + + result = ipa3_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr); + if (result) + return result; + + result = ipa3_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg); + if (result) + return result; + + if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) { + result = ipa3_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat); + if (result) + return result; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + result = ipa3_cfg_ep_conn_track(clnt_hdl, + &ipa_ep_cfg->conn_track); + if (result) + return result; + } + + result = ipa3_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode); + if (result) + return result; + + result = ipa3_cfg_ep_seq(clnt_hdl, &ipa_ep_cfg->seq); + if (result) + return result; + + result = ipa3_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route); + if (result) + return result; + + result = ipa3_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr); + if (result) + return result; + } else { + result = ipa3_cfg_ep_metadata_mask(clnt_hdl, + &ipa_ep_cfg->metadata_mask); + if (result) + return
result; + } + + return 0; +} + +static const char *ipa3_get_nat_en_str(enum ipa_nat_en_type nat_en) +{ + switch (nat_en) { + case (IPA_BYPASS_NAT): + return "NAT disabled"; + case (IPA_SRC_NAT): + return "Source NAT"; + case (IPA_DST_NAT): + return "Dst NAT"; + } + + return "undefined"; +} + +static const char *ipa3_get_ipv6ct_en_str(enum ipa_ipv6ct_en_type ipv6ct_en) +{ + switch (ipv6ct_en) { + case (IPA_BYPASS_IPV6CT): + return "ipv6ct disabled"; + case (IPA_ENABLE_IPV6CT): + return "ipv6ct enabled"; + } + + return "undefined"; +} + +/** + * ipa3_cfg_ep_nat() - IPA end-point NAT configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_nat: [in] IPA NAT end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) { + IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl); + return -EINVAL; + } + + IPADBG("pipe=%d, nat_en=%d(%s)\n", + clnt_hdl, + ep_nat->nat_en, + ipa3_get_nat_en_str(ep_nat->nat_en)); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.nat = *ep_nat; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_NAT_n, clnt_hdl, ep_nat); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_conn_track() - IPA end-point IPv6CT configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_conn_track: [in] IPA IPv6CT end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_conn_track(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ep_conn_track) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_conn_track == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) { + IPAERR("IPv6CT does not apply to IPA out EP %d\n", clnt_hdl); + return -EINVAL; + } + + IPADBG("pipe=%d, conn_track_en=%d(%s)\n", + clnt_hdl, + ep_conn_track->conn_track_en, + ipa3_get_ipv6ct_en_str(ep_conn_track->conn_track_en)); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.conn_track = *ep_conn_track; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CONN_TRACK_n, clnt_hdl, + ep_conn_track); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + + +/** + * ipa3_cfg_ep_status() - IPA end-point status configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_status(u32 clnt_hdl, + const struct ipahal_reg_ep_cfg_status *ep_status) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].valid); + return 
-EINVAL; + } + + IPADBG("pipe=%d, status_en=%d status_ep=%d status_location=%d\n", + clnt_hdl, + ep_status->status_en, + ep_status->status_ep, + ep_status->status_location); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].status = *ep_status; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_STATUS_n, clnt_hdl, ep_status); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_cfg() - IPA end-point cfg configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg) +{ + u8 qmb_master_sel; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.cfg = *cfg; + + /* Override QMB master selection */ + qmb_master_sel = ipa3_get_qmb_master_sel(ipa3_ctx->ep[clnt_hdl].client); + ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel = qmb_master_sel; + IPADBG( + "pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d gen_qmb_master_sel=%d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].cfg.cfg.frag_offload_en, + ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_offload_en, + ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_metadata_hdr_offset, + ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel); + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CFG_n, clnt_hdl, + &ipa3_ctx->ep[clnt_hdl].cfg.cfg); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask + *metadata_mask) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, metadata_mask=0x%x\n", + clnt_hdl, + metadata_mask->metadata_mask); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_MASK_n, + clnt_hdl, metadata_mask); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_hdr() - IPA end-point header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr) +{ + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, 
ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + IPADBG("pipe=%d metadata_reg_valid=%d\n", + clnt_hdl, + ep_hdr->hdr_metadata_reg_valid); + + IPADBG("remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n", + ep_hdr->hdr_remove_additional, + ep_hdr->hdr_a5_mux, + ep_hdr->hdr_ofst_pkt_size); + + IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n", + ep_hdr->hdr_ofst_pkt_size_valid, + ep_hdr->hdr_additional_const_len); + + IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x", + ep_hdr->hdr_ofst_metadata, + ep_hdr->hdr_ofst_metadata_valid, + ep_hdr->hdr_len); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + /* copy over EP cfg */ + ep->cfg.hdr = *ep_hdr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, &ep->cfg.hdr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_hdr_ext() - IPA end-point extended header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_hdr_ext: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d hdr_pad_to_alignment=%d\n", + clnt_hdl, + ep_hdr_ext->hdr_pad_to_alignment); + + IPADBG("hdr_total_len_or_pad_offset=%d\n", + ep_hdr_ext->hdr_total_len_or_pad_offset); + + IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n", + ep_hdr_ext->hdr_payload_len_inc_padding, + ep_hdr_ext->hdr_total_len_or_pad); + + IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n", + ep_hdr_ext->hdr_total_len_or_pad_valid, + ep_hdr_ext->hdr_little_endian); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + /* copy over EP cfg */ + ep->cfg.hdr_ext = *ep_hdr_ext; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_EXT_n, clnt_hdl, + &ep->cfg.hdr_ext); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_ctrl() - IPA end-point Control configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ep_ctrl == NULL) { + IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl); + return -EINVAL; + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 && ep_ctrl->ipa_ep_suspend) { + IPAERR("pipe suspend is not supported\n"); + WARN_ON(1); + return -EPERM; + } + + IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n", + clnt_hdl, + ep_ctrl->ipa_ep_suspend, + ep_ctrl->ipa_ep_delay); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, ep_ctrl); + + if (ep_ctrl->ipa_ep_suspend == true && + IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) + ipa3_suspend_active_aggr_wa(clnt_hdl); + + return 0; +} + +const char *ipa3_get_mode_type_str(enum ipa_mode_type mode) +{ + switch (mode) { + case (IPA_BASIC): + return "Basic"; + case (IPA_ENABLE_FRAMING_HDLC): + return "HDLC framing"; + case (IPA_ENABLE_DEFRAMING_HDLC): + 
return "HDLC de-framing"; + case (IPA_DMA): + return "DMA"; + } + + return "undefined"; +} + +/** + * ipa3_cfg_ep_mode() - IPA end-point mode configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode) +{ + int ep; + int type; + struct ipahal_reg_endp_init_mode init_mode; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) { + IPAERR("bad params clnt_hdl=%d , ep_valid=%d ep_mode=%pK\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid, + ep_mode); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) { + IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl); + return -EINVAL; + } + + ep = ipa3_get_ep_mapping(ep_mode->dst); + if (ep == -1 && ep_mode->mode == IPA_DMA) { + IPAERR("dst %d does not exist in DMA mode\n", ep_mode->dst); + return -EINVAL; + } + + WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst)); + + if (!IPA_CLIENT_IS_CONS(ep_mode->dst)) + ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + + IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d", + clnt_hdl, + ep_mode->mode, + ipa3_get_mode_type_str(ep_mode->mode), + ep_mode->dst); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.mode = *ep_mode; + ipa3_ctx->ep[clnt_hdl].dst_pipe_index = ep; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + init_mode.dst_pipe_number = ipa3_ctx->ep[clnt_hdl].dst_pipe_index; + init_mode.ep_mode = *ep_mode; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_MODE_n, clnt_hdl, &init_mode); + + /* Configure sequencers type for test clients*/ + if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) { + if (ep_mode->mode == IPA_DMA) + type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY; + else + type = + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP; + + IPADBG(" set sequencers to sequance 0x%x, ep = %d\n", type, + clnt_hdl); + ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type); + } + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +const char *ipa3_get_aggr_enable_str(enum ipa_aggr_en_type aggr_en) +{ + switch (aggr_en) { + case (IPA_BYPASS_AGGR): + return "no aggregation"; + case (IPA_ENABLE_AGGR): + return "aggregation enabled"; + case (IPA_ENABLE_DEAGGR): + return "de-aggregation enabled"; + } + + return "undefined"; +} + +const char *ipa3_get_aggr_type_str(enum ipa_aggr_type aggr_type) +{ + switch (aggr_type) { + case (IPA_MBIM_16): + return "MBIM_16"; + case (IPA_HDLC): + return "HDLC"; + case (IPA_TLP): + return "TLP"; + case (IPA_RNDIS): + return "RNDIS"; + case (IPA_GENERIC): + return "GENERIC"; + case (IPA_QCMAP): + return "QCMAP"; + } + return "undefined"; +} + +/** + * ipa3_cfg_ep_aggr() - IPA end-point aggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (ep_aggr->aggr_en == 
IPA_ENABLE_DEAGGR && + !IPA_EP_SUPPORTS_DEAGGR(clnt_hdl)) { + IPAERR("pipe=%d cannot be configured to DEAGGR\n", clnt_hdl); + WARN_ON(1); + return -EINVAL; + } + + IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n", + clnt_hdl, + ep_aggr->aggr_en, + ipa3_get_aggr_enable_str(ep_aggr->aggr_en), + ep_aggr->aggr, + ipa3_get_aggr_type_str(ep_aggr->aggr), + ep_aggr->aggr_byte_limit, + ep_aggr->aggr_time_limit); + IPADBG("hard_byte_limit_en=%d aggr_sw_eof_active=%d\n", + ep_aggr->aggr_hard_byte_limit_en, + ep_aggr->aggr_sw_eof_active); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, ep_aggr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_route() - IPA end-point routing configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route) +{ + struct ipahal_reg_endp_init_route init_rt; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) { + IPAERR("ROUTE does not apply to IPA out EP %d\n", + clnt_hdl); + return -EINVAL; + } + + /* + * if DMA mode was configured previously for this EP, return with + * success + */ + if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) { + IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n", + clnt_hdl); + return 0; + } + + if (ep_route->rt_tbl_hdl) + IPAERR("client specified non-zero RT TBL hdl - ignore it\n"); + + IPADBG("pipe=%d, rt_tbl_hdl=%d\n", + clnt_hdl, + ep_route->rt_tbl_hdl); + + /* always use "default" routing table when programming EP ROUTE reg */ + ipa3_ctx->ep[clnt_hdl].rt_tbl_idx = + IPA_MEM_PART(v4_apps_rt_index_lo); + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n, + clnt_hdl, &init_rt); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + } + + return 0; +} + +/** + * ipa3_cfg_ep_holb() - IPA end-point holb configuration + * + * If an IPA producer pipe is full, IPA HW by default will block + * indefinitely till space opens up. During this time no packets + * including those from unrelated pipes will be processed. Enabling + * HOLB means IPA HW will be allowed to drop packets as/when needed + * and indefinite blocking is avoided. 
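+ *
+ * Minimal usage sketch (hedged; the client and timer value are purely
+ * illustrative): enable HOLB drop on a consumer pipe through the
+ * by-client wrapper defined further down:
+ *
+ *    struct ipa_ep_cfg_holb holb;
+ *
+ *    memset(&holb, 0, sizeof(holb));
+ *    holb.en = 1;
+ *    holb.tmr_val = 0;
+ *    ipa3_cfg_ep_holb_by_client(IPA_CLIENT_USB_CONS, &holb);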
+ * + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb) +{ + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL || + ep_holb->tmr_val > ipa3_ctx->ctrl->max_holb_tmr_val || + ep_holb->en > 1) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) { + IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl); + return -EINVAL; + } + + ipa3_ctx->ep[clnt_hdl].holb = *ep_holb; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl, + ep_holb); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, clnt_hdl, + ep_holb); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl, + ep_holb->tmr_val); + + return 0; +} + +/** + * ipa3_cfg_ep_holb_by_client() - IPA end-point holb configuration + * + * Wrapper function for ipa3_cfg_ep_holb() with client name instead of + * client handle. This function is used for clients that does not have + * client handle. + * + * @client: [in] client name + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ep_holb) +{ + return ipa3_cfg_ep_holb(ipa3_get_ep_mapping(client), ep_holb); +} + +/** + * ipa3_cfg_ep_deaggr() - IPA end-point deaggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_deaggr: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ep_deaggr) +{ + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d deaggr_hdr_len=%d\n", + clnt_hdl, + ep_deaggr->deaggr_hdr_len); + + IPADBG("packet_offset_valid=%d\n", + ep_deaggr->packet_offset_valid); + + IPADBG("packet_offset_location=%d max_packet_len=%d\n", + ep_deaggr->packet_offset_location, + ep_deaggr->max_packet_len); + + ep = &ipa3_ctx->ep[clnt_hdl]; + + /* copy over EP cfg */ + ep->cfg.deaggr = *ep_deaggr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ipahal_write_reg_n_fields(IPA_ENDP_INIT_DEAGGR_n, clnt_hdl, + &ep->cfg.deaggr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa3_cfg_ep_metadata() - IPA end-point metadata configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md) +{ + u32 qmap_id = 0; + struct ipa_ep_cfg_metadata ep_md_reg_wrt; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, 
ipa3_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id); + + /* copy over EP cfg */ + ipa3_ctx->ep[clnt_hdl].cfg.meta = *ep_md; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + ep_md_reg_wrt = *ep_md; + qmap_id = (ep_md->qmap_id << + IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT) & + IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK; + + ep_md_reg_wrt.qmap_id = qmap_id; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_n, clnt_hdl, + &ep_md_reg_wrt); + ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1; + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, + &ipa3_ctx->ep[clnt_hdl].cfg.hdr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return 0; +} + +int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in) +{ + struct ipa_ep_cfg_metadata meta; + struct ipa3_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + + if (param_in->client >= IPA_CLIENT_MAX) { + IPAERR_RL("bad parm client:%d\n", param_in->client); + goto fail; + } + + ipa_ep_idx = ipa3_get_ep_mapping(param_in->client); + if (ipa_ep_idx == -1) { + IPAERR_RL("Invalid client.\n"); + goto fail; + } + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (!ep->valid) { + IPAERR_RL("EP not allocated.\n"); + goto fail; + } + + meta.qmap_id = param_in->qmap_id; + if (param_in->client == IPA_CLIENT_USB_PROD || + param_in->client == IPA_CLIENT_HSIC1_PROD || + param_in->client == IPA_CLIENT_ODU_PROD || + param_in->client == IPA_CLIENT_ETHERNET_PROD) { + result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta); + } else if (param_in->client == IPA_CLIENT_WLAN1_PROD) { + ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta; + result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id); + if (result) + IPAERR_RL("qmap_id %d write failed on ep=%d\n", + meta.qmap_id, ipa_ep_idx); + result = 0; + } + +fail: + return result; +} + +/** + * ipa3_dump_buff_internal() - dumps buffer for debug purposes + * @base: buffer base address + * @phy_base: buffer physical base address + * @size: size of the buffer + */ +void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size) +{ + int i; + u32 *cur = (u32 *)base; + u8 *byt; + + IPADBG("system phys addr=%pa len=%u\n", &phy_base, size); + for (i = 0; i < size / 4; i++) { + byt = (u8 *)(cur + i); + IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i), + byt[0], byt[1], byt[2], byt[3]); + } + IPADBG("END\n"); +} + +/** + * ipa3_set_aggr_mode() - Set the aggregation mode which is a global setting + * @mode: [in] the desired aggregation mode for e.g. straight MBIM, QCNCM, + * etc + * + * Returns: 0 on success + */ +int ipa3_set_aggr_mode(enum ipa_aggr_mode mode) +{ + struct ipahal_reg_qcncm qcncm; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + if (mode != IPA_MBIM_AGGR) { + IPAERR("Only MBIM mode is supported staring 4.0\n"); + return -EPERM; + } + } else { + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipahal_read_reg_fields(IPA_QCNCM, &qcncm); + qcncm.mode_en = mode; + ipahal_write_reg_fields(IPA_QCNCM, &qcncm); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + } + + return 0; +} + +/** + * ipa3_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation + * mode + * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be + * "QND") + * + * Set the NDP signature used for QCNCM aggregation mode. 
The fourth byte + * (expected to be 'P') needs to be set using the header addition mechanism + * + * Returns: 0 on success, negative on failure + */ +int ipa3_set_qcncm_ndp_sig(char sig[3]) +{ + struct ipahal_reg_qcncm qcncm; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + IPAERR("QCNCM mode is not supported staring 4.0\n"); + return -EPERM; + } + + if (sig == NULL) { + IPAERR("bad argument\n"); + return -EINVAL; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipahal_read_reg_fields(IPA_QCNCM, &qcncm); + qcncm.mode_val = ((sig[0] << 16) | (sig[1] << 8) | sig[2]); + ipahal_write_reg_fields(IPA_QCNCM, &qcncm); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa3_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame + * configuration + * @enable: [in] true for single NDP/MBIM; false otherwise + * + * Returns: 0 on success + */ +int ipa3_set_single_ndp_per_mbim(bool enable) +{ + struct ipahal_reg_single_ndp_mode mode; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + IPAERR("QCNCM mode is not supported staring 4.0\n"); + return -EPERM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipahal_read_reg_fields(IPA_SINGLE_NDP_MODE, &mode); + mode.single_ndp_en = enable; + ipahal_write_reg_fields(IPA_SINGLE_NDP_MODE, &mode); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa3_straddle_boundary() - Checks whether a memory buffer straddles a + * boundary + * @start: start address of the memory buffer + * @end: end address of the memory buffer + * @boundary: boundary + * + * Return value: + * 1: if the interval [start, end] straddles boundary + * 0: otherwise + */ +int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary) +{ + u32 next_start; + u32 prev_end; + + IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary); + + next_start = (start + (boundary - 1)) & ~(boundary - 1); + prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary; + + while (next_start < prev_end) + next_start += boundary; + + if (next_start == prev_end) + return 1; + else + return 0; +} + +/** + * ipa3_init_mem_partition() - Reads IPA memory map from DTS, performs alignment + * checks and logs the fetched values. + * + * Returns: 0 on success + */ +int ipa3_init_mem_partition(struct device_node *node) +{ + const size_t ram_mmap_current_version_size = + sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32); + int result; + + memset(&ipa3_ctx->ctrl->mem_partition, 0, + sizeof(ipa3_ctx->ctrl->mem_partition)); + + IPADBG("Reading from DTS as u32 array\n"); + + /* + * The size of ipa-ram-mmap array depends on the IPA version. The + * actual size can't be assumed because of possible DTS versions + * mismatch. 
The size of the array monotonically increasing because the + * obsolete entries are set to zero rather than deleted, so the + * possible sizes are in range + * [1, ram_mmap_current_version_size] + */ + result = of_property_read_variable_u32_array(node, "qcom,ipa-ram-mmap", + (u32 *)&ipa3_ctx->ctrl->mem_partition, + 1, ram_mmap_current_version_size); + + if (IPA_MEM_PART(uc_event_ring_ofst) & 1023) { + IPAERR("UC EVENT RING OFST 0x%x is unaligned\n", + IPA_MEM_PART(uc_event_ring_ofst)); + return -ENODEV; + } + + IPADBG("UC EVENT RING OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(uc_event_ring_ofst), + IPA_MEM_PART(uc_event_ring_size)); + + IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst), + IPA_MEM_PART(nat_size)); + + if (IPA_MEM_PART(uc_info_ofst) & 3) { + IPAERR("UC INFO OFST 0x%x is unaligned\n", + IPA_MEM_PART(uc_info_ofst)); + return -ENODEV; + } + + IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(uc_info_ofst), IPA_MEM_PART(uc_info_size)); + + IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start)); + + if (IPA_MEM_PART(v4_flt_hash_ofst) & 7) { + IPAERR("V4 FLT HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v4_flt_hash_ofst)); + return -ENODEV; + } + + IPADBG("V4 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_flt_hash_ofst), + IPA_MEM_PART(v4_flt_hash_size), + IPA_MEM_PART(v4_flt_hash_size_ddr)); + + if (IPA_MEM_PART(v4_flt_nhash_ofst) & 7) { + IPAERR("V4 FLT NON-HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v4_flt_nhash_ofst)); + return -ENODEV; + } + + IPADBG("V4 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_flt_nhash_ofst), + IPA_MEM_PART(v4_flt_nhash_size), + IPA_MEM_PART(v4_flt_nhash_size_ddr)); + + if (IPA_MEM_PART(v6_flt_hash_ofst) & 7) { + IPAERR("V6 FLT HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v6_flt_hash_ofst)); + return -ENODEV; + } + + IPADBG("V6 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_flt_hash_ofst), IPA_MEM_PART(v6_flt_hash_size), + IPA_MEM_PART(v6_flt_hash_size_ddr)); + + if (IPA_MEM_PART(v6_flt_nhash_ofst) & 7) { + IPAERR("V6 FLT NON-HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v6_flt_nhash_ofst)); + return -ENODEV; + } + + IPADBG("V6 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_flt_nhash_ofst), + IPA_MEM_PART(v6_flt_nhash_size), + IPA_MEM_PART(v6_flt_nhash_size_ddr)); + + IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_rt_num_index)); + + IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_modem_rt_index_lo), + IPA_MEM_PART(v4_modem_rt_index_hi)); + + IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_apps_rt_index_lo), + IPA_MEM_PART(v4_apps_rt_index_hi)); + + if (IPA_MEM_PART(v4_rt_hash_ofst) & 7) { + IPAERR("V4 RT HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v4_rt_hash_ofst)); + return -ENODEV; + } + + IPADBG("V4 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v4_rt_hash_ofst)); + + IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_rt_hash_size), + IPA_MEM_PART(v4_rt_hash_size_ddr)); + + if (IPA_MEM_PART(v4_rt_nhash_ofst) & 7) { + IPAERR("V4 RT NON-HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v4_rt_nhash_ofst)); + return -ENODEV; + } + + IPADBG("V4 RT NON-HASHABLE OFST 0x%x\n", + IPA_MEM_PART(v4_rt_nhash_ofst)); + + IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_rt_nhash_size), + IPA_MEM_PART(v4_rt_nhash_size_ddr)); + + IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_rt_num_index)); + + IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n", + 
IPA_MEM_PART(v6_modem_rt_index_lo), + IPA_MEM_PART(v6_modem_rt_index_hi)); + + IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_apps_rt_index_lo), + IPA_MEM_PART(v6_apps_rt_index_hi)); + + if (IPA_MEM_PART(v6_rt_hash_ofst) & 7) { + IPAERR("V6 RT HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v6_rt_hash_ofst)); + return -ENODEV; + } + + IPADBG("V6 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v6_rt_hash_ofst)); + + IPADBG("V6 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_rt_hash_size), + IPA_MEM_PART(v6_rt_hash_size_ddr)); + + if (IPA_MEM_PART(v6_rt_nhash_ofst) & 7) { + IPAERR("V6 RT NON-HASHABLE OFST 0x%x is unaligned\n", + IPA_MEM_PART(v6_rt_nhash_ofst)); + return -ENODEV; + } + + IPADBG("V6 RT NON-HASHABLE OFST 0x%x\n", + IPA_MEM_PART(v6_rt_nhash_ofst)); + + IPADBG("V6 RT NON-HASHABLE SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_rt_nhash_size), + IPA_MEM_PART(v6_rt_nhash_size_ddr)); + + if (IPA_MEM_PART(modem_hdr_ofst) & 7) { + IPAERR("MODEM HDR OFST 0x%x is unaligned\n", + IPA_MEM_PART(modem_hdr_ofst)); + return -ENODEV; + } + + IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size)); + + if (IPA_MEM_PART(apps_hdr_ofst) & 7) { + IPAERR("APPS HDR OFST 0x%x is unaligned\n", + IPA_MEM_PART(apps_hdr_ofst)); + return -ENODEV; + } + + IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size), + IPA_MEM_PART(apps_hdr_size_ddr)); + + if (IPA_MEM_PART(modem_hdr_proc_ctx_ofst) & 7) { + IPAERR("MODEM HDR PROC CTX OFST 0x%x is unaligned\n", + IPA_MEM_PART(modem_hdr_proc_ctx_ofst)); + return -ENODEV; + } + + IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_proc_ctx_ofst), + IPA_MEM_PART(modem_hdr_proc_ctx_size)); + + if (IPA_MEM_PART(apps_hdr_proc_ctx_ofst) & 7) { + IPAERR("APPS HDR PROC CTX OFST 0x%x is unaligned\n", + IPA_MEM_PART(apps_hdr_proc_ctx_ofst)); + return -ENODEV; + } + + IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_proc_ctx_ofst), + IPA_MEM_PART(apps_hdr_proc_ctx_size), + IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr)); + + if (IPA_MEM_PART(pdn_config_ofst) & 7) { + IPAERR("PDN CONFIG OFST 0x%x is unaligned\n", + IPA_MEM_PART(pdn_config_ofst)); + return -ENODEV; + } + + IPADBG("PDN CONFIG OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(pdn_config_ofst), + IPA_MEM_PART(pdn_config_size)); + + if (IPA_MEM_PART(modem_ofst) & 7) { + IPAERR("MODEM OFST 0x%x is unaligned\n", + IPA_MEM_PART(modem_ofst)); + return -ENODEV; + } + + IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst), + IPA_MEM_PART(modem_size)); + + IPADBG("V4 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_flt_hash_ofst), + IPA_MEM_PART(apps_v4_flt_hash_size)); + + IPADBG("V4 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_flt_nhash_ofst), + IPA_MEM_PART(apps_v4_flt_nhash_size)); + + IPADBG("V6 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_flt_hash_ofst), + IPA_MEM_PART(apps_v6_flt_hash_size)); + + IPADBG("V6 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_flt_nhash_ofst), + IPA_MEM_PART(apps_v6_flt_nhash_size)); + + IPADBG("RAM END OFST 0x%x\n", + IPA_MEM_PART(end_ofst)); + + IPADBG("V4 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_rt_hash_ofst), + IPA_MEM_PART(apps_v4_rt_hash_size)); + + IPADBG("V4 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_rt_nhash_ofst), + IPA_MEM_PART(apps_v4_rt_nhash_size)); + + IPADBG("V6 APPS 
HASHABLE RT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_rt_hash_ofst), + IPA_MEM_PART(apps_v6_rt_hash_size)); + + IPADBG("V6 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_rt_nhash_ofst), + IPA_MEM_PART(apps_v6_rt_nhash_size)); + + return 0; +} + +/** + * ipa_ctrl_static_bind() - set the appropriate methods for + * IPA Driver based on the HW version + * + * @ctrl: data structure which holds the function pointers + * @hw_type: the HW type in use + * + * This function can avoid the runtime assignment by using C99 special + * struct initialization - hard decision... time.vs.mem + */ +int ipa3_controller_static_bind(struct ipa3_controller *ctrl, + enum ipa_hw_type hw_type) +{ + if (hw_type >= IPA_HW_v4_0) { + ctrl->ipa_clk_rate_turbo = IPA_V4_0_CLK_RATE_TURBO; + ctrl->ipa_clk_rate_nominal = IPA_V4_0_CLK_RATE_NOMINAL; + ctrl->ipa_clk_rate_svs = IPA_V4_0_CLK_RATE_SVS; + } else if (hw_type >= IPA_HW_v3_5) { + ctrl->ipa_clk_rate_turbo = IPA_V3_5_CLK_RATE_TURBO; + ctrl->ipa_clk_rate_nominal = IPA_V3_5_CLK_RATE_NOMINAL; + ctrl->ipa_clk_rate_svs = IPA_V3_5_CLK_RATE_SVS; + } else { + ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO; + ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL; + ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS; + } + + ctrl->ipa_init_rt4 = _ipa_init_rt4_v3; + ctrl->ipa_init_rt6 = _ipa_init_rt6_v3; + ctrl->ipa_init_flt4 = _ipa_init_flt4_v3; + ctrl->ipa_init_flt6 = _ipa_init_flt6_v3; + ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v3_0; + ctrl->ipa3_commit_flt = __ipa_commit_flt_v3; + ctrl->ipa3_commit_rt = __ipa_commit_rt_v3; + ctrl->ipa3_commit_hdr = __ipa_commit_hdr_v3_0; + ctrl->ipa3_enable_clks = _ipa_enable_clks_v3_0; + ctrl->ipa3_disable_clks = _ipa_disable_clks_v3_0; + ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v3_0; + ctrl->clock_scaling_bw_threshold_nominal = + IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS; + ctrl->clock_scaling_bw_threshold_turbo = + IPA_V3_0_BW_THRESHOLD_TURBO_MBPS; + ctrl->ipa_reg_base_ofst = ipahal_get_reg_base(); + ctrl->ipa_init_sram = _ipa_init_sram_v3; + ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0; + ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0; + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v4_0; + + return 0; +} + +void ipa3_skb_recycle(struct sk_buff *skb) +{ + struct skb_shared_info *shinfo; + + shinfo = skb_shinfo(skb); + memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); + atomic_set(&shinfo->dataref, 1); + + memset(skb, 0, offsetof(struct sk_buff, tail)); + skb->data = skb->head + NET_SKB_PAD; + skb_reset_tail_pointer(skb); +} + +int ipa3_alloc_rule_id(struct idr *rule_ids) +{ + /* There is two groups of rule-Ids, Modem ones and Apps ones. + * Distinction by high bit: Modem Ids are high bit asserted. 
+ */ + return idr_alloc(rule_ids, NULL, + ipahal_get_low_rule_id(), ipahal_get_rule_id_hi_bit(), + GFP_KERNEL); +} + +int ipa3_id_alloc(void *ptr) +{ + int id; + + idr_preload(GFP_KERNEL); + spin_lock(&ipa3_ctx->idr_lock); + id = idr_alloc(&ipa3_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT); + spin_unlock(&ipa3_ctx->idr_lock); + idr_preload_end(); + + return id; +} + +void *ipa3_id_find(u32 id) +{ + void *ptr; + + spin_lock(&ipa3_ctx->idr_lock); + ptr = idr_find(&ipa3_ctx->ipa_idr, id); + spin_unlock(&ipa3_ctx->idr_lock); + + return ptr; +} + +void ipa3_id_remove(u32 id) +{ + spin_lock(&ipa3_ctx->idr_lock); + idr_remove(&ipa3_ctx->ipa_idr, id); + spin_unlock(&ipa3_ctx->idr_lock); +} + +void ipa3_tag_destroy_imm(void *user1, int user2) +{ + ipahal_destroy_imm_cmd(user1); +} + +static void ipa3_tag_free_skb(void *user1, int user2) +{ + dev_kfree_skb_any((struct sk_buff *)user1); +} + +#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4 + +/* ipa3_tag_process() - Initiates a tag process. Incorporates the input + * descriptors + * + * @desc: descriptors with commands for IC + * @desc_size: amount of descriptors in the above variable + * + * Note: The descriptors are copied (if there's room), the client needs to + * free his descriptors afterwards + * + * Return: 0 or negative in case of failure + */ +int ipa3_tag_process(struct ipa3_desc desc[], + int descs_num, + unsigned long timeout) +{ + struct ipa3_sys_context *sys; + struct ipa3_desc *tag_desc; + int desc_idx = 0; + struct ipahal_imm_cmd_ip_packet_init pktinit_cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld = NULL; + struct ipahal_imm_cmd_ip_packet_tag_status status; + int i; + struct sk_buff *dummy_skb; + int res; + struct ipa3_tag_completion *comp; + int ep_idx; + + /* Not enough room for the required descriptors for the tag process */ + if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) { + IPAERR("up to %d descriptors are allowed (received %d)\n", + IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS, + descs_num); + return -ENOMEM; + } + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (-1 == ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_CMD_PROD); + return -EFAULT; + } + sys = ipa3_ctx->ep[ep_idx].sys; + + tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL); + if (!tag_desc) { + IPAERR("failed to allocate memory\n"); + return -ENOMEM; + } + + /* Copy the required descriptors from the client now */ + if (desc) { + memcpy(&(tag_desc[0]), desc, descs_num * + sizeof(tag_desc[0])); + desc_idx += descs_num; + } + + /* NO-OP IC for ensuring that IPA pipeline is empty */ + cmd_pyld = ipahal_construct_nop_imm_cmd( + false, IPAHAL_FULL_PIPELINE_CLEAR, false); + if (!cmd_pyld) { + IPAERR("failed to construct NOP imm cmd\n"); + res = -ENOMEM; + goto fail_free_tag_desc; + } + tag_desc[desc_idx].opcode = cmd_pyld->opcode; + tag_desc[desc_idx].pyld = cmd_pyld->data; + tag_desc[desc_idx].len = cmd_pyld->len; + tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; + tag_desc[desc_idx].callback = ipa3_tag_destroy_imm; + tag_desc[desc_idx].user1 = cmd_pyld; + desc_idx++; + + /* IP_PACKET_INIT IC for tag status to be sent to apps */ + pktinit_cmd.destination_pipe_index = + ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_PACKET_INIT, &pktinit_cmd, false); + if (!cmd_pyld) { + IPAERR("failed to construct ip_packet_init imm cmd\n"); + res = -ENOMEM; + goto fail_free_desc; + } + tag_desc[desc_idx].opcode = cmd_pyld->opcode; + tag_desc[desc_idx].pyld = 
cmd_pyld->data; + tag_desc[desc_idx].len = cmd_pyld->len; + tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; + tag_desc[desc_idx].callback = ipa3_tag_destroy_imm; + tag_desc[desc_idx].user1 = cmd_pyld; + desc_idx++; + + /* status IC */ + status.tag = IPA_COOKIE; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &status, false); + if (!cmd_pyld) { + IPAERR("failed to construct ip_packet_tag_status imm cmd\n"); + res = -ENOMEM; + goto fail_free_desc; + } + tag_desc[desc_idx].opcode = cmd_pyld->opcode; + tag_desc[desc_idx].pyld = cmd_pyld->data; + tag_desc[desc_idx].len = cmd_pyld->len; + tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; + tag_desc[desc_idx].callback = ipa3_tag_destroy_imm; + tag_desc[desc_idx].user1 = cmd_pyld; + desc_idx++; + + comp = kzalloc(sizeof(*comp), GFP_KERNEL); + if (!comp) { + IPAERR("no mem\n"); + res = -ENOMEM; + goto fail_free_desc; + } + init_completion(&comp->comp); + + /* completion needs to be released from both here and rx handler */ + atomic_set(&comp->cnt, 2); + + /* dummy packet to send to IPA. packet payload is a completion object */ + dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL); + if (!dummy_skb) { + IPAERR("failed to allocate memory\n"); + res = -ENOMEM; + goto fail_free_comp; + } + + memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp)); + + tag_desc[desc_idx].pyld = dummy_skb->data; + tag_desc[desc_idx].len = dummy_skb->len; + tag_desc[desc_idx].type = IPA_DATA_DESC_SKB; + tag_desc[desc_idx].callback = ipa3_tag_free_skb; + tag_desc[desc_idx].user1 = dummy_skb; + desc_idx++; + + /* send all descriptors to IPA with single EOT */ + res = ipa3_send(sys, desc_idx, tag_desc, true); + if (res) { + IPAERR("failed to send TAG packets %d\n", res); + res = -ENOMEM; + goto fail_free_comp; + } + kfree(tag_desc); + tag_desc = NULL; + + IPADBG("waiting for TAG response\n"); + res = wait_for_completion_timeout(&comp->comp, timeout); + if (res == 0) { + IPAERR("timeout (%lu msec) on waiting for TAG response\n", + timeout); + WARN_ON(1); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + return -ETIME; + } + + IPADBG("TAG response arrived!\n"); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + + /* + * sleep for short period to ensure IPA wrote all packets to + * the transport + */ + usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC); + + return 0; + +fail_free_comp: + kfree(comp); +fail_free_desc: + /* + * Free only the first descriptors allocated here. + * [nop, pkt_init, status, dummy_skb] + * The user is responsible to free his allocations + * in case of failure. 
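The comp object above is shared between this waiter and the rx handler that receives the dummy skb: its count starts at 2 and whichever side drops the last reference frees it, so a timed-out waiter can return without racing a late handler. A user-space analogue of that reference-counting pattern, using pthreads and C11 atomics (names here are hypothetical, not driver symbols):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for the completion object: one ref for the waiter,
	 * one for the asynchronous handler; last put frees.
	 */
	struct tag_completion {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int done;
		atomic_int cnt;	/* starts at 2 */
	};

	static void tag_completion_put(struct tag_completion *comp)
	{
		if (atomic_fetch_sub(&comp->cnt, 1) == 1)
			free(comp);
	}

	static void *rx_handler(void *arg)
	{
		struct tag_completion *comp = arg;

		pthread_mutex_lock(&comp->lock);
		comp->done = 1;
		pthread_cond_signal(&comp->cond);
		pthread_mutex_unlock(&comp->lock);
		tag_completion_put(comp);	/* drop the handler's reference */
		return NULL;
	}

	int main(void)
	{
		struct tag_completion *comp = malloc(sizeof(*comp));
		pthread_t t;

		pthread_mutex_init(&comp->lock, NULL);
		pthread_cond_init(&comp->cond, NULL);
		comp->done = 0;
		atomic_init(&comp->cnt, 2);

		pthread_create(&t, NULL, rx_handler, comp);

		pthread_mutex_lock(&comp->lock);
		while (!comp->done)
			pthread_cond_wait(&comp->cond, &comp->lock);
		pthread_mutex_unlock(&comp->lock);
		printf("completion received\n");

		tag_completion_put(comp);	/* drop the waiter's reference */
		pthread_join(t, NULL);
		return 0;
	}
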
+ * The min is required because we may fail during + * of the initial allocations above + */ + for (i = descs_num; + i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS, desc_idx); i++) + if (tag_desc[i].callback) + tag_desc[i].callback(tag_desc[i].user1, + tag_desc[i].user2); +fail_free_tag_desc: + kfree(tag_desc); + return res; +} + +/** + * ipa3_tag_generate_force_close_desc() - generate descriptors for force close + * immediate command + * + * @desc: descriptors for IC + * @desc_size: desc array size + * @start_pipe: first pipe to close aggregation + * @end_pipe: last (non-inclusive) pipe to close aggregation + * + * Return: number of descriptors written or negative in case of failure + */ +static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[], + int desc_size, int start_pipe, int end_pipe) +{ + int i; + struct ipa_ep_cfg_aggr ep_aggr; + int desc_idx = 0; + int res; + struct ipahal_imm_cmd_register_write reg_write_agg_close; + struct ipahal_imm_cmd_pyld *cmd_pyld; + struct ipahal_reg_valmask valmask; + + for (i = start_pipe; i < end_pipe; i++) { + ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, i, &ep_aggr); + if (!ep_aggr.aggr_en) + continue; + IPADBG("Force close ep: %d\n", i); + if (desc_idx + 1 > desc_size) { + IPAERR("Internal error - no descriptors\n"); + res = -EFAULT; + goto fail_no_desc; + } + + reg_write_agg_close.skip_pipeline_clear = false; + reg_write_agg_close.pipeline_clear_options = + IPAHAL_FULL_PIPELINE_CLEAR; + reg_write_agg_close.offset = + ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE); + ipahal_get_aggr_force_close_valmask(i, &valmask); + reg_write_agg_close.value = valmask.val; + reg_write_agg_close.value_mask = valmask.mask; + cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + ®_write_agg_close, false); + if (!cmd_pyld) { + IPAERR("failed to construct register_write imm cmd\n"); + res = -ENOMEM; + goto fail_alloc_reg_write_agg_close; + } + + desc[desc_idx].opcode = cmd_pyld->opcode; + desc[desc_idx].pyld = cmd_pyld->data; + desc[desc_idx].len = cmd_pyld->len; + desc[desc_idx].type = IPA_IMM_CMD_DESC; + desc[desc_idx].callback = ipa3_tag_destroy_imm; + desc[desc_idx].user1 = cmd_pyld; + desc_idx++; + } + + return desc_idx; + +fail_alloc_reg_write_agg_close: + for (i = 0; i < desc_idx; i++) + if (desc[desc_idx].callback) + desc[desc_idx].callback(desc[desc_idx].user1, + desc[desc_idx].user2); +fail_no_desc: + return res; +} + +/** + * ipa3_tag_aggr_force_close() - Force close aggregation + * + * @pipe_num: pipe number or -1 for all pipes + */ +int ipa3_tag_aggr_force_close(int pipe_num) +{ + struct ipa3_desc *desc; + int res = -1; + int start_pipe; + int end_pipe; + int num_descs; + int num_aggr_descs; + + if (pipe_num < -1 || pipe_num >= (int)ipa3_ctx->ipa_num_pipes) { + IPAERR("Invalid pipe number %d\n", pipe_num); + return -EINVAL; + } + + if (pipe_num == -1) { + start_pipe = 0; + end_pipe = ipa3_ctx->ipa_num_pipes; + } else { + start_pipe = pipe_num; + end_pipe = pipe_num + 1; + } + + num_descs = end_pipe - start_pipe; + + desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL); + if (!desc) { + IPAERR("no mem\n"); + return -ENOMEM; + } + + /* Force close aggregation on all valid pipes with aggregation */ + num_aggr_descs = ipa3_tag_generate_force_close_desc(desc, num_descs, + start_pipe, end_pipe); + if (num_aggr_descs < 0) { + IPAERR("ipa3_tag_generate_force_close_desc failed %d\n", + num_aggr_descs); + goto fail_free_desc; + } + + res = ipa3_tag_process(desc, num_aggr_descs, + IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT); + +fail_free_desc: + 
kfree(desc); + + return res; +} + +/** + * ipa3_is_ready() - check if IPA module was initialized + * successfully + * + * Return value: true for yes; false for no + */ +bool ipa3_is_ready(void) +{ + bool complete; + + if (ipa3_ctx == NULL) + return false; + mutex_lock(&ipa3_ctx->lock); + complete = ipa3_ctx->ipa_initialization_complete; + mutex_unlock(&ipa3_ctx->lock); + return complete; +} + +/** + * ipa3_is_client_handle_valid() - check if IPA client handle is valid handle + * + * Return value: true for yes; false for no + */ +bool ipa3_is_client_handle_valid(u32 clnt_hdl) +{ + if (clnt_hdl >= 0 && clnt_hdl < ipa3_ctx->ipa_num_pipes) + return true; + return false; +} + +/** + * ipa3_proxy_clk_unvote() - called to remove IPA clock proxy vote + * + * Return value: none + */ +void ipa3_proxy_clk_unvote(void) +{ + if (ipa3_is_ready() && ipa3_ctx->q6_proxy_clk_vote_valid) { + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE"); + ipa3_ctx->q6_proxy_clk_vote_valid = false; + } +} + +/** + * ipa3_proxy_clk_vote() - called to add IPA clock proxy vote + * + * Return value: none + */ +void ipa3_proxy_clk_vote(void) +{ + if (ipa3_is_ready() && !ipa3_ctx->q6_proxy_clk_vote_valid) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE"); + ipa3_ctx->q6_proxy_clk_vote_valid = true; + } +} + +/** + * ipa3_get_smem_restr_bytes()- Return IPA smem restricted bytes + * + * Return value: u16 - number of IPA smem restricted bytes + */ +u16 ipa3_get_smem_restr_bytes(void) +{ + if (ipa3_ctx) + return ipa3_ctx->smem_restricted_bytes; + + IPAERR("IPA Driver not initialized\n"); + + return 0; +} + +/** + * ipa3_get_modem_cfg_emb_pipe_flt()- Return ipa3_ctx->modem_cfg_emb_pipe_flt + * + * Return value: true if modem configures embedded pipe flt, false otherwise + */ +bool ipa3_get_modem_cfg_emb_pipe_flt(void) +{ + if (ipa3_ctx) + return ipa3_ctx->modem_cfg_emb_pipe_flt; + + IPAERR("IPA driver has not been initialized\n"); + + return false; +} + +/** + * ipa3_get_transport_type() + * + * Return value: enum ipa_transport_type + */ +enum ipa_transport_type ipa3_get_transport_type(void) +{ + return IPA_TRANSPORT_TYPE_GSI; +} + +u32 ipa3_get_num_pipes(void) +{ + return ipahal_read_reg(IPA_ENABLED_PIPES); +} + +/** + * ipa3_disable_apps_wan_cons_deaggr()- + * set ipa_ctx->ipa_client_apps_wan_cons_agg_gro + * + * Return value: 0 or negative in case of failure + */ +int ipa3_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count) +{ + int res = -1; + u32 limit; + + /* checking if IPA-HW can support */ + limit = ipahal_aggr_get_max_byte_limit(); + if ((agg_size >> 10) > limit) { + IPAERR("IPA-AGG byte limit %d\n", limit); + IPAERR("exceed aggr_byte_limit\n"); + return res; + } + limit = ipahal_aggr_get_max_pkt_limit(); + if (agg_count > limit) { + IPAERR("IPA-AGG pkt limit %d\n", limit); + IPAERR("exceed aggr_pkt_limit\n"); + return res; + } + + if (ipa3_ctx) { + ipa3_ctx->ipa_client_apps_wan_cons_agg_gro = true; + return 0; + } + return res; +} + +static void *ipa3_get_ipc_logbuf(void) +{ + if (ipa3_ctx) + return ipa3_ctx->logbuf; + + return NULL; +} + +static void *ipa3_get_ipc_logbuf_low(void) +{ + if (ipa3_ctx) + return ipa3_ctx->logbuf_low; + + return NULL; +} + +static void ipa3_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb) +{ + *holb = ipa3_ctx->ep[ep_idx].holb; +} + +static void ipa3_set_tag_process_before_gating(bool val) +{ + ipa3_ctx->tag_process_before_gating = val; +} + +int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, + struct ipa_api_controller *api_ctrl) +{ + if (ipa_hw_type < 
IPA_HW_v3_0) { + IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type); + WARN_ON(1); + return -EPERM; + } + + api_ctrl->ipa_reset_endpoint = NULL; + api_ctrl->ipa_clear_endpoint_delay = ipa3_clear_endpoint_delay; + api_ctrl->ipa_disable_endpoint = NULL; + api_ctrl->ipa_cfg_ep = ipa3_cfg_ep; + api_ctrl->ipa_cfg_ep_nat = ipa3_cfg_ep_nat; + api_ctrl->ipa_cfg_ep_conn_track = ipa3_cfg_ep_conn_track; + api_ctrl->ipa_cfg_ep_hdr = ipa3_cfg_ep_hdr; + api_ctrl->ipa_cfg_ep_hdr_ext = ipa3_cfg_ep_hdr_ext; + api_ctrl->ipa_cfg_ep_mode = ipa3_cfg_ep_mode; + api_ctrl->ipa_cfg_ep_aggr = ipa3_cfg_ep_aggr; + api_ctrl->ipa_cfg_ep_deaggr = ipa3_cfg_ep_deaggr; + api_ctrl->ipa_cfg_ep_route = ipa3_cfg_ep_route; + api_ctrl->ipa_cfg_ep_holb = ipa3_cfg_ep_holb; + api_ctrl->ipa_get_holb = ipa3_get_holb; + api_ctrl->ipa_set_tag_process_before_gating = + ipa3_set_tag_process_before_gating; + api_ctrl->ipa_cfg_ep_cfg = ipa3_cfg_ep_cfg; + api_ctrl->ipa_cfg_ep_metadata_mask = ipa3_cfg_ep_metadata_mask; + api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client; + api_ctrl->ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl; + api_ctrl->ipa_add_hdr = ipa3_add_hdr; + api_ctrl->ipa_del_hdr = ipa3_del_hdr; + api_ctrl->ipa_commit_hdr = ipa3_commit_hdr; + api_ctrl->ipa_reset_hdr = ipa3_reset_hdr; + api_ctrl->ipa_get_hdr = ipa3_get_hdr; + api_ctrl->ipa_put_hdr = ipa3_put_hdr; + api_ctrl->ipa_copy_hdr = ipa3_copy_hdr; + api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx; + api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx; + api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule; + api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule; + api_ctrl->ipa_commit_rt = ipa3_commit_rt; + api_ctrl->ipa_reset_rt = ipa3_reset_rt; + api_ctrl->ipa_get_rt_tbl = ipa3_get_rt_tbl; + api_ctrl->ipa_put_rt_tbl = ipa3_put_rt_tbl; + api_ctrl->ipa_query_rt_index = ipa3_query_rt_index; + api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule; + api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule; + api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule; + api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule; + api_ctrl->ipa_commit_flt = ipa3_commit_flt; + api_ctrl->ipa_reset_flt = ipa3_reset_flt; + api_ctrl->allocate_nat_device = ipa3_allocate_nat_device; + api_ctrl->ipa_nat_init_cmd = ipa3_nat_init_cmd; + api_ctrl->ipa_nat_dma_cmd = ipa3_nat_dma_cmd; + api_ctrl->ipa_nat_del_cmd = ipa3_nat_del_cmd; + api_ctrl->ipa_send_msg = ipa3_send_msg; + api_ctrl->ipa_register_pull_msg = ipa3_register_pull_msg; + api_ctrl->ipa_deregister_pull_msg = ipa3_deregister_pull_msg; + api_ctrl->ipa_register_intf = ipa3_register_intf; + api_ctrl->ipa_register_intf_ext = ipa3_register_intf_ext; + api_ctrl->ipa_deregister_intf = ipa3_deregister_intf; + api_ctrl->ipa_set_aggr_mode = ipa3_set_aggr_mode; + api_ctrl->ipa_set_qcncm_ndp_sig = ipa3_set_qcncm_ndp_sig; + api_ctrl->ipa_set_single_ndp_per_mbim = ipa3_set_single_ndp_per_mbim; + api_ctrl->ipa_tx_dp = ipa3_tx_dp; + api_ctrl->ipa_tx_dp_mul = ipa3_tx_dp_mul; + api_ctrl->ipa_free_skb = ipa3_free_skb; + api_ctrl->ipa_setup_sys_pipe = ipa3_setup_sys_pipe; + api_ctrl->ipa_teardown_sys_pipe = ipa3_teardown_sys_pipe; + api_ctrl->ipa_sys_setup = ipa3_sys_setup; + api_ctrl->ipa_sys_teardown = ipa3_sys_teardown; + api_ctrl->ipa_sys_update_gsi_hdls = ipa3_sys_update_gsi_hdls; + api_ctrl->ipa_connect_wdi_pipe = ipa3_connect_wdi_pipe; + api_ctrl->ipa_disconnect_wdi_pipe = ipa3_disconnect_wdi_pipe; + api_ctrl->ipa_enable_wdi_pipe = ipa3_enable_wdi_pipe; + api_ctrl->ipa_disable_wdi_pipe = ipa3_disable_wdi_pipe; + api_ctrl->ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe; + 
api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe; + api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats; + api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes; + api_ctrl->ipa_broadcast_wdi_quota_reach_ind = + ipa3_broadcast_wdi_quota_reach_ind; + api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa; + api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB; + api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB; + api_ctrl->teth_bridge_init = ipa3_teth_bridge_init; + api_ctrl->teth_bridge_disconnect = ipa3_teth_bridge_disconnect; + api_ctrl->teth_bridge_connect = ipa3_teth_bridge_connect; + api_ctrl->ipa_set_client = ipa3_set_client; + api_ctrl->ipa_get_client = ipa3_get_client; + api_ctrl->ipa_get_client_uplink = ipa3_get_client_uplink; + api_ctrl->ipa_dma_init = ipa3_dma_init; + api_ctrl->ipa_dma_enable = ipa3_dma_enable; + api_ctrl->ipa_dma_disable = ipa3_dma_disable; + api_ctrl->ipa_dma_sync_memcpy = ipa3_dma_sync_memcpy; + api_ctrl->ipa_dma_async_memcpy = ipa3_dma_async_memcpy; + api_ctrl->ipa_dma_uc_memcpy = ipa3_dma_uc_memcpy; + api_ctrl->ipa_dma_destroy = ipa3_dma_destroy; + api_ctrl->ipa_mhi_init_engine = ipa3_mhi_init_engine; + api_ctrl->ipa_connect_mhi_pipe = ipa3_connect_mhi_pipe; + api_ctrl->ipa_disconnect_mhi_pipe = ipa3_disconnect_mhi_pipe; + api_ctrl->ipa_mhi_stop_gsi_channel = ipa3_mhi_stop_gsi_channel; + api_ctrl->ipa_uc_mhi_reset_channel = ipa3_uc_mhi_reset_channel; + api_ctrl->ipa_qmi_enable_force_clear_datapath_send = + ipa3_qmi_enable_force_clear_datapath_send; + api_ctrl->ipa_qmi_disable_force_clear_datapath_send = + ipa3_qmi_disable_force_clear_datapath_send; + api_ctrl->ipa_mhi_reset_channel_internal = + ipa3_mhi_reset_channel_internal; + api_ctrl->ipa_mhi_start_channel_internal = + ipa3_mhi_start_channel_internal; + api_ctrl->ipa_mhi_query_ch_info = ipa3_mhi_query_ch_info; + api_ctrl->ipa_mhi_resume_channels_internal = + ipa3_mhi_resume_channels_internal; + api_ctrl->ipa_has_open_aggr_frame = ipa3_has_open_aggr_frame; + api_ctrl->ipa_mhi_destroy_channel = ipa3_mhi_destroy_channel; + api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info = + ipa3_uc_mhi_send_dl_ul_sync_info; + api_ctrl->ipa_uc_mhi_init = ipa3_uc_mhi_init; + api_ctrl->ipa_uc_mhi_suspend_channel = ipa3_uc_mhi_suspend_channel; + api_ctrl->ipa_uc_mhi_stop_event_update_channel = + ipa3_uc_mhi_stop_event_update_channel; + api_ctrl->ipa_uc_mhi_cleanup = ipa3_uc_mhi_cleanup; + api_ctrl->ipa_uc_state_check = ipa3_uc_state_check; + api_ctrl->ipa_write_qmap_id = ipa3_write_qmap_id; + api_ctrl->ipa_add_interrupt_handler = ipa3_add_interrupt_handler; + api_ctrl->ipa_remove_interrupt_handler = ipa3_remove_interrupt_handler; + api_ctrl->ipa_restore_suspend_handler = ipa3_restore_suspend_handler; + api_ctrl->ipa_bam_reg_dump = NULL; + api_ctrl->ipa_get_ep_mapping = ipa3_get_ep_mapping; + api_ctrl->ipa_is_ready = ipa3_is_ready; + api_ctrl->ipa_proxy_clk_vote = ipa3_proxy_clk_vote; + api_ctrl->ipa_proxy_clk_unvote = ipa3_proxy_clk_unvote; + api_ctrl->ipa_is_client_handle_valid = ipa3_is_client_handle_valid; + api_ctrl->ipa_get_client_mapping = ipa3_get_client_mapping; + api_ctrl->ipa_get_rm_resource_from_ep = ipa3_get_rm_resource_from_ep; + api_ctrl->ipa_get_modem_cfg_emb_pipe_flt = + ipa3_get_modem_cfg_emb_pipe_flt; + api_ctrl->ipa_get_transport_type = ipa3_get_transport_type; + api_ctrl->ipa_ap_suspend = ipa3_ap_suspend; + api_ctrl->ipa_ap_resume = ipa3_ap_resume; + api_ctrl->ipa_get_smmu_domain = ipa3_get_smmu_domain; + api_ctrl->ipa_disable_apps_wan_cons_deaggr = + ipa3_disable_apps_wan_cons_deaggr; + 
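The controller and API binding above is a runtime-filled dispatch table: a struct of function pointers and constants is populated once per detected HW generation, with later generations overriding individual entries (the ipa_ctrl_static_bind comment notes that compile-time C99 designated initializers would be the alternative trade-off). A compact sketch of that pattern with hypothetical names:

	#include <stdio.h>

	enum hw_type { HW_V3_0, HW_V3_5, HW_V4_0 };

	/* Hypothetical per-version operation table, analogous in spirit to
	 * ipa_api_controller / ipa3_controller.
	 */
	struct api_ops {
		int (*read_ep_reg)(int ep);
		unsigned int clk_rate_turbo;
	};

	static int read_ep_reg_v3(int ep) { printf("v3 read of ep %d\n", ep); return 0; }
	static int read_ep_reg_v4(int ep) { printf("v4 read of ep %d\n", ep); return 0; }

	/* Runtime binding: fill common defaults, then override per generation. */
	static void bind_ops(struct api_ops *ops, enum hw_type hw)
	{
		ops->read_ep_reg = read_ep_reg_v3;
		ops->clk_rate_turbo = 200000000u;

		if (hw >= HW_V4_0) {
			ops->read_ep_reg = read_ep_reg_v4;
			ops->clk_rate_turbo = 500000000u;
		}
	}

	int main(void)
	{
		struct api_ops ops;

		bind_ops(&ops, HW_V4_0);
		ops.read_ep_reg(3);	/* dispatches to the v4 handler */
		return 0;
	}
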
api_ctrl->ipa_get_dma_dev = ipa3_get_dma_dev; + api_ctrl->ipa_release_wdi_mapping = ipa3_release_wdi_mapping; + api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping; + api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info; + api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel; + api_ctrl->ipa_start_gsi_channel = ipa3_start_gsi_channel; + api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb; + api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks; + api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks; + api_ctrl->ipa_inc_client_enable_clks_no_block = + ipa3_inc_client_enable_clks_no_block; + api_ctrl->ipa_suspend_resource_no_block = + ipa3_suspend_resource_no_block; + api_ctrl->ipa_resume_resource = ipa3_resume_resource; + api_ctrl->ipa_suspend_resource_sync = ipa3_suspend_resource_sync; + api_ctrl->ipa_set_required_perf_profile = + ipa3_set_required_perf_profile; + api_ctrl->ipa_get_ipc_logbuf = ipa3_get_ipc_logbuf; + api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low; + api_ctrl->ipa_rx_poll = ipa3_rx_poll; + api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb; + api_ctrl->ipa_setup_uc_ntn_pipes = ipa3_setup_uc_ntn_pipes; + api_ctrl->ipa_tear_down_uc_offload_pipes = + ipa3_tear_down_uc_offload_pipes; + api_ctrl->ipa_get_pdev = ipa3_get_pdev; + api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa3_ntn_uc_reg_rdyCB; + api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa3_ntn_uc_dereg_rdyCB; + + return 0; +} + +/** + * ipa_is_modem_pipe()- Checks if pipe is owned by the modem + * + * @pipe_idx: pipe number + * Return value: true if owned by modem, false otherwize + */ +bool ipa_is_modem_pipe(int pipe_idx) +{ + int client_idx; + + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("Bad pipe index!\n"); + return false; + } + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + if (!IPA_CLIENT_IS_Q6_CONS(client_idx) && + !IPA_CLIENT_IS_Q6_PROD(client_idx)) + continue; + if (ipa3_get_ep_mapping(client_idx) == pipe_idx) + return true; + } + + return false; +} + +static void ipa3_write_rsrc_grp_type_reg(int group_index, + enum ipa_rsrc_grp_type_src n, bool src, + struct ipahal_reg_rsrc_grp_cfg *val) +{ + u8 hw_type_idx; + + hw_type_idx = ipa3_get_hw_type_index(); + + switch (hw_type_idx) { + case IPA_3_0: + if (src) { + switch (group_index) { + case IPA_v3_0_GROUP_UL: + case IPA_v3_0_GROUP_DL: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_0_GROUP_DIAG: + case IPA_v3_0_GROUP_DMA: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_0_GROUP_Q6ZIP: + case IPA_v3_0_GROUP_UC_RX_Q: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid source resource group,index #%d\n", + group_index); + break; + } + } else { + switch (group_index) { + case IPA_v3_0_GROUP_UL: + case IPA_v3_0_GROUP_DL: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_0_GROUP_DIAG: + case IPA_v3_0_GROUP_DMA: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_0_GROUP_Q6ZIP_GENERAL: + case IPA_v3_0_GROUP_Q6ZIP_ENGINE: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_45_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid destination resource group,index #%d\n", + group_index); + break; + } + } + break; + case IPA_3_5: + case IPA_3_5_MHI: + case IPA_3_5_1: + if (src) { + switch (group_index) { + case 
IPA_v3_5_GROUP_LWA_DL: + case IPA_v3_5_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_5_MHI_GROUP_DMA: + case IPA_v3_5_GROUP_UC_RX_Q: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid source resource group,index #%d\n", + group_index); + break; + } + } else { + switch (group_index) { + case IPA_v3_5_GROUP_LWA_DL: + case IPA_v3_5_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v3_5_MHI_GROUP_DMA: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid destination resource group,index #%d\n", + group_index); + break; + } + } + break; + case IPA_4_0: + case IPA_4_0_MHI: + if (src) { + switch (group_index) { + case IPA_v4_0_GROUP_LWA_DL: + case IPA_v4_0_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v4_0_MHI_GROUP_DMA: + case IPA_v4_0_GROUP_UC_RX_Q: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid source resource group,index #%d\n", + group_index); + break; + } + } else { + switch (group_index) { + case IPA_v4_0_GROUP_LWA_DL: + case IPA_v4_0_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + case IPA_v4_0_MHI_GROUP_DMA: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid destination resource group,index #%d\n", + group_index); + break; + } + } + break; + default: + IPAERR("invalid hw type\n"); + WARN_ON(1); + return; + } +} + +static void ipa3_configure_rx_hps_clients(int depth, bool min) +{ + int i; + struct ipahal_reg_rx_hps_clients val; + u8 hw_type_idx; + + hw_type_idx = ipa3_get_hw_type_index(); + + /* + * depth 0 contains 4 first clients out of 6 + * depth 1 contains 2 last clients out of 6 + */ + for (i = 0 ; i < (depth ? 2 : 4) ; i++) { + if (min) + val.client_minmax[i] = + ipa3_rsrc_rx_grp_config + [hw_type_idx] + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] + [!depth ? i : 4 + i].min; + else + val.client_minmax[i] = + ipa3_rsrc_rx_grp_config + [hw_type_idx] + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] + [!depth ? i : 4 + i].max; + } + if (depth) { + ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 : + IPA_RX_HPS_CLIENTS_MAX_DEPTH_1, + &val); + } else { + ipahal_write_reg_fields(min ? 
IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 : + IPA_RX_HPS_CLIENTS_MAX_DEPTH_0, + &val); + } +} + +static void ipa3_configure_rx_hps_weight(void) +{ + struct ipahal_reg_rx_hps_weights val; + u8 hw_type_idx; + + hw_type_idx = ipa3_get_hw_type_index(); + + val.hps_queue_weight_0 = + ipa3_rsrc_rx_grp_hps_weight_config + [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] + [0]; + val.hps_queue_weight_1 = + ipa3_rsrc_rx_grp_hps_weight_config + [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] + [1]; + val.hps_queue_weight_2 = + ipa3_rsrc_rx_grp_hps_weight_config + [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] + [2]; + val.hps_queue_weight_3 = + ipa3_rsrc_rx_grp_hps_weight_config + [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] + [3]; + + ipahal_write_reg_fields(IPA_HPS_FTCH_ARB_QUEUE_WEIGHT, &val); +} + +void ipa3_set_resorce_groups_min_max_limits(void) +{ + int i; + int j; + int src_rsrc_type_max; + int dst_rsrc_type_max; + int src_grp_idx_max; + int dst_grp_idx_max; + struct ipahal_reg_rsrc_grp_cfg val; + u8 hw_type_idx; + + IPADBG("ENTER\n"); + IPADBG("Assign source rsrc groups min-max limits\n"); + + hw_type_idx = ipa3_get_hw_type_index(); + switch (hw_type_idx) { + case IPA_3_0: + src_rsrc_type_max = IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX; + dst_rsrc_type_max = IPA_v3_0_RSRC_GRP_TYPE_DST_MAX; + src_grp_idx_max = IPA_v3_0_GROUP_MAX; + dst_grp_idx_max = IPA_v3_0_GROUP_MAX; + break; + case IPA_3_5: + case IPA_3_5_MHI: + case IPA_3_5_1: + src_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX; + dst_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_DST_MAX; + src_grp_idx_max = IPA_v3_5_SRC_GROUP_MAX; + dst_grp_idx_max = IPA_v3_5_DST_GROUP_MAX; + break; + case IPA_4_0: + case IPA_4_0_MHI: + src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX; + dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX; + src_grp_idx_max = IPA_v4_0_SRC_GROUP_MAX; + dst_grp_idx_max = IPA_v4_0_DST_GROUP_MAX; + break; + default: + IPAERR("invalid hw type index\n"); + WARN_ON(1); + return; + } + + for (i = 0; i < src_rsrc_type_max; i++) { + for (j = 0; j < src_grp_idx_max; j = j + 2) { + val.x_min = + ipa3_rsrc_src_grp_config[hw_type_idx][i][j].min; + val.x_max = + ipa3_rsrc_src_grp_config[hw_type_idx][i][j].max; + val.y_min = + ipa3_rsrc_src_grp_config[hw_type_idx][i][j + 1].min; + val.y_max = + ipa3_rsrc_src_grp_config[hw_type_idx][i][j + 1].max; + ipa3_write_rsrc_grp_type_reg(j, i, true, &val); + } + } + + IPADBG("Assign destination rsrc groups min-max limits\n"); + + for (i = 0; i < dst_rsrc_type_max; i++) { + for (j = 0; j < dst_grp_idx_max; j = j + 2) { + val.x_min = + ipa3_rsrc_dst_grp_config[hw_type_idx][i][j].min; + val.x_max = + ipa3_rsrc_dst_grp_config[hw_type_idx][i][j].max; + val.y_min = + ipa3_rsrc_dst_grp_config[hw_type_idx][i][j + 1].min; + val.y_max = + ipa3_rsrc_dst_grp_config[hw_type_idx][i][j + 1].max; + ipa3_write_rsrc_grp_type_reg(j, i, false, &val); + } + } + + /* move resource group configuration from HLOS to TZ */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) { + IPAERR("skip configuring ipa_rx_hps_clients from HLOS\n"); + return; + } + + IPADBG("Assign RX_HPS CMDQ rsrc groups min-max limits\n"); + + ipa3_configure_rx_hps_clients(0, true); + ipa3_configure_rx_hps_clients(0, false); + + /* only hw_type v3_0\3_1 have 6 RX_HPS_CMDQ and needs depth 1*/ + if (ipa3_ctx->ipa_hw_type <= IPA_HW_v3_1) { + ipa3_configure_rx_hps_clients(1, true); + ipa3_configure_rx_hps_clients(1, false); + } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) + ipa3_configure_rx_hps_weight(); + + IPADBG("EXIT\n"); +} + +static void 
ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep) +{ + bool empty; + + IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl); + gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL); + gsi_is_channel_empty(ep->gsi_chan_hdl, &empty); + if (!empty) { + IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl); + /* queue a work to start polling if don't have one */ + atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1); + if (!atomic_read(&ep->sys->curr_polling_state)) { + ipa3_inc_acquire_wakelock(); + atomic_set(&ep->sys->curr_polling_state, 1); + queue_work(ep->sys->wq, &ep->sys->work); + } + } +} + +void ipa3_suspend_apps_pipes(bool suspend) +{ + struct ipa_ep_cfg_ctrl cfg; + int ipa_ep_idx; + struct ipa3_ep_context *ep; + int res; + + memset(&cfg, 0, sizeof(cfg)); + cfg.ipa_ep_suspend = suspend; + + ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + if (ipa_ep_idx < 0) { + IPAERR("IPA client mapping failed\n"); + ipa_assert(); + return; + } + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (ep->valid) { + IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend", + ipa_ep_idx); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + if (suspend) { + res = ipa3_stop_gsi_channel(ipa_ep_idx); + if (res) { + IPAERR("failed to stop LAN channel\n"); + ipa_assert(); + } + } else { + res = gsi_start_channel(ep->gsi_chan_hdl); + if (res) { + IPAERR("failed to start LAN channel\n"); + ipa_assert(); + } + } + } else { + ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg); + } + if (suspend) + ipa3_gsi_poll_after_suspend(ep); + else if (!atomic_read(&ep->sys->curr_polling_state)) + gsi_config_channel_mode(ep->gsi_chan_hdl, + GSI_CHAN_MODE_CALLBACK); + } + + ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + /* Considering the case for SSR. */ + if (ipa_ep_idx == -1) { + IPADBG("Invalid client.\n"); + return; + } + ep = &ipa3_ctx->ep[ipa_ep_idx]; + if (ep->valid) { + IPADBG("%s pipe %d\n", suspend ? 
"suspend" : "unsuspend", + ipa_ep_idx); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + if (suspend) { + res = ipa3_stop_gsi_channel(ipa_ep_idx); + if (res) { + IPAERR("failed to stop WAN channel\n"); + ipa_assert(); + } + } else { + res = gsi_start_channel(ep->gsi_chan_hdl); + if (res) { + IPAERR("failed to start WAN channel\n"); + ipa_assert(); + } + } + } else { + ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg); + } + if (suspend) + ipa3_gsi_poll_after_suspend(ep); + else if (!atomic_read(&ep->sys->curr_polling_state)) + gsi_config_channel_mode(ep->gsi_chan_hdl, + GSI_CHAN_MODE_CALLBACK); + } +} + +int ipa3_allocate_dma_task_for_gsi(void) +{ + struct ipahal_imm_cmd_dma_task_32b_addr cmd = { 0 }; + + IPADBG("Allocate mem\n"); + ipa3_ctx->dma_task_info.mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE; + ipa3_ctx->dma_task_info.mem.base = dma_alloc_coherent(ipa3_ctx->pdev, + ipa3_ctx->dma_task_info.mem.size, + &ipa3_ctx->dma_task_info.mem.phys_base, + GFP_KERNEL); + if (!ipa3_ctx->dma_task_info.mem.base) { + IPAERR("no mem\n"); + return -EFAULT; + } + + cmd.flsh = 1; + cmd.size1 = ipa3_ctx->dma_task_info.mem.size; + cmd.addr1 = ipa3_ctx->dma_task_info.mem.phys_base; + cmd.packet_size = ipa3_ctx->dma_task_info.mem.size; + ipa3_ctx->dma_task_info.cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false); + if (!ipa3_ctx->dma_task_info.cmd_pyld) { + IPAERR("failed to construct dma_task_32b_addr cmd\n"); + dma_free_coherent(ipa3_ctx->pdev, + ipa3_ctx->dma_task_info.mem.size, + ipa3_ctx->dma_task_info.mem.base, + ipa3_ctx->dma_task_info.mem.phys_base); + memset(&ipa3_ctx->dma_task_info, 0, + sizeof(ipa3_ctx->dma_task_info)); + return -EFAULT; + } + + return 0; +} + +void ipa3_free_dma_task_for_gsi(void) +{ + dma_free_coherent(ipa3_ctx->pdev, + ipa3_ctx->dma_task_info.mem.size, + ipa3_ctx->dma_task_info.mem.base, + ipa3_ctx->dma_task_info.mem.phys_base); + ipahal_destroy_imm_cmd(ipa3_ctx->dma_task_info.cmd_pyld); + memset(&ipa3_ctx->dma_task_info, 0, sizeof(ipa3_ctx->dma_task_info)); +} + +/** + * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel + * + * Send a DMA_TASK of 1B to IPA to unblock GSI channel in STOP_IN_PROG. + * Return value: 0 on success, negative otherwise + */ +int ipa3_inject_dma_task_for_gsi(void) +{ + struct ipa3_desc desc = {0}; + + desc.opcode = ipa3_ctx->dma_task_info.cmd_pyld->opcode; + desc.pyld = ipa3_ctx->dma_task_info.cmd_pyld->data; + desc.len = ipa3_ctx->dma_task_info.cmd_pyld->len; + desc.type = IPA_IMM_CMD_DESC; + + IPADBG("sending 1B packet to IPA\n"); + if (ipa3_send_cmd_timeout(1, &desc, + IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) { + IPAERR("ipa3_send_cmd failed\n"); + return -EFAULT; + } + + return 0; +} + +/** + * ipa3_stop_gsi_channel()- Stops a GSI channel in IPA + * @chan_hdl: GSI channel handle + * + * This function implements the sequence to stop a GSI channel + * in IPA. This function returns when the channel is is STOP state. 
+ * + * Return value: 0 on success, negative otherwise + */ +int ipa3_stop_gsi_channel(u32 clnt_hdl) +{ + struct ipa_mem_buffer mem; + int res = 0; + int i; + struct ipa3_ep_context *ep; + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + memset(&mem, 0, sizeof(mem)); + + if (IPA_CLIENT_IS_PROD(ep->client)) { + IPADBG("Calling gsi_stop_channel ch:%lu\n", + ep->gsi_chan_hdl); + res = gsi_stop_channel(ep->gsi_chan_hdl); + IPADBG("gsi_stop_channel ch: %lu returned %d\n", + ep->gsi_chan_hdl, res); + goto end_sequence; + } + + for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) { + IPADBG("Calling gsi_stop_channel ch:%lu\n", + ep->gsi_chan_hdl); + res = gsi_stop_channel(ep->gsi_chan_hdl); + IPADBG("gsi_stop_channel ch: %lu returned %d\n", + ep->gsi_chan_hdl, res); + if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT) + goto end_sequence; + + IPADBG("Inject a DMA_TASK with 1B packet to IPA\n"); + /* Send a 1B packet DMA_TASK to IPA and try again */ + res = ipa3_inject_dma_task_for_gsi(); + if (res) { + IPAERR("Failed to inject DMA TASk for GSI\n"); + goto end_sequence; + } + + /* sleep for short period to flush IPA */ + usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC, + IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC); + } + + IPAERR("Failed to stop GSI channel with retries\n"); + res = -EFAULT; +end_sequence: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + + return res; +} + +static int ipa3_load_single_fw(const struct firmware *firmware, + const struct elf32_phdr *phdr) +{ + uint32_t *fw_mem_base; + int index; + const uint32_t *elf_data_ptr; + + if (phdr->p_offset > firmware->size) { + IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n", + phdr->p_offset, firmware->size); + return -EINVAL; + } + if ((firmware->size - phdr->p_offset) < phdr->p_filesz) { + IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n", + phdr->p_offset, phdr->p_filesz, firmware->size); + return -EINVAL; + } + + if (phdr->p_memsz % sizeof(uint32_t)) { + IPAERR("FW mem size %u doesn't align to 32bit\n", + phdr->p_memsz); + return -EFAULT; + } + + if (phdr->p_filesz > phdr->p_memsz) { + IPAERR("FW image too big src_size=%u dst_size=%u\n", + phdr->p_filesz, phdr->p_memsz); + return -EFAULT; + } + + fw_mem_base = ioremap(phdr->p_vaddr, phdr->p_memsz); + if (!fw_mem_base) { + IPAERR("Failed to map 0x%x for the size of %u\n", + phdr->p_vaddr, phdr->p_memsz); + return -ENOMEM; + } + + /* Set the entire region to 0s */ + memset(fw_mem_base, 0, phdr->p_memsz); + + elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset); + + /* Write the FW */ + for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) { + writel_relaxed(*elf_data_ptr, &fw_mem_base[index]); + elf_data_ptr++; + } + + iounmap(fw_mem_base); + + return 0; +} + +/** + * ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM. + * + * @firmware: Structure which contains the FW data from the user space. 
+ * @gsi_mem_base: GSI base address + * + * Return value: 0 on success, negative otherwise + * + */ +int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base) +{ + const struct elf32_hdr *ehdr; + const struct elf32_phdr *phdr; + unsigned long gsi_iram_ofst; + unsigned long gsi_iram_size; + phys_addr_t ipa_reg_mem_base; + u32 ipa_reg_ofst; + int rc; + + if (!gsi_mem_base) { + IPAERR("Invalid GSI base address\n"); + return -EINVAL; + } + + ipa_assert_on(!firmware); + /* One program header per FW image: GSI, DPS and HPS */ + if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) { + IPAERR("Missing ELF and Program headers firmware size=%zu\n", + firmware->size); + return -EINVAL; + } + + ehdr = (struct elf32_hdr *) firmware->data; + ipa_assert_on(!ehdr); + if (ehdr->e_phnum != 3) { + IPAERR("Unexpected number of ELF program headers\n"); + return -EINVAL; + } + phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr)); + + /* + * Each ELF program header represents a FW image and contains: + * p_vaddr : The starting address to which the FW needs to loaded. + * p_memsz : The size of the IRAM (where the image loaded) + * p_filesz: The size of the FW image embedded inside the ELF + * p_offset: Absolute offset to the image from the head of the ELF + */ + + /* Load GSI FW image */ + gsi_get_inst_ram_offset_and_size(&gsi_iram_ofst, &gsi_iram_size); + if (phdr->p_vaddr != (gsi_mem_base + gsi_iram_ofst)) { + IPAERR( + "Invalid GSI FW img load addr vaddr=0x%x gsi_mem_base=%pa gsi_iram_ofst=0x%lx\n" + , phdr->p_vaddr, &gsi_mem_base, gsi_iram_ofst); + return -EINVAL; + } + if (phdr->p_memsz > gsi_iram_size) { + IPAERR("Invalid GSI FW img size memsz=%d gsi_iram_size=%lu\n", + phdr->p_memsz, gsi_iram_size); + return -EINVAL; + } + rc = ipa3_load_single_fw(firmware, phdr); + if (rc) + return rc; + + phdr++; + ipa_reg_mem_base = ipa3_ctx->ipa_wrapper_base + ipahal_get_reg_base(); + + /* Load IPA DPS FW image */ + ipa_reg_ofst = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_FIRST); + if (phdr->p_vaddr != (ipa_reg_mem_base + ipa_reg_ofst)) { + IPAERR( + "Invalid IPA DPS img load addr vaddr=0x%x ipa_reg_mem_base=%pa ipa_reg_ofst=%u\n" + , phdr->p_vaddr, &ipa_reg_mem_base, ipa_reg_ofst); + return -EINVAL; + } + if (phdr->p_memsz > ipahal_get_dps_img_mem_size()) { + IPAERR("Invalid IPA DPS img size memsz=%d dps_mem_size=%u\n", + phdr->p_memsz, ipahal_get_dps_img_mem_size()); + return -EINVAL; + } + rc = ipa3_load_single_fw(firmware, phdr); + if (rc) + return rc; + + phdr++; + + /* Load IPA HPS FW image */ + ipa_reg_ofst = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_FIRST); + if (phdr->p_vaddr != (ipa_reg_mem_base + ipa_reg_ofst)) { + IPAERR( + "Invalid IPA HPS img load addr vaddr=0x%x ipa_reg_mem_base=%pa ipa_reg_ofst=%u\n" + , phdr->p_vaddr, &ipa_reg_mem_base, ipa_reg_ofst); + return -EINVAL; + } + if (phdr->p_memsz > ipahal_get_hps_img_mem_size()) { + IPAERR("Invalid IPA HPS img size memsz=%d dps_mem_size=%u\n", + phdr->p_memsz, ipahal_get_hps_img_mem_size()); + return -EINVAL; + } + rc = ipa3_load_single_fw(firmware, phdr); + if (rc) + return rc; + + IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n"); + return 0; +} + +/** + * ipa3_is_msm_device() - Is the running device a MSM or MDM? 
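ipa3_load_fws() above treats the firmware blob as a 32-bit ELF carrying exactly three program headers (GSI, DPS, HPS), where p_vaddr is the load target, p_filesz the image size inside the file, p_memsz the destination IRAM size and p_offset the image's position in the blob. A user-space sketch of the same header walk, using the standard <elf.h> definitions (illustrative; the input file name is whatever the caller supplies):

	#include <elf.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Print the program-header fields ipa3_load_fws() relies on. */
	static int dump_phdrs(const unsigned char *data, size_t size)
	{
		const Elf32_Ehdr *ehdr = (const Elf32_Ehdr *)data;
		const Elf32_Phdr *phdr;
		int i;

		if (size < sizeof(*ehdr) || ehdr->e_phnum == 0)
			return -1;
		if (ehdr->e_phoff > size ||
		    (size - ehdr->e_phoff) / sizeof(*phdr) < ehdr->e_phnum)
			return -1;

		phdr = (const Elf32_Phdr *)(data + ehdr->e_phoff);
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			/* same bounds check as ipa3_load_single_fw() */
			if (phdr->p_offset > size ||
			    size - phdr->p_offset < phdr->p_filesz)
				return -1;
			printf("img %d: load addr 0x%x file size %u mem size %u offset %u\n",
			       i, phdr->p_vaddr, phdr->p_filesz,
			       phdr->p_memsz, phdr->p_offset);
		}
		return 0;
	}

	int main(int argc, char **argv)
	{
		FILE *f;
		long len;
		unsigned char *buf;
		int rc;

		if (argc < 2)
			return 1;
		f = fopen(argv[1], "rb");
		if (!f)
			return 1;
		fseek(f, 0, SEEK_END);
		len = ftell(f);
		rewind(f);
		buf = malloc(len);
		if (!buf || fread(buf, 1, len, f) != (size_t)len)
			return 1;
		fclose(f);
		rc = dump_phdrs(buf, (size_t)len);
		free(buf);
		return rc ? 1 : 0;
	}
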
+ * Determine according to IPA version + * + * Return value: true if MSM, false if MDM + * + */ +bool ipa3_is_msm_device(void) +{ + switch (ipa3_ctx->ipa_hw_type) { + case IPA_HW_v3_0: + case IPA_HW_v3_5: + case IPA_HW_v4_0: + return false; + case IPA_HW_v3_1: + case IPA_HW_v3_5_1: + return true; + default: + IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type); + ipa_assert(); + } + + return false; +} + +/** + * ipa3_disable_prefetch() - disable\enable tx prefetch + * + * @client: the client which is related to the TX where prefetch will be + * disabled + * + * Return value: Non applicable + * + */ +void ipa3_disable_prefetch(enum ipa_client_type client) +{ + struct ipahal_reg_tx_cfg cfg; + u8 qmb; + + qmb = ipa3_get_qmb_master_sel(client); + + IPADBG("disabling prefetch for qmb %d\n", (int)qmb); + + ipahal_read_reg_fields(IPA_TX_CFG, &cfg); + /* QMB0 (DDR) correlates with TX0, QMB1(PCIE) correlates with TX1 */ + if (qmb == QMB_MASTER_SELECT_DDR) + cfg.tx0_prefetch_disable = true; + else + cfg.tx1_prefetch_disable = true; + ipahal_write_reg_fields(IPA_TX_CFG, &cfg); +} + +/** + * ipa3_get_pdev() - return a pointer to IPA dev struct + * + * Return value: a pointer to IPA dev struct + * + */ +struct device *ipa3_get_pdev(void) +{ + if (!ipa3_ctx) + return NULL; + + return ipa3_ctx->pdev; +} + +/** + * ipa3_enable_dcd() - enable dynamic clock division on IPA + * + * Return value: Non applicable + * + */ +void ipa3_enable_dcd(void) +{ + struct ipahal_reg_idle_indication_cfg idle_indication_cfg; + + /* recommended values for IPA 3.5 according to IPA HPG */ + idle_indication_cfg.const_non_idle_enable = 0; + idle_indication_cfg.enter_idle_debounce_thresh = 256; + + ipahal_write_reg_fields(IPA_IDLE_INDICATION_CFG, + &idle_indication_cfg); +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile new file mode 100644 index 000000000000..67e491b74abe --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_IPA3) += ipa_hal.o + +ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o ipahal_hw_stats.o diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c new file mode 100644 index 000000000000..675e57645488 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c @@ -0,0 +1,1535 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
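The immediate-command constructors in ipahal.c below all follow one pattern: allocate the pyld header together with the HW-layout struct in a single buffer, range-check each parameter against the width of its hardware field (the `& ~0xFFFF` style checks), then pack the fields. A small user-space sketch of that width-check-and-pack idea, with a hypothetical command layout rather than any real IPA opcode:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical HW layout: 16-bit offset, 5-bit pipe index, 32-bit value. */
	struct hw_cmd {
		uint32_t offset:16;
		uint32_t pipe:5;
		uint32_t reserved:11;
		uint32_t value;
	} __attribute__((packed));

	/* Header plus trailing HW image, as in ipahal_imm_cmd_pyld. */
	struct cmd_pyld {
		uint16_t opcode;
		uint16_t len;
		uint8_t data[];
	};

	static struct cmd_pyld *construct_cmd(uint16_t opcode, uint32_t offset,
					      uint32_t pipe, uint32_t value)
	{
		struct cmd_pyld *pyld;
		struct hw_cmd *data;

		/* Reject values that do not fit the narrow HW fields. */
		if ((offset & ~0xFFFFu) || (pipe & ~0x1Fu))
			return NULL;

		pyld = calloc(1, sizeof(*pyld) + sizeof(*data));
		if (!pyld)
			return NULL;
		pyld->opcode = opcode;
		pyld->len = sizeof(*data);
		data = (struct hw_cmd *)pyld->data;
		data->offset = offset;
		data->pipe = pipe;
		data->value = value;
		return pyld;
	}

	int main(void)
	{
		struct cmd_pyld *pyld = construct_cmd(0x12, 0x0100, 3, 0xdeadbeef);

		if (!pyld)
			return 1;
		printf("opcode 0x%x len %u\n", pyld->opcode, pyld->len);
		free(pyld);
		return 0;
	}
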
+ */ + +#include +#include "ipahal.h" +#include "ipahal_i.h" +#include "ipahal_reg_i.h" +#include "ipahal_fltrt_i.h" +#include "ipahal_hw_stats_i.h" + + +struct ipahal_context *ipahal_ctx; + +static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = { + __stringify(IPA_IMM_CMD_IP_V4_FILTER_INIT), + __stringify(IPA_IMM_CMD_IP_V6_FILTER_INIT), + __stringify(IPA_IMM_CMD_IP_V4_NAT_INIT), + __stringify(IPA_IMM_CMD_IP_V4_ROUTING_INIT), + __stringify(IPA_IMM_CMD_IP_V6_ROUTING_INIT), + __stringify(IPA_IMM_CMD_HDR_INIT_LOCAL), + __stringify(IPA_IMM_CMD_HDR_INIT_SYSTEM), + __stringify(IPA_IMM_CMD_REGISTER_WRITE), + __stringify(IPA_IMM_CMD_NAT_DMA), + __stringify(IPA_IMM_CMD_IP_PACKET_INIT), + __stringify(IPA_IMM_CMD_DMA_SHARED_MEM), + __stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS), + __stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR), + __stringify(IPA_IMM_CMD_TABLE_DMA), +}; + +static const char *ipahal_pkt_status_exception_to_str + [IPAHAL_PKT_STATUS_EXCEPTION_MAX] = { + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_NONE), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_NAT), + __stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT), +}; + +static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd); + + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_dma_task_32b_addr *data; + struct ipahal_imm_cmd_dma_task_32b_addr *dma_params = + (struct ipahal_imm_cmd_dma_task_32b_addr *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) + return pyld; + + /* Currently supports only one packet */ + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd) + (1 << 8); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data; + + if (unlikely(dma_params->size1 & ~0xFFFF)) { + WARN(1, "Size1 is bigger than 16bit width 0x%x\n", + dma_params->size1); + } + if (unlikely(dma_params->packet_size & ~0xFFFF)) { + WARN(1, "Pkt size is bigger than 16bit width 0x%x\n", + dma_params->packet_size); + } + data->cmplt = dma_params->cmplt ? 1 : 0; + data->eof = dma_params->eof ? 1 : 0; + data->flsh = dma_params->flsh ? 1 : 0; + data->lock = dma_params->lock ? 1 : 0; + data->unlock = dma_params->unlock ? 
1 : 0; + data->size1 = dma_params->size1; + data->addr1 = dma_params->addr1; + data->packet_size = dma_params->packet_size; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_tag_status( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_packet_tag_status *data; + struct ipahal_imm_cmd_ip_packet_tag_status *tag_params = + (struct ipahal_imm_cmd_ip_packet_tag_status *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data; + + if (unlikely(tag_params->tag & ~0xFFFFFFFFFFFF)) { + IPAHAL_ERR("tag is bigger than 48bit width 0x%llx\n", + tag_params->tag); + WARN_ON(1); + } + data->tag = tag_params->tag; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_dma_shared_mem *data; + struct ipahal_imm_cmd_dma_shared_mem *mem_params = + (struct ipahal_imm_cmd_dma_shared_mem *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) + return pyld; + + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data; + + if (unlikely(mem_params->size & ~0xFFFF)) { + WARN(1, "Size is bigger than 16bit width 0x%x\n", + mem_params->size); + } + if (unlikely(mem_params->local_addr & ~0xFFFF)) { + WARN(1, "Local addr is bigger than 16bit width 0x%x\n", + mem_params->local_addr); + } + data->direction = mem_params->is_read ? 1 : 0; + data->size = mem_params->size; + data->local_addr = mem_params->local_addr; + data->system_addr = mem_params->system_addr; + data->skip_pipeline_clear = mem_params->skip_pipeline_clear ? 1 : 0; + switch (mem_params->pipeline_clear_options) { + case IPAHAL_HPS_CLEAR: + data->pipeline_clear_options = 0; + break; + case IPAHAL_SRC_GRP_CLEAR: + data->pipeline_clear_options = 1; + break; + case IPAHAL_FULL_PIPELINE_CLEAR: + data->pipeline_clear_options = 2; + break; + default: + IPAHAL_ERR("unsupported pipline clear option %d\n", + mem_params->pipeline_clear_options); + WARN_ON(1); + }; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem_v_4_0( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *data; + struct ipahal_imm_cmd_dma_shared_mem *mem_params = + (struct ipahal_imm_cmd_dma_shared_mem *)params; + + if (unlikely(mem_params->size & ~0xFFFF)) { + IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n", + mem_params->size); + WARN_ON(1); + return NULL; + } + if (unlikely(mem_params->local_addr & ~0xFFFF)) { + IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n", + mem_params->local_addr); + WARN_ON(1); + return NULL; + } + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + WARN_ON(1); + return pyld; + } + + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *)pyld->data; + + data->direction = mem_params->is_read ? 
1 : 0; + data->clear_after_read = mem_params->clear_after_read; + data->size = mem_params->size; + data->local_addr = mem_params->local_addr; + data->system_addr = mem_params->system_addr; + pyld->opcode |= (mem_params->skip_pipeline_clear ? 1 : 0) << 8; + switch (mem_params->pipeline_clear_options) { + case IPAHAL_HPS_CLEAR: + break; + case IPAHAL_SRC_GRP_CLEAR: + pyld->opcode |= (1 << 9); + break; + case IPAHAL_FULL_PIPELINE_CLEAR: + pyld->opcode |= (2 << 9); + break; + default: + IPAHAL_ERR("unsupported pipline clear option %d\n", + mem_params->pipeline_clear_options); + WARN_ON(1); + }; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_register_write *data; + struct ipahal_imm_cmd_register_write *regwrt_params = + (struct ipahal_imm_cmd_register_write *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_register_write *)pyld->data; + + if (unlikely(regwrt_params->offset & ~0xFFFF)) { + IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n", + regwrt_params->offset); + WARN_ON(1); + } + data->offset = regwrt_params->offset; + data->value = regwrt_params->value; + data->value_mask = regwrt_params->value_mask; + + data->skip_pipeline_clear = regwrt_params->skip_pipeline_clear ? 1 : 0; + switch (regwrt_params->pipeline_clear_options) { + case IPAHAL_HPS_CLEAR: + data->pipeline_clear_options = 0; + break; + case IPAHAL_SRC_GRP_CLEAR: + data->pipeline_clear_options = 1; + break; + case IPAHAL_FULL_PIPELINE_CLEAR: + data->pipeline_clear_options = 2; + break; + default: + IPAHAL_ERR("unsupported pipline clear option %d\n", + regwrt_params->pipeline_clear_options); + WARN_ON(1); + }; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write_v_4_0( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_register_write_v_4_0 *data; + struct ipahal_imm_cmd_register_write *regwrt_params = + (struct ipahal_imm_cmd_register_write *)params; + + if (unlikely(regwrt_params->offset & ~0xFFFF)) { + IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n", + regwrt_params->offset); + WARN_ON(1); + return NULL; + } + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + WARN_ON(1); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_register_write_v_4_0 *)pyld->data; + + data->offset = regwrt_params->offset; + data->offset_high = regwrt_params->offset >> 16; + data->value = regwrt_params->value; + data->value_mask = regwrt_params->value_mask; + + pyld->opcode |= (regwrt_params->skip_pipeline_clear ? 
1 : 0) << 8; + switch (regwrt_params->pipeline_clear_options) { + case IPAHAL_HPS_CLEAR: + break; + case IPAHAL_SRC_GRP_CLEAR: + pyld->opcode |= (1 << 9); + break; + case IPAHAL_FULL_PIPELINE_CLEAR: + pyld->opcode |= (2 << 9); + break; + default: + IPAHAL_ERR("unsupported pipline clear option %d\n", + regwrt_params->pipeline_clear_options); + WARN_ON(1); + }; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_packet_init *data; + struct ipahal_imm_cmd_ip_packet_init *pktinit_params = + (struct ipahal_imm_cmd_ip_packet_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data; + + if (unlikely(pktinit_params->destination_pipe_index & ~0x1F)) { + IPAHAL_ERR("Dst pipe idx is bigger than 5bit width 0x%x\n", + pktinit_params->destination_pipe_index); + WARN_ON(1); + } + data->destination_pipe_index = pktinit_params->destination_pipe_index; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_nat_dma *data; + struct ipahal_imm_cmd_nat_dma *nat_params = + (struct ipahal_imm_cmd_nat_dma *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data; + + data->table_index = nat_params->table_index; + data->base_addr = nat_params->base_addr; + data->offset = nat_params->offset; + data->data = nat_params->data; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_table_dma_ipav4( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_table_dma_ipav4 *data; + struct ipahal_imm_cmd_table_dma *nat_params = + (struct ipahal_imm_cmd_table_dma *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_table_dma_ipav4 *)pyld->data; + + data->table_index = nat_params->table_index; + data->base_addr = nat_params->base_addr; + data->offset = nat_params->offset; + data->data = nat_params->data; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_hdr_init_system *data; + struct ipahal_imm_cmd_hdr_init_system *syshdr_params = + (struct ipahal_imm_cmd_hdr_init_system *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data; + + data->hdr_table_addr = 
syshdr_params->hdr_table_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_local( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_hdr_init_local *data; + struct ipahal_imm_cmd_hdr_init_local *lclhdr_params = + (struct ipahal_imm_cmd_hdr_init_local *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data; + + if (unlikely(lclhdr_params->size_hdr_table & ~0xFFF)) { + IPAHAL_ERR("Hdr tble size is bigger than 12bit width 0x%x\n", + lclhdr_params->size_hdr_table); + WARN_ON(1); + } + data->hdr_table_addr = lclhdr_params->hdr_table_addr; + data->size_hdr_table = lclhdr_params->size_hdr_table; + data->hdr_addr = lclhdr_params->hdr_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_routing_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_v6_routing_init *data; + struct ipahal_imm_cmd_ip_v6_routing_init *rt6_params = + (struct ipahal_imm_cmd_ip_v6_routing_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data; + + data->hash_rules_addr = rt6_params->hash_rules_addr; + data->hash_rules_size = rt6_params->hash_rules_size; + data->hash_local_addr = rt6_params->hash_local_addr; + data->nhash_rules_addr = rt6_params->nhash_rules_addr; + data->nhash_rules_size = rt6_params->nhash_rules_size; + data->nhash_local_addr = rt6_params->nhash_local_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_routing_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_v4_routing_init *data; + struct ipahal_imm_cmd_ip_v4_routing_init *rt4_params = + (struct ipahal_imm_cmd_ip_v4_routing_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data; + + data->hash_rules_addr = rt4_params->hash_rules_addr; + data->hash_rules_size = rt4_params->hash_rules_size; + data->hash_local_addr = rt4_params->hash_local_addr; + data->nhash_rules_addr = rt4_params->nhash_rules_addr; + data->nhash_rules_size = rt4_params->nhash_rules_size; + data->nhash_local_addr = rt4_params->nhash_local_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_v4_nat_init *data; + struct ipahal_imm_cmd_ip_v4_nat_init *nat4_params = + (struct ipahal_imm_cmd_ip_v4_nat_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = 
ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data; + + data->ipv4_rules_addr = nat4_params->ipv4_rules_addr; + data->ipv4_expansion_rules_addr = + nat4_params->ipv4_expansion_rules_addr; + data->index_table_addr = nat4_params->index_table_addr; + data->index_table_expansion_addr = + nat4_params->index_table_expansion_addr; + data->table_index = nat4_params->table_index; + data->ipv4_rules_addr_type = + nat4_params->ipv4_rules_addr_shared ? 1 : 0; + data->ipv4_expansion_rules_addr_type = + nat4_params->ipv4_expansion_rules_addr_shared ? 1 : 0; + data->index_table_addr_type = + nat4_params->index_table_addr_shared ? 1 : 0; + data->index_table_expansion_addr_type = + nat4_params->index_table_expansion_addr_shared ? 1 : 0; + data->size_base_tables = nat4_params->size_base_tables; + data->size_expansion_tables = nat4_params->size_expansion_tables; + data->public_ip_addr = nat4_params->public_ip_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_filter_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_v6_filter_init *data; + struct ipahal_imm_cmd_ip_v6_filter_init *flt6_params = + (struct ipahal_imm_cmd_ip_v6_filter_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data; + + data->hash_rules_addr = flt6_params->hash_rules_addr; + data->hash_rules_size = flt6_params->hash_rules_size; + data->hash_local_addr = flt6_params->hash_local_addr; + data->nhash_rules_addr = flt6_params->nhash_rules_addr; + data->nhash_rules_size = flt6_params->nhash_rules_size; + data->nhash_local_addr = flt6_params->nhash_local_addr; + + return pyld; +} + +static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_filter_init( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_pyld *pyld; + struct ipa_imm_cmd_hw_ip_v4_filter_init *data; + struct ipahal_imm_cmd_ip_v4_filter_init *flt4_params = + (struct ipahal_imm_cmd_ip_v4_filter_init *)params; + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx); + if (unlikely(!pyld)) { + IPAHAL_ERR("kzalloc err\n"); + return pyld; + } + pyld->opcode = ipahal_imm_cmd_get_opcode(cmd); + pyld->len = sizeof(*data); + data = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data; + + data->hash_rules_addr = flt4_params->hash_rules_addr; + data->hash_rules_size = flt4_params->hash_rules_size; + data->hash_local_addr = flt4_params->hash_local_addr; + data->nhash_rules_addr = flt4_params->nhash_rules_addr; + data->nhash_rules_size = flt4_params->nhash_rules_size; + data->nhash_local_addr = flt4_params->nhash_local_addr; + + return pyld; +} + +/* + * struct ipahal_imm_cmd_obj - immediate command H/W information for + * specific IPA version + * @construct - CB to construct imm command payload from abstracted structure + * @opcode - Immediate command OpCode + */ +struct ipahal_imm_cmd_obj { + struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd, + const void *params, bool is_atomic_ctx); + u16 opcode; +}; + +/* + * This table contains the info regard each immediate command for IPAv3 + * and later. + * Information like: opcode and construct functions. 
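+ * (For example, the IPAv3.0 REGISTER_WRITE entry below pairs opcode 12 with + * its construct callback ipa_imm_cmd_construct_register_write.)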
+ * All the information on the IMM on IPAv3 are statically defined below. + * If information is missing regard some IMM on some IPA version, + * the init function will fill it with the information from the previous + * IPA version. + * Information is considered missing if all of the fields are 0 + * If opcode is -1, this means that the IMM is removed on the + * specific version + */ +static struct ipahal_imm_cmd_obj + ipahal_imm_cmd_objs[IPA_HW_MAX][IPA_IMM_CMD_MAX] = { + /* IPAv3 */ + [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = { + ipa_imm_cmd_construct_ip_v4_filter_init, + 3}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = { + ipa_imm_cmd_construct_ip_v6_filter_init, + 4}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = { + ipa_imm_cmd_construct_ip_v4_nat_init, + 5}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = { + ipa_imm_cmd_construct_ip_v4_routing_init, + 7}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = { + ipa_imm_cmd_construct_ip_v6_routing_init, + 8}, + [IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = { + ipa_imm_cmd_construct_hdr_init_local, + 9}, + [IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = { + ipa_imm_cmd_construct_hdr_init_system, + 10}, + [IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = { + ipa_imm_cmd_construct_register_write, + 12}, + [IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = { + ipa_imm_cmd_construct_nat_dma, + 14}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = { + ipa_imm_cmd_construct_ip_packet_init, + 16}, + [IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = { + ipa_imm_cmd_construct_dma_task_32b_addr, + 17}, + [IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = { + ipa_imm_cmd_construct_dma_shared_mem, + 19}, + [IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = { + ipa_imm_cmd_construct_ip_packet_tag_status, + 20}, + + /* IPAv4 */ + [IPA_HW_v4_0][IPA_IMM_CMD_REGISTER_WRITE] = { + ipa_imm_cmd_construct_register_write_v_4_0, + 12}, + /* NAT_DMA was renamed to TABLE_DMA for IPAv4 */ + [IPA_HW_v4_0][IPA_IMM_CMD_NAT_DMA] = { + NULL, + -1 }, + [IPA_HW_v4_0][IPA_IMM_CMD_TABLE_DMA] = { + ipa_imm_cmd_construct_table_dma_ipav4, + 14}, + [IPA_HW_v4_0][IPA_IMM_CMD_DMA_SHARED_MEM] = { + ipa_imm_cmd_construct_dma_shared_mem_v_4_0, + 19}, +}; + +/* + * ipahal_imm_cmd_init() - Build the Immediate command information table + * See ipahal_imm_cmd_objs[][] comments + */ +static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type) +{ + int i; + int j; + struct ipahal_imm_cmd_obj zero_obj; + + IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type); + + if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) { + IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type); + return -EINVAL; + } + + memset(&zero_obj, 0, sizeof(zero_obj)); + for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) { + for (j = 0; j < IPA_IMM_CMD_MAX ; j++) { + if (!memcmp(&ipahal_imm_cmd_objs[i+1][j], &zero_obj, + sizeof(struct ipahal_imm_cmd_obj))) { + memcpy(&ipahal_imm_cmd_objs[i+1][j], + &ipahal_imm_cmd_objs[i][j], + sizeof(struct ipahal_imm_cmd_obj)); + } else { + /* + * explicitly overridden immediate command. 
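+ * (i.e. the later IPA version redefines the command, or marks it as + * removed via opcode -1).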
+ * Check validity + */ + if (!ipahal_imm_cmd_objs[i+1][j].opcode) { + IPAHAL_ERR( + "imm_cmd=%s with zero opcode ipa_ver=%d\n", + ipahal_imm_cmd_name_str(j), i+1); + WARN_ON(1); + } + if (!ipahal_imm_cmd_objs[i+1][j].construct) { + IPAHAL_ERR( + "imm_cmd=%s with NULL construct func ipa_ver=%d\n", + ipahal_imm_cmd_name_str(j), i+1); + WARN_ON(1); + } + } + } + } + + return 0; +} + +/* + * ipahal_imm_cmd_name_str() - returns string that represent the imm cmd + * @cmd_name: [in] Immediate command name + */ +const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name) +{ + if (cmd_name < 0 || cmd_name >= IPA_IMM_CMD_MAX) { + IPAHAL_ERR("requested name of invalid imm_cmd=%d\n", cmd_name); + return "Invalid IMM_CMD"; + } + + return ipahal_imm_cmd_name_to_str[cmd_name]; +} + +/* + * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command + */ +static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd) +{ + u32 opcode; + + if (cmd >= IPA_IMM_CMD_MAX) { + IPAHAL_ERR("Invalid immediate command imm_cmd=%u\n", cmd); + ipa_assert(); + return -EFAULT; + } + + IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n", + ipahal_imm_cmd_name_str(cmd)); + opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode; + if (opcode == -1) { + IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n", + ipahal_imm_cmd_name_str(cmd)); + ipa_assert(); + return -EFAULT; + } + + return opcode; +} + +/* + * ipahal_construct_imm_cmd() - Construct immdiate command + * This function builds imm cmd bulk that can be be sent to IPA + * The command will be allocated dynamically. + * After done using it, call ipahal_destroy_imm_cmd() to release it + */ +struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx) +{ + if (!params) { + IPAHAL_ERR("Input error: params=%pK\n", params); + ipa_assert(); + return NULL; + } + + if (cmd >= IPA_IMM_CMD_MAX) { + IPAHAL_ERR("Invalid immediate command %u\n", cmd); + return NULL; + } + + IPAHAL_DBG_LOW("construct IMM_CMD:%s\n", ipahal_imm_cmd_name_str(cmd)); + return ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].construct( + cmd, params, is_atomic_ctx); +} + +/* + * ipahal_construct_nop_imm_cmd() - Construct immediate comamnd for NO-Op + * Core driver may want functionality to inject NOP commands to IPA + * to ensure e.g., PIPLINE clear before someother operation. + * The functionality given by this function can be reached by + * ipahal_construct_imm_cmd(). This function is helper to the core driver + * to reach this NOP functionlity easily. + * @skip_pipline_clear: if to skip pipeline clear waiting (don't wait) + * @pipline_clr_opt: options for pipeline clear waiting + * @is_atomic_ctx: is called in atomic context or can sleep? + */ +struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd( + bool skip_pipline_clear, + enum ipahal_pipeline_clear_option pipline_clr_opt, + bool is_atomic_ctx) +{ + struct ipahal_imm_cmd_register_write cmd; + struct ipahal_imm_cmd_pyld *cmd_pyld; + + memset(&cmd, 0, sizeof(cmd)); + cmd.skip_pipeline_clear = skip_pipline_clear; + cmd.pipeline_clear_options = pipline_clr_opt; + cmd.value_mask = 0x0; + + cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, + &cmd, is_atomic_ctx); + + if (!cmd_pyld) + IPAHAL_ERR("failed to construct register_write imm cmd\n"); + + return cmd_pyld; +} + + +/* IPA Packet Status Logic */ + +#define IPA_PKT_STATUS_SET_MSK(__hw_bit_msk, __shft) \ + (status->status_mask |= \ + ((hw_status->status_mask & (__hw_bit_msk) ? 
1 : 0) << (__shft))) + +static void ipa_pkt_status_parse( + const void *unparsed_status, struct ipahal_pkt_status *status) +{ + enum ipahal_pkt_status_opcode opcode = 0; + enum ipahal_pkt_status_exception exception_type = 0; + bool is_ipv6; + + struct ipa_pkt_status_hw *hw_status = + (struct ipa_pkt_status_hw *)unparsed_status; + + is_ipv6 = (hw_status->status_mask & 0x80) ? false : true; + + status->pkt_len = hw_status->pkt_len; + status->endp_src_idx = hw_status->endp_src_idx; + status->endp_dest_idx = hw_status->endp_dest_idx; + status->metadata = hw_status->metadata; + status->flt_local = hw_status->flt_local; + status->flt_hash = hw_status->flt_hash; + status->flt_global = hw_status->flt_hash; + status->flt_ret_hdr = hw_status->flt_ret_hdr; + status->flt_miss = ~(hw_status->flt_rule_id) ? false : true; + status->flt_rule_id = hw_status->flt_rule_id; + status->rt_local = hw_status->rt_local; + status->rt_hash = hw_status->rt_hash; + status->ucp = hw_status->ucp; + status->rt_tbl_idx = hw_status->rt_tbl_idx; + status->rt_miss = ~(hw_status->rt_rule_id) ? false : true; + status->rt_rule_id = hw_status->rt_rule_id; + status->nat_hit = hw_status->nat_hit; + status->nat_entry_idx = hw_status->nat_entry_idx; + status->tag_info = hw_status->tag_info; + status->seq_num = hw_status->seq_num; + status->time_of_day_ctr = hw_status->time_of_day_ctr; + status->hdr_local = hw_status->hdr_local; + status->hdr_offset = hw_status->hdr_offset; + status->frag_hit = hw_status->frag_hit; + status->frag_rule = hw_status->frag_rule; + + switch (hw_status->status_opcode) { + case 0x1: + opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET; + break; + case 0x2: + opcode = IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE; + break; + case 0x4: + opcode = IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET; + break; + case 0x8: + opcode = IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET; + break; + case 0x10: + opcode = IPAHAL_PKT_STATUS_OPCODE_LOG; + break; + case 0x20: + opcode = IPAHAL_PKT_STATUS_OPCODE_DCMP; + break; + case 0x40: + opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS; + break; + default: + IPAHAL_ERR("unsupported Status Opcode 0x%x\n", + hw_status->status_opcode); + WARN_ON(1); + }; + status->status_opcode = opcode; + + switch (hw_status->nat_type) { + case 0: + status->nat_type = IPAHAL_PKT_STATUS_NAT_NONE; + break; + case 1: + status->nat_type = IPAHAL_PKT_STATUS_NAT_SRC; + break; + case 2: + status->nat_type = IPAHAL_PKT_STATUS_NAT_DST; + break; + default: + IPAHAL_ERR("unsupported Status NAT type 0x%x\n", + hw_status->nat_type); + WARN_ON(1); + }; + + switch (hw_status->exception) { + case 0: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NONE; + break; + case 1: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR; + break; + case 4: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE; + break; + case 8: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH; + break; + case 16: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS; + break; + case 32: + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT; + break; + case 64: + if (is_ipv6) + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT; + else + exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT; + break; + default: + IPAHAL_ERR("unsupported Status Exception type 0x%x\n", + hw_status->exception); + WARN_ON(1); + }; + status->exception = exception_type; + + IPA_PKT_STATUS_SET_MSK(0x1, IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT); + IPA_PKT_STATUS_SET_MSK(0x2, IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT); + IPA_PKT_STATUS_SET_MSK(0x4, 
IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT); + IPA_PKT_STATUS_SET_MSK(0x8, IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT); + IPA_PKT_STATUS_SET_MSK(0x10, IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT); + IPA_PKT_STATUS_SET_MSK(0x20, IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT); + IPA_PKT_STATUS_SET_MSK(0x40, + IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT); + IPA_PKT_STATUS_SET_MSK(0x80, IPAHAL_PKT_STATUS_MASK_V4_SHFT); + IPA_PKT_STATUS_SET_MSK(0x100, + IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT); + IPA_PKT_STATUS_SET_MSK(0x200, IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT); + IPA_PKT_STATUS_SET_MSK(0x400, IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT); + IPA_PKT_STATUS_SET_MSK(0x800, + IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT); + IPA_PKT_STATUS_SET_MSK(0x1000, IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT); + IPA_PKT_STATUS_SET_MSK(0x2000, IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT); + IPA_PKT_STATUS_SET_MSK(0x4000, IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT); + IPA_PKT_STATUS_SET_MSK(0x8000, IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT); + status->status_mask &= 0xFFFF; +} + +/* + * struct ipahal_pkt_status_obj - Pakcet Status H/W information for + * specific IPA version + * @size: H/W size of the status packet + * @parse: CB that parses the H/W packet status into the abstracted structure + */ +struct ipahal_pkt_status_obj { + u32 size; + void (*parse)(const void *unparsed_status, + struct ipahal_pkt_status *status); +}; + +/* + * This table contains the info regard packet status for IPAv3 and later + * Information like: size of packet status and parsing function + * All the information on the pkt Status on IPAv3 are statically defined below. + * If information is missing regard some IPA version, the init function + * will fill it with the information from the previous IPA version. + * Information is considered missing if all of the fields are 0 + */ +static struct ipahal_pkt_status_obj ipahal_pkt_status_objs[IPA_HW_MAX] = { + /* IPAv3 */ + [IPA_HW_v3_0] = { + IPA3_0_PKT_STATUS_SIZE, + ipa_pkt_status_parse, + }, +}; + +/* + * ipahal_pkt_status_init() - Build the packet status information array + * for the different IPA versions + * See ipahal_pkt_status_objs[] comments + */ +static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type) +{ + int i; + struct ipahal_pkt_status_obj zero_obj; + + IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type); + + if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) { + IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type); + return -EINVAL; + } + + /* + * Since structure alignment is implementation dependent, + * add test to avoid different and incompatible data layouts. + * + * In case new H/W has different size or structure of status packet, + * add a compile time validty check for it like below (as well as + * the new defines and/or the new strucutre in the internal header). 
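+ * A hypothetical example of such a guard for a future layout (the names + * used here are illustrative and are not defined in this snapshot): + * BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw_v4) != IPA4_0_PKT_STATUS_SIZE);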
+ */ + BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw) != + IPA3_0_PKT_STATUS_SIZE); + + memset(&zero_obj, 0, sizeof(zero_obj)); + for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) { + if (!memcmp(&ipahal_pkt_status_objs[i+1], &zero_obj, + sizeof(struct ipahal_pkt_status_obj))) { + memcpy(&ipahal_pkt_status_objs[i+1], + &ipahal_pkt_status_objs[i], + sizeof(struct ipahal_pkt_status_obj)); + } else { + /* + * explicitly overridden Packet Status info + * Check validity + */ + if (!ipahal_pkt_status_objs[i+1].size) { + IPAHAL_ERR( + "Packet Status with zero size ipa_ver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_pkt_status_objs[i+1].parse) { + IPAHAL_ERR( + "Packet Status without Parse func ipa_ver=%d\n", + i+1); + WARN_ON(1); + } + } + } + + return 0; +} + +/* + * ipahal_pkt_status_get_size() - Get H/W size of packet status + */ +u32 ipahal_pkt_status_get_size(void) +{ + return ipahal_pkt_status_objs[ipahal_ctx->hw_type].size; +} + +/* + * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form + * @unparsed_status: Pointer to H/W format of the packet status as read from H/W + * @status: Pointer to pre-allocated buffer where the parsed info will be stored + */ +void ipahal_pkt_status_parse(const void *unparsed_status, + struct ipahal_pkt_status *status) +{ + if (!unparsed_status || !status) { + IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n", + unparsed_status, status); + return; + } + + IPAHAL_DBG_LOW("Parse Status Packet\n"); + memset(status, 0, sizeof(*status)); + ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse(unparsed_status, + status); +} + +/* + * ipahal_pkt_status_exception_str() - returns string represents exception type + * @exception: [in] The exception type + */ +const char *ipahal_pkt_status_exception_str( + enum ipahal_pkt_status_exception exception) +{ + if (exception < 0 || exception >= IPAHAL_PKT_STATUS_EXCEPTION_MAX) { + IPAHAL_ERR( + "requested string of invalid pkt_status exception=%d\n", + exception); + return "Invalid PKT_STATUS_EXCEPTION"; + } + + return ipahal_pkt_status_exception_to_str[exception]; +} + +#ifdef CONFIG_DEBUG_FS +static void ipahal_debugfs_init(void) +{ + ipahal_ctx->dent = debugfs_create_dir("ipahal", 0); + if (!ipahal_ctx->dent || IS_ERR(ipahal_ctx->dent)) { + IPAHAL_ERR("fail to create ipahal debugfs folder\n"); + goto fail; + } + + return; +fail: + debugfs_remove_recursive(ipahal_ctx->dent); + ipahal_ctx->dent = NULL; +} + +static void ipahal_debugfs_remove(void) +{ + if (!ipahal_ctx) + return; + + if (IS_ERR(ipahal_ctx->dent)) { + IPAHAL_ERR("ipahal debugfs folder was not created\n"); + return; + } + + debugfs_remove_recursive(ipahal_ctx->dent); +} +#else /* CONFIG_DEBUG_FS */ +static void ipahal_debugfs_init(void) {} +static void ipahal_debugfs_remove(void) {} +#endif /* CONFIG_DEBUG_FS */ + +/* + * ipahal_cp_hdr_to_hw_buff_v3() - copy header to hardware buffer according to + * base address and offset given. + * @base: dma base address + * @offset: offset from base address where the data will be copied + * @hdr: the header to be copied + * @hdr_len: the length of the header + */ +static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset, + u8 *const hdr, u32 hdr_len) +{ + memcpy(base + offset, hdr, hdr_len); +} + +/* + * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to + * base address and offset given. + * @type: header processing context type (no processing context, + * IPA_HDR_PROC_ETHII_TO_ETHII etc.) 
+ * @base: dma base address + * @offset: offset from base address where the data will be copied + * @hdr_len: the length of the header + * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr + * @phys_base: memory location in DDR + * @hdr_base_addr: base address in table + * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters + */ +static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type, + void *const base, u32 offset, + u32 hdr_len, bool is_hdr_proc_ctx, + dma_addr_t phys_base, u32 hdr_base_addr, + struct ipa_hdr_offset_entry *offset_entry, + union ipa_l2tp_hdr_proc_ctx_params l2tp_params) +{ + if (type == IPA_HDR_PROC_NONE) { + struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx; + + ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = hdr_len; + ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base : + hdr_base_addr + offset_entry->offset; + IPAHAL_DBG("header address 0x%x\n", + ctx->hdr_add.hdr_addr); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } else if (type == IPA_HDR_PROC_L2TP_HEADER_ADD) { + struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *ctx; + + ctx = (struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = hdr_len; + ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base : + hdr_base_addr + offset_entry->offset; + IPAHAL_DBG("header address 0x%x\n", + ctx->hdr_add.hdr_addr); + ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->l2tp_params.tlv.length = 1; + ctx->l2tp_params.tlv.value = + IPA_HDR_UCP_L2TP_HEADER_ADD; + ctx->l2tp_params.l2tp_params.eth_hdr_retained = + l2tp_params.hdr_add_param.eth_hdr_retained; + ctx->l2tp_params.l2tp_params.input_ip_version = + l2tp_params.hdr_add_param.input_ip_version; + ctx->l2tp_params.l2tp_params.output_ip_version = + l2tp_params.hdr_add_param.output_ip_version; + + IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } else if (type == IPA_HDR_PROC_L2TP_HEADER_REMOVE) { + struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *ctx; + + ctx = (struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = hdr_len; + ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? 
phys_base : + hdr_base_addr + offset_entry->offset; + IPAHAL_DBG("header address 0x%x length %d\n", + ctx->hdr_add.hdr_addr, ctx->hdr_add.tlv.value); + ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->l2tp_params.tlv.length = 1; + ctx->l2tp_params.tlv.value = + IPA_HDR_UCP_L2TP_HEADER_REMOVE; + ctx->l2tp_params.l2tp_params.hdr_len_remove = + l2tp_params.hdr_remove_param.hdr_len_remove; + ctx->l2tp_params.l2tp_params.eth_hdr_retained = + l2tp_params.hdr_remove_param.eth_hdr_retained; + + IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } else { + struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx; + + ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = hdr_len; + ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base : + hdr_base_addr + offset_entry->offset; + IPAHAL_DBG("header address 0x%x\n", + ctx->hdr_add.hdr_addr); + ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->cmd.length = 0; + switch (type) { + case IPA_HDR_PROC_ETHII_TO_ETHII: + ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII; + break; + case IPA_HDR_PROC_ETHII_TO_802_3: + ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3; + break; + case IPA_HDR_PROC_802_3_TO_ETHII: + ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII; + break; + case IPA_HDR_PROC_802_3_TO_802_3: + ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3; + break; + default: + IPAHAL_ERR("unknown ipa_hdr_proc_type %d", type); + WARN_ON(1); + return -EINVAL; + } + IPAHAL_DBG("command id %d\n", ctx->cmd.value); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } + + return 0; +} + +/* + * ipahal_get_proc_ctx_needed_len_v3() - calculates the needed length for + * addition of header processing context according to the type of processing + * context. + * @type: header processing context type (no processing context, + * IPA_HDR_PROC_ETHII_TO_ETHII etc.) + */ +static int ipahal_get_proc_ctx_needed_len_v3(enum ipa_hdr_proc_type type) +{ + return (type == IPA_HDR_PROC_NONE) ? + sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq) : + sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq); +} + +/* + * struct ipahal_hdr_funcs - headers handling functions for specific IPA + * version + * @ipahal_cp_hdr_to_hw_buff - copy function for regular headers + */ +struct ipahal_hdr_funcs { + void (*ipahal_cp_hdr_to_hw_buff)(void *const base, u32 offset, + u8 *const hdr, u32 hdr_len); + + int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type, + void *const base, u32 offset, u32 hdr_len, + bool is_hdr_proc_ctx, dma_addr_t phys_base, + u32 hdr_base_addr, + struct ipa_hdr_offset_entry *offset_entry, + union ipa_l2tp_hdr_proc_ctx_params l2tp_params); + + int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type); +}; + +static struct ipahal_hdr_funcs hdr_funcs; + +static void ipahal_hdr_init(enum ipa_hw_type ipa_hw_type) +{ + + IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type); + + /* + * once there are changes in HW and need to use different case, insert + * new case for the new h/w. put the default always for the latest HW + * and make sure all previous supported versions have their cases. 
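+ * A sketch of such a future case (the _v4 handler names are hypothetical; + * this snapshot routes every version to the _v3 handlers): + * case IPA_HW_v4_x: + * hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff = + * ipahal_cp_proc_ctx_to_hw_buff_v4; + * break;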
+ */ + switch (ipa_hw_type) { + case IPA_HW_v3_0: + default: + hdr_funcs.ipahal_cp_hdr_to_hw_buff = + ipahal_cp_hdr_to_hw_buff_v3; + hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff = + ipahal_cp_proc_ctx_to_hw_buff_v3; + hdr_funcs.ipahal_get_proc_ctx_needed_len = + ipahal_get_proc_ctx_needed_len_v3; + } + IPAHAL_DBG("Exit\n"); +} + +/* + * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to + * base address and offset given. + * @base: dma base address + * @offset: offset from base address where the data will be copied + * @hdr: the header to be copied + * @hdr_len: the length of the header + */ +void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr, + u32 hdr_len) +{ + IPAHAL_DBG_LOW("Entry\n"); + IPAHAL_DBG("base %pK, offset %d, hdr %pK, hdr_len %d\n", base, + offset, hdr, hdr_len); + if (!base || !hdr_len || !hdr) { + IPAHAL_ERR("failed on validating params\n"); + return; + } + + hdr_funcs.ipahal_cp_hdr_to_hw_buff(base, offset, hdr, hdr_len); + + IPAHAL_DBG_LOW("Exit\n"); +} + +/* + * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to + * base address and offset given. + * @type: type of header processing context + * @base: dma base address + * @offset: offset from base address where the data will be copied + * @hdr_len: the length of the header + * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr + * @phys_base: memory location in DDR + * @hdr_base_addr: base address in table + * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters + */ +int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type, + void *const base, u32 offset, u32 hdr_len, + bool is_hdr_proc_ctx, dma_addr_t phys_base, + u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry, + union ipa_l2tp_hdr_proc_ctx_params l2tp_params) +{ + IPAHAL_DBG( + "type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %pK\n" + , type, base, offset, hdr_len, is_hdr_proc_ctx, + hdr_base_addr, offset_entry); + + if (!base || + !hdr_len || + (is_hdr_proc_ctx && !phys_base) || + (!is_hdr_proc_ctx && !offset_entry) || + (!is_hdr_proc_ctx && !hdr_base_addr)) { + IPAHAL_ERR( + "invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n" + , hdr_len, &phys_base, hdr_base_addr + , is_hdr_proc_ctx, offset_entry); + return -EINVAL; + } + + return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset, + hdr_len, is_hdr_proc_ctx, phys_base, + hdr_base_addr, offset_entry, l2tp_params); +} + +/* + * ipahal_get_proc_ctx_needed_len() - calculates the needed length for + * addition of header processing context according to the type of processing + * context + * @type: header processing context type (no processing context, + * IPA_HDR_PROC_ETHII_TO_ETHII etc.) 
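+ * On IPAv3 this resolves to sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq) + * for IPA_HDR_PROC_NONE and sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq) + * otherwise; see ipahal_get_proc_ctx_needed_len_v3() above.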
+ */ +int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type) +{ + int res; + + IPAHAL_DBG("entry\n"); + + res = hdr_funcs.ipahal_get_proc_ctx_needed_len(type); + + IPAHAL_DBG("Exit\n"); + + return res; +} + +/* + * Get IPA Data Processing Star image memory size at IPA SRAM + */ +u32 ipahal_get_dps_img_mem_size(void) +{ + return IPA_HW_DPS_IMG_MEM_SIZE_V3_0; +} + +/* + * Get IPA Header Processing Star image memory size at IPA SRAM + */ +u32 ipahal_get_hps_img_mem_size(void) +{ + return IPA_HW_HPS_IMG_MEM_SIZE_V3_0; +} + +int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base, + struct device *ipa_pdev) +{ + int result; + + IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%pK ipa_pdev=%pK\n", + ipa_hw_type, base, ipa_pdev); + + ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL); + if (!ipahal_ctx) { + IPAHAL_ERR("kzalloc err for ipahal_ctx\n"); + result = -ENOMEM; + goto bail_err_exit; + } + + if (ipa_hw_type < IPA_HW_v3_0) { + IPAHAL_ERR("ipahal supported on IPAv3 and later only\n"); + result = -EINVAL; + goto bail_free_ctx; + } + + if (ipa_hw_type >= IPA_HW_MAX) { + IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type); + result = -EINVAL; + goto bail_free_ctx; + } + + if (!base) { + IPAHAL_ERR("invalid memory io mapping addr\n"); + result = -EINVAL; + goto bail_free_ctx; + } + + if (!ipa_pdev) { + IPAHAL_ERR("invalid IPA platform device\n"); + result = -EINVAL; + goto bail_free_ctx; + } + + ipahal_ctx->hw_type = ipa_hw_type; + ipahal_ctx->base = base; + ipahal_ctx->ipa_pdev = ipa_pdev; + + if (ipahal_reg_init(ipa_hw_type)) { + IPAHAL_ERR("failed to init ipahal reg\n"); + result = -EFAULT; + goto bail_free_ctx; + } + + if (ipahal_imm_cmd_init(ipa_hw_type)) { + IPAHAL_ERR("failed to init ipahal imm cmd\n"); + result = -EFAULT; + goto bail_free_ctx; + } + + if (ipahal_pkt_status_init(ipa_hw_type)) { + IPAHAL_ERR("failed to init ipahal pkt status\n"); + result = -EFAULT; + goto bail_free_ctx; + } + + ipahal_hdr_init(ipa_hw_type); + + if (ipahal_fltrt_init(ipa_hw_type)) { + IPAHAL_ERR("failed to init ipahal flt rt\n"); + result = -EFAULT; + goto bail_free_ctx; + } + + if (ipahal_hw_stats_init(ipa_hw_type)) { + IPAHAL_ERR("failed to init ipahal hw stats\n"); + result = -EFAULT; + goto bail_free_ctx; + } + + ipahal_debugfs_init(); + + return 0; + +bail_free_ctx: + kfree(ipahal_ctx); + ipahal_ctx = NULL; +bail_err_exit: + return result; +} + +void ipahal_destroy(void) +{ + IPAHAL_DBG("Entry\n"); + ipahal_fltrt_destroy(); + ipahal_debugfs_remove(); + kfree(ipahal_ctx); + ipahal_ctx = NULL; +} + +void ipahal_free_dma_mem(struct ipa_mem_buffer *mem) +{ + if (likely(mem)) { + dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base, + mem->phys_base); + mem->size = 0; + mem->base = NULL; + mem->phys_base = 0; + } +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h new file mode 100644 index 000000000000..0f322b570dfa --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h @@ -0,0 +1,665 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef _IPAHAL_H_ +#define _IPAHAL_H_ + +#include +#include "../../ipa_common_i.h" + +/* + * Immediate command names + * + * NOTE:: Any change to this enum, need to change to ipahal_imm_cmd_name_to_str + * array as well. + */ +enum ipahal_imm_cmd_name { + IPA_IMM_CMD_IP_V4_FILTER_INIT, + IPA_IMM_CMD_IP_V6_FILTER_INIT, + IPA_IMM_CMD_IP_V4_NAT_INIT, + IPA_IMM_CMD_IP_V4_ROUTING_INIT, + IPA_IMM_CMD_IP_V6_ROUTING_INIT, + IPA_IMM_CMD_HDR_INIT_LOCAL, + IPA_IMM_CMD_HDR_INIT_SYSTEM, + IPA_IMM_CMD_REGISTER_WRITE, + IPA_IMM_CMD_NAT_DMA, + IPA_IMM_CMD_IP_PACKET_INIT, + IPA_IMM_CMD_DMA_SHARED_MEM, + IPA_IMM_CMD_IP_PACKET_TAG_STATUS, + IPA_IMM_CMD_DMA_TASK_32B_ADDR, + IPA_IMM_CMD_TABLE_DMA, + IPA_IMM_CMD_MAX, +}; + +/* Immediate commands abstracted structures */ + +/* + * struct ipahal_imm_cmd_ip_v4_filter_init - IP_V4_FILTER_INIT cmd payload + * Inits IPv4 filter block. + * @hash_rules_addr: Addr in sys mem where ipv4 hashable flt tbl starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should + * be copied to + * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should + * be copied to + */ +struct ipahal_imm_cmd_ip_v4_filter_init { + u64 hash_rules_addr; + u32 hash_rules_size; + u32 hash_local_addr; + u64 nhash_rules_addr; + u32 nhash_rules_size; + u32 nhash_local_addr; +}; + +/* + * struct ipahal_imm_cmd_ip_v6_filter_init - IP_V6_FILTER_INIT cmd payload + * Inits IPv6 filter block. + * @hash_rules_addr: Addr in sys mem where ipv6 hashable flt tbl starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should + * be copied to + * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should + * be copied to + */ +struct ipahal_imm_cmd_ip_v6_filter_init { + u64 hash_rules_addr; + u32 hash_rules_size; + u32 hash_local_addr; + u64 nhash_rules_addr; + u32 nhash_rules_size; + u32 nhash_local_addr; +}; + +/* + * struct ipahal_imm_cmd_ip_v4_nat_init - IP_V4_NAT_INIT cmd payload + * Inits IPv4 NAT block. Initiate NAT table with it dimensions, location + * cache address abd itger related parameters. + * @table_index: For future support of multiple NAT tables + * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start + * @ipv4_rules_addr_shared: ipv4_rules_addr in shared mem (if not, then sys) + * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expantion NAT + * table starts. IPv4 NAT rules that result in NAT collision are located + * in this table. 
+ * @ipv4_expansion_rules_addr_shared: ipv4_expansion_rules_addr in + * shared mem (if not, then sys) + * @index_table_addr: Addr in sys/shared mem where index table, which points + * to NAT table starts + * @index_table_addr_shared: index_table_addr in shared mem (if not, then sys) + * @index_table_expansion_addr: Addr in sys/shared mem where expansion index + * table starts + * @index_table_expansion_addr_shared: index_table_expansion_addr in + * shared mem (if not, then sys) + * @size_base_tables: Num of entries in NAT tbl and idx tbl (each) + * @size_expansion_tables: Num of entries in NAT expantion tbl and expantion + * idx tbl (each) + * @public_ip_addr: public IP address + */ +struct ipahal_imm_cmd_ip_v4_nat_init { + u8 table_index; + u64 ipv4_rules_addr; + bool ipv4_rules_addr_shared; + u64 ipv4_expansion_rules_addr; + bool ipv4_expansion_rules_addr_shared; + u64 index_table_addr; + bool index_table_addr_shared; + u64 index_table_expansion_addr; + bool index_table_expansion_addr_shared; + u16 size_base_tables; + u16 size_expansion_tables; + u32 public_ip_addr; +}; + +/* + * struct ipahal_imm_cmd_ip_v4_routing_init - IP_V4_ROUTING_INIT cmd payload + * Inits IPv4 routing table/structure - with the rules and other related params + * @hash_rules_addr: Addr in sys mem where ipv4 hashable rt tbl starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should + * be copied to + * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should + * be copied to + */ +struct ipahal_imm_cmd_ip_v4_routing_init { + u64 hash_rules_addr; + u32 hash_rules_size; + u32 hash_local_addr; + u64 nhash_rules_addr; + u32 nhash_rules_size; + u32 nhash_local_addr; +}; + +/* + * struct ipahal_imm_cmd_ip_v6_routing_init - IP_V6_ROUTING_INIT cmd payload + * Inits IPv6 routing table/structure - with the rules and other related params + * @hash_rules_addr: Addr in sys mem where ipv6 hashable rt tbl starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should + * be copied to + * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should + * be copied to + */ +struct ipahal_imm_cmd_ip_v6_routing_init { + u64 hash_rules_addr; + u32 hash_rules_size; + u32 hash_local_addr; + u64 nhash_rules_addr; + u32 nhash_rules_size; + u32 nhash_local_addr; +}; + +/* + * struct ipahal_imm_cmd_hdr_init_local - HDR_INIT_LOCAL cmd payload + * Inits hdr table within local mem with the hdrs and their length. + * @hdr_table_addr: Word address in sys mem where the table starts (SRC) + * @size_hdr_table: Size of the above (in bytes) + * @hdr_addr: header address in IPA sram (used as DST for memory copy) + * @rsvd: reserved + */ +struct ipahal_imm_cmd_hdr_init_local { + u64 hdr_table_addr; + u32 size_hdr_table; + u32 hdr_addr; +}; + +/* + * struct ipahal_imm_cmd_hdr_init_system - HDR_INIT_SYSTEM cmd payload + * Inits hdr table within sys mem with the hdrs and their length. + * @hdr_table_addr: Word address in system memory where the hdrs tbl starts. 
+ */ +struct ipahal_imm_cmd_hdr_init_system { + u64 hdr_table_addr; +}; + +/* + * struct ipahal_imm_cmd_nat_dma - NAT_DMA cmd payload + * Perform DMA operation on NAT related mem addressess. Copy data into + * different locations within NAT associated tbls. (For add/remove NAT rules) + * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op. + * @base_addr: Base addr to which the DMA operation should be performed. + * @offset: offset in bytes from base addr to write 'data' to + * @data: data to be written + */ +struct ipahal_imm_cmd_nat_dma { + u8 table_index; + u8 base_addr; + u32 offset; + u16 data; +}; + +/* + * struct ipahal_imm_cmd_table_dma - TABLE_DMA cmd payload + * Perform DMA operation on NAT and IPV6 connection tracking related mem + * addresses. Copy data into different locations within IPV6CT and NAT + * associated tbls. (For add/remove NAT rules) + * @table_index: NAT tbl index. Defines the tbl on which to perform DMA op. + * @base_addr: Base addr to which the DMA operation should be performed. + * @offset: offset in bytes from base addr to write 'data' to + * @data: data to be written + */ +struct ipahal_imm_cmd_table_dma { + u8 table_index; + u8 base_addr; + u32 offset; + u16 data; +}; + +/* + * struct ipahal_imm_cmd_ip_packet_init - IP_PACKET_INIT cmd payload + * Configuration for specific IP pkt. Shall be called prior to an IP pkt + * data. Pkt will not go through IP pkt processing. + * @destination_pipe_index: Destination pipe index (in case routing + * is enabled, this field will overwrite the rt rule) + */ +struct ipahal_imm_cmd_ip_packet_init { + u32 destination_pipe_index; +}; + +/* + * enum ipa_pipeline_clear_option - Values for pipeline clear waiting options + * @IPAHAL_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue + * shall not be serviced until HPS is clear of packets or immediate commands. + * The high priority Rx queue / Q6ZIP group shall still be serviced normally. + * + * @IPAHAL_SRC_GRP_CLEAR: Wait for originating source group to be clear + * (for no packet contexts allocated to the originating source group). + * The source group / Rx queue shall not be serviced until all previously + * allocated packet contexts are released. All other source groups/queues shall + * be serviced normally. + * + * @IPAHAL_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear. + * All groups / Rx queues shall not be serviced until IPA pipeline is fully + * clear. This should be used for debug only. + */ +enum ipahal_pipeline_clear_option { + IPAHAL_HPS_CLEAR, + IPAHAL_SRC_GRP_CLEAR, + IPAHAL_FULL_PIPELINE_CLEAR +}; + +/* + * struct ipahal_imm_cmd_register_write - REGISTER_WRITE cmd payload + * Write value to register. Allows reg changes to be synced with data packet + * and other immediate commands. Can be used to access the sram + * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr + * @value: value to write to register + * @value_mask: mask specifying which value bits to write to the register + * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait) + * @pipeline_clear_option: options for pipeline clear waiting + */ +struct ipahal_imm_cmd_register_write { + u32 offset; + u32 value; + u32 value_mask; + bool skip_pipeline_clear; + enum ipahal_pipeline_clear_option pipeline_clear_options; +}; + +/* + * struct ipahal_imm_cmd_dma_shared_mem - DMA_SHARED_MEM cmd payload + * Perform mem copy into or out of the SW area of IPA local mem + * @size: Size in bytes of data to copy. 
Expected size is up to 2K bytes + * @local_addr: Address in IPA local memory + * @clear_after_read: Clear local memory at the end of a read operation allows + * atomic read and clear if HPS is clear. Ignore for writes. + * @is_read: Read operation from local memory? If not, then write. + * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait) + * @pipeline_clear_option: options for pipeline clear waiting + * @system_addr: Address in system memory + */ +struct ipahal_imm_cmd_dma_shared_mem { + u32 size; + u32 local_addr; + bool clear_after_read; + bool is_read; + bool skip_pipeline_clear; + enum ipahal_pipeline_clear_option pipeline_clear_options; + u64 system_addr; +}; + +/* + * struct ipahal_imm_cmd_ip_packet_tag_status - IP_PACKET_TAG_STATUS cmd payload + * This cmd is used for to allow SW to track HW processing by setting a TAG + * value that is passed back to SW inside Packet Status information. + * TAG info will be provided as part of Packet Status info generated for + * the next pkt transferred over the pipe. + * This immediate command must be followed by a packet in the same transfer. + * @tag: Tag that is provided back to SW + */ +struct ipahal_imm_cmd_ip_packet_tag_status { + u64 tag; +}; + +/* + * struct ipahal_imm_cmd_dma_task_32b_addr - IPA_DMA_TASK_32B_ADDR cmd payload + * Used by clients using 32bit addresses. Used to perform DMA operation on + * multiple descriptors. + * The Opcode is dynamic, where it holds the number of buffer to process + * @cmplt: Complete flag: If true, IPA interrupt SW when the entire + * DMA related data was completely xfered to its destination. + * @eof: Enf Of Frame flag: If true, IPA assert the EOT to the + * dest client. This is used used for aggr sequence + * @flsh: Flush flag: If true pkt will go through the IPA blocks but + * will not be xfered to dest client but rather will be discarded + * @lock: Lock pipe flag: If true, IPA will stop processing descriptors + * from other EPs in the same src grp (RX queue) + * @unlock: Unlock pipe flag: If true, IPA will stop exclusively + * servicing current EP out of the src EPs of the grp (RX queue) + * @size1: Size of buffer1 data + * @addr1: Pointer to buffer1 data + * @packet_size: Total packet size. If a pkt send using multiple DMA_TASKs, + * only the first one needs to have this field set. It will be ignored + * in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK + * must contain this field (2 or more buffers) or EOT. + */ +struct ipahal_imm_cmd_dma_task_32b_addr { + bool cmplt; + bool eof; + bool flsh; + bool lock; + bool unlock; + u32 size1; + u32 addr1; + u32 packet_size; +}; + +/* + * struct ipahal_imm_cmd_pyld - Immediate cmd payload information + * @len: length of the buffer + * @opcode: opcode of the immediate command + * @data: buffer contains the immediate command payload. Buffer goes + * back to back with this structure + */ +struct ipahal_imm_cmd_pyld { + u16 len; + u16 opcode; + u8 data[0]; +}; + + +/* Immediate command Function APIs */ + +/* + * ipahal_imm_cmd_name_str() - returns string that represent the imm cmd + * @cmd_name: [in] Immediate command name + */ +const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name); + +/* + * ipahal_construct_imm_cmd() - Construct immdiate command + * This function builds imm cmd bulk that can be be sent to IPA + * The command will be allocated dynamically. 
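+ * A minimal caller-side sketch (reg_off and reg_val are placeholders): + * struct ipahal_imm_cmd_register_write wr = { .offset = reg_off, + * .value = reg_val, .value_mask = ~0U, .skip_pipeline_clear = false, + * .pipeline_clear_options = IPAHAL_HPS_CLEAR }; + * struct ipahal_imm_cmd_pyld *pyld = ipahal_construct_imm_cmd( + * IPA_IMM_CMD_REGISTER_WRITE, &wr, false);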
+ * After done using it, call ipahal_destroy_imm_cmd() to release it + */ +struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd( + enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx); + +/* + * ipahal_construct_nop_imm_cmd() - Construct immediate comamnd for NO-Op + * Core driver may want functionality to inject NOP commands to IPA + * to ensure e.g., PIPLINE clear before someother operation. + * The functionality given by this function can be reached by + * ipahal_construct_imm_cmd(). This function is helper to the core driver + * to reach this NOP functionlity easily. + * @skip_pipline_clear: if to skip pipeline clear waiting (don't wait) + * @pipline_clr_opt: options for pipeline clear waiting + * @is_atomic_ctx: is called in atomic context or can sleep? + */ +struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd( + bool skip_pipline_clear, + enum ipahal_pipeline_clear_option pipline_clr_opt, + bool is_atomic_ctx); + +/* + * ipahal_destroy_imm_cmd() - Destroy/Release bulk that was built + * by the construction functions + */ +static inline void ipahal_destroy_imm_cmd(struct ipahal_imm_cmd_pyld *pyld) +{ + kfree(pyld); +} + + +/* IPA Status packet Structures and Function APIs */ + +/* + * enum ipahal_pkt_status_opcode - Packet Status Opcode + * @IPAHAL_STATUS_OPCODE_PACKET_2ND_PASS: Packet Status generated as part of + * IPA second processing pass for a packet (i.e. IPA XLAT processing for + * the translated packet). + */ +enum ipahal_pkt_status_opcode { + IPAHAL_PKT_STATUS_OPCODE_PACKET = 0, + IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE, + IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET, + IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET, + IPAHAL_PKT_STATUS_OPCODE_LOG, + IPAHAL_PKT_STATUS_OPCODE_DCMP, + IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS, +}; + +/* + * enum ipahal_pkt_status_exception - Packet Status exception type + * @IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH: formerly IHL exception. + * + * Note: IPTYPE, PACKET_LENGTH and PACKET_THRESHOLD exceptions means that + * partial / no IP processing took place and corresponding Status Mask + * fields should be ignored. Flt and rt info is not valid. + * + * NOTE:: Any change to this enum, need to change to + * ipahal_pkt_status_exception_to_str array as well. + */ +enum ipahal_pkt_status_exception { + IPAHAL_PKT_STATUS_EXCEPTION_NONE = 0, + IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR, + IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE, + IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH, + IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD, + IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS, + IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT, + /* + * NAT and IPv6CT have the same value at HW. + * NAT for IPv4 and IPv6CT for IPv6 exceptions + */ + IPAHAL_PKT_STATUS_EXCEPTION_NAT, + IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT, + IPAHAL_PKT_STATUS_EXCEPTION_MAX, +}; + +/* + * enum ipahal_pkt_status_mask - Packet Status bitmask shift values of + * the contained flags. This bitmask indicates flags on the properties of + * the packet as well as IPA processing it may had. + * @FRAG_PROCESS: Frag block processing flag: Was pkt processed by frag block? + * Also means the frag info is valid unless exception or first frag + * @FILT_PROCESS: Flt block processing flag: Was pkt processed by flt block? + * Also means that flt info is valid. + * @NAT_PROCESS: NAT block processing flag: Was pkt processed by NAT block? + * Also means that NAT info is valid, unless exception. + * @ROUTE_PROCESS: Rt block processing flag: Was pkt processed by rt block? + * Also means that rt info is valid, unless exception. 
+ * @TAG_VALID: Flag specifying if TAG and TAG info valid? + * @FRAGMENT: Flag specifying if pkt is IP fragment. + * @FIRST_FRAGMENT: Flag specifying if pkt is first fragment. In this case, frag + * info is invalid + * @V4: Flag specifying pkt is IPv4 or IPv6 + * @CKSUM_PROCESS: CSUM block processing flag: Was pkt processed by csum block? + * If so, csum trailer exists + * @AGGR_PROCESS: Aggr block processing flag: Was pkt processed by aggr block? + * @DEST_EOT: Flag specifying if EOT was asserted for the pkt on dest endp + * @DEAGGR_PROCESS: Deaggr block processing flag: Was pkt processed by deaggr + * block? + * @DEAGG_FIRST: Flag specifying if this is the first pkt in deaggr frame + * @SRC_EOT: Flag specifying if EOT asserted by src endp when sending the buffer + * @PREV_EOT: Flag specifying if EOT was sent just before the pkt as part of + * aggr hard-byte-limit + * @BYTE_LIMIT: Flag specifying if pkt is over a configured byte limit. + */ +enum ipahal_pkt_status_mask { + IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT = 0, + IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, + IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT, + IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT, + IPAHAL_PKT_STATUS_MASK_V4_SHFT, + IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT, + IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT, + IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT, + IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT, + IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT, + IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT, +}; + +/* + * Returns boolean value representing a property of the a packet. + * @__flag_shft: The shift value of the flag of the status bitmask of + * @__status: Pointer to abstracrted status structure + * the needed property. See enum ipahal_pkt_status_mask + */ +#define IPAHAL_PKT_STATUS_MASK_FLAG_VAL(__flag_shft, __status) \ + (((__status)->status_mask) & ((u32)0x1<<(__flag_shft)) ? true : false) + +/* + * enum ipahal_pkt_status_nat_type - Type of NAT + */ +enum ipahal_pkt_status_nat_type { + IPAHAL_PKT_STATUS_NAT_NONE, + IPAHAL_PKT_STATUS_NAT_SRC, + IPAHAL_PKT_STATUS_NAT_DST, +}; + +/* + * struct ipahal_pkt_status - IPA status packet abstracted payload. + * This structure describes the status packet fields for the + * following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET, + * IPA_STATUS_SUSPENDED_PACKET. + * Other statuses types has different status packet structure. + * @status_opcode: The Type of the status (Opcode). + * @exception: The first exception that took place. + * In case of exception, src endp and pkt len are always valid. + * @status_mask: Bit mask for flags on several properties on the packet + * and processing it may passed at IPA. See enum ipahal_pkt_status_mask + * @pkt_len: Pkt pyld len including hdr and retained hdr if used. Does + * not include padding or checksum trailer len. + * @endp_src_idx: Source end point index. + * @endp_dest_idx: Destination end point index. + * Not valid in case of exception + * @metadata: meta data value used by packet + * @flt_local: Filter table location flag: Does matching flt rule belongs to + * flt tbl that resides in lcl memory? (if not, then system mem) + * @flt_hash: Filter hash hit flag: Does matching flt rule was in hash tbl? + * @flt_global: Global filter rule flag: Does matching flt rule belongs to + * the global flt tbl? 
+ * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
+ * specify to retain the header?
+ * @flt_miss: Filtering miss flag: Was there a filtering rule miss?
+ * In case of a miss, all flt info is to be ignored
+ * @flt_rule_id: The ID of the matching filter rule (if no miss).
+ * This info can be combined with endp_src_idx to locate the exact rule.
+ * @rt_local: Route table location flag: Does the matching rt rule belong to
+ * an rt tbl that resides in lcl memory? (if not, then system mem)
+ * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
+ * @ucp: UC Processing flag
+ * @rt_tbl_idx: Index of the rt tbl that contains the rule on which there was
+ * a match
+ * @rt_miss: Routing miss flag: Was there a routing rule miss?
+ * @rt_rule_id: The ID of the matching rt rule (if no miss). This info
+ * can be combined with rt_tbl_idx to locate the exact rule.
+ * @nat_hit: NAT hit flag: Was there a NAT hit?
+ * @nat_entry_idx: Index of the NAT entry used for NAT processing
+ * @nat_type: Defines the type of the NAT operation:
+ * @tag_info: S/W defined value provided via immediate command
+ * @seq_num: Per source endp unique packet sequence number
+ * @time_of_day_ctr: running counter from IPA clock
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ * taken from the table that resides in local memory? (If not, then system mem)
+ * @hdr_offset: Offset of the used header in the header table
+ * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @frag_rule: Frag rule index in the H/W frag table in case of a frag hit
+ */
+struct ipahal_pkt_status {
+ enum ipahal_pkt_status_opcode status_opcode;
+ enum ipahal_pkt_status_exception exception;
+ u32 status_mask;
+ u32 pkt_len;
+ u8 endp_src_idx;
+ u8 endp_dest_idx;
+ u32 metadata;
+ bool flt_local;
+ bool flt_hash;
+ bool flt_global;
+ bool flt_ret_hdr;
+ bool flt_miss;
+ u16 flt_rule_id;
+ bool rt_local;
+ bool rt_hash;
+ bool ucp;
+ u8 rt_tbl_idx;
+ bool rt_miss;
+ u16 rt_rule_id;
+ bool nat_hit;
+ u16 nat_entry_idx;
+ enum ipahal_pkt_status_nat_type nat_type;
+ u64 tag_info;
+ u8 seq_num;
+ u32 time_of_day_ctr;
+ bool hdr_local;
+ u16 hdr_offset;
+ bool frag_hit;
+ u8 frag_rule;
+};
+
+/*
+ * ipahal_pkt_status_get_size() - Get H/W size of packet status
+ */
+u32 ipahal_pkt_status_get_size(void);
+
+/*
+ * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse(const void *unparsed_status,
+ struct ipahal_pkt_status *status);
+
+/*
+ * ipahal_pkt_status_exception_str() - returns a string representing the
+ * exception type
+ * @exception: [in] The exception type
+ */
+const char *ipahal_pkt_status_exception_str(
+ enum ipahal_pkt_status_exception exception);
+
+/*
+ * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
+ * the base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len);
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to the hardware
+ * buffer according to the base address and offset given.
+ * @type: type of header processing context + * @base: dma base address + * @offset: offset from base address where the data will be copied + * @hdr_len: the length of the header + * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr + * @phys_base: memory location in DDR + * @hdr_base_addr: base address in table + * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters + */ +int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type, + void *base, u32 offset, u32 hdr_len, + bool is_hdr_proc_ctx, dma_addr_t phys_base, + u32 hdr_base_addr, + struct ipa_hdr_offset_entry *offset_entry, + union ipa_l2tp_hdr_proc_ctx_params l2tp_params); + +/* + * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition + * of header processing context according to the type of processing context + * @type: header processing context type (no processing context, + * IPA_HDR_PROC_ETHII_TO_ETHII etc.) + */ +int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type); + +/* + * Get IPA Data Processing Star image memory size at IPA SRAM + */ +u32 ipahal_get_dps_img_mem_size(void); + +/* + * Get IPA Header Processing Star image memory size at IPA SRAM + */ +u32 ipahal_get_hps_img_mem_size(void); + +int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base, + struct device *ipa_pdev); +void ipahal_destroy(void); +void ipahal_free_dma_mem(struct ipa_mem_buffer *mem); + +#endif /* _IPAHAL_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c new file mode 100644 index 000000000000..e196ef4eab89 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c @@ -0,0 +1,3516 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include "ipahal.h" +#include "ipahal_fltrt.h" +#include "ipahal_fltrt_i.h" +#include "ipahal_i.h" +#include "../../ipa_common_i.h" + +/* + * struct ipahal_fltrt_obj - Flt/Rt H/W information for specific IPA version + * @support_hash: Is hashable tables supported + * @tbl_width: Width of table in bytes + * @sysaddr_alignment: System table address alignment + * @lcladdr_alignment: Local table offset alignment + * @blk_sz_alignment: Rules block size alignment + * @rule_start_alignment: Rule start address alignment + * @tbl_hdr_width: Width of the header structure in bytes + * @tbl_addr_mask: Masking for Table address + * @rule_max_prio: Max possible priority of a rule + * @rule_min_prio: Min possible priority of a rule + * @low_rule_id: Low value of Rule ID that can be used + * @rule_id_bit_len: Rule is high (MSB) bit len + * @rule_buf_size: Max size rule may utilize. 
+ * @write_val_to_hdr: Write address or offset to header entry
+ * @create_flt_bitmap: Create bitmap in H/W format using given bitmap
+ * @create_tbl_addr: Given a raw table address, create the H/W formatted one
+ * @parse_tbl_addr: Parse the given H/W address (hdr format)
+ * @rt_generate_hw_rule: Generate RT rule in H/W format
+ * @flt_generate_hw_rule: Generate FLT rule in H/W format
+ * @flt_generate_eq: Generate flt equation attributes from rule attributes
+ * @rt_parse_hw_rule: Parse rt rule read from H/W
+ * @flt_parse_hw_rule: Parse flt rule read from H/W
+ * @eq_bitfield: Array of the bit fields of the supported equations
+ */
+struct ipahal_fltrt_obj {
+ bool support_hash;
+ u32 tbl_width;
+ u32 sysaddr_alignment;
+ u32 lcladdr_alignment;
+ u32 blk_sz_alignment;
+ u32 rule_start_alignment;
+ u32 tbl_hdr_width;
+ u32 tbl_addr_mask;
+ int rule_max_prio;
+ int rule_min_prio;
+ u32 low_rule_id;
+ u32 rule_id_bit_len;
+ u32 rule_buf_size;
+ u8* (*write_val_to_hdr)(u64 val, u8 *hdr);
+ u64 (*create_flt_bitmap)(u64 ep_bitmap);
+ u64 (*create_tbl_addr)(bool is_sys, u64 addr);
+ void (*parse_tbl_addr)(u64 hwaddr, u64 *addr, bool *is_sys);
+ int (*rt_generate_hw_rule)(struct ipahal_rt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+ int (*flt_generate_hw_rule)(struct ipahal_flt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+ int (*flt_generate_eq)(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+ int (*rt_parse_hw_rule)(u8 *addr, struct ipahal_rt_rule_entry *rule);
+ int (*flt_parse_hw_rule)(u8 *addr, struct ipahal_flt_rule_entry *rule);
+ u8 eq_bitfield[IPA_EQ_MAX];
+};
+
+
+static u64 ipa_fltrt_create_flt_bitmap(u64 ep_bitmap)
+{
+ /* At IPA3, a global configuration is possible but not used */
+ return (ep_bitmap << 1) & ~0x1;
+}
+
+static u64 ipa_fltrt_create_tbl_addr(bool is_sys, u64 addr)
+{
+ if (is_sys) {
+ if (addr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+ IPAHAL_ERR(
+ "sys addr is not aligned accordingly addr=0x%pad\n",
+ &addr);
+ ipa_assert();
+ return 0;
+ }
+ } else {
+ if (addr & IPA3_0_HW_TBL_LCLADDR_ALIGNMENT) {
+ IPAHAL_ERR("addr/ofst isn't lcl addr aligned %llu\n",
+ addr);
+ ipa_assert();
+ return 0;
+ }
+ /*
+ * For local tables (at sram), offsets are used as table
+ * addresses. The offset needs to be in 8B units
+ * (local address aligned) and left shifted to its place.
+ * The local bit needs to be enabled.
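+ * For example, assuming an 8B local alignment and a 128B address step
+ * (alignment mask 0x7, address mask 0x7F), a local offset of 0x50 would
+ * be encoded as (0x50 / 8) * 128 + 1 = 0x501: the offset in 8B units,
+ * shifted above the address mask, with the local bit set.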
+ */
+ addr /= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+ addr *= IPA3_0_HW_TBL_ADDR_MASK + 1;
+ addr += 1;
+ }
+
+ return addr;
+}
+
+static void ipa_fltrt_parse_tbl_addr(u64 hwaddr, u64 *addr, bool *is_sys)
+{
+ IPAHAL_DBG_LOW("Parsing hwaddr 0x%llx\n", hwaddr);
+
+ *is_sys = !(hwaddr & 0x1);
+ hwaddr &= (~0ULL - 1);
+ if (hwaddr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+ IPAHAL_ERR(
+ "sys addr is not aligned accordingly addr=0x%pad\n",
+ &hwaddr);
+ ipa_assert();
+ return;
+ }
+
+ if (!*is_sys) {
+ hwaddr /= IPA3_0_HW_TBL_ADDR_MASK + 1;
+ hwaddr *= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+ }
+
+ *addr = hwaddr;
+}
+
+/* Update these tables if the number of equations changes */
+static const int ipa3_0_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+ IPA_OFFSET_MEQ32_1};
+static const int ipa3_0_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+ IPA_OFFSET_MEQ128_1};
+static const int ipa3_0_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+ IPA_IHL_OFFSET_RANGE16_1};
+static const int ipa3_0_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+ IPA_IHL_OFFSET_MEQ32_1};
+
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule);
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+ const struct ipa_ipfltri_rule_eq *attrib, u8 **buf);
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_rt_parse_hw_rule(u8 *addr,
+ struct ipahal_rt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule(u8 *addr,
+ struct ipahal_flt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule_ipav4(u8 *addr,
+ struct ipahal_flt_rule_entry *rule);
+
+#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
+ (ARRAY_SIZE(__eq_array) <= (__eq_index))
+
+#define IPA_GET_RULE_EQ_BIT_PTRN(__eq) \
+ (BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].eq_bitfield[(__eq)]))
+
+/*
+ * ipa_fltrt_rule_generation_err_check() - check basic validity of the rule
+ * attribs before starting to build it;
+ * checks that ipv4 attribs are not used on an ipv6 rule and vice-versa
+ * @ipt: IP address type
+ * @attrib: IPA rule attribute
+ *
+ * Return: 0 on success, -EPERM on failure
+ */
+static int ipa_fltrt_rule_generation_err_check(
+ enum ipa_ip_type ipt, const struct ipa_rule_attrib *attrib)
+{
+ if (ipt == IPA_IP_v4) {
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+ attrib->attrib_mask & IPA_FLT_TC ||
+ attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ IPAHAL_ERR("v6 attrib's specified for v4 rule\n");
+ return -EPERM;
+ }
+ } else if (ipt == IPA_IP_v6) {
+ if (attrib->attrib_mask & IPA_FLT_TOS ||
+ attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ IPAHAL_ERR("v4 attrib's specified for v6 rule\n");
+ return -EPERM;
+ }
+ } else {
+ IPAHAL_ERR("unsupported ip %d\n", ipt);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int ipa_rt_gen_hw_rule(struct ipahal_rt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf)
+{
+ struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+ u8 *start;
+ u16 en_rule = 0;
+
+ start = buf;
+ rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)buf;
+
+ ipa_assert_on(params->dst_pipe_idx & ~0x1F);
+ rule_hdr->u.hdr.pipe_dest_idx = params->dst_pipe_idx;
+ switch (params->hdr_type) {
+ case IPAHAL_RT_RULE_HDR_PROC_CTX:
+ rule_hdr->u.hdr.system = !params->hdr_lcl;
+
rule_hdr->u.hdr.proc_ctx = 1; + ipa_assert_on(params->hdr_ofst & 31); + rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 5; + break; + case IPAHAL_RT_RULE_HDR_RAW: + rule_hdr->u.hdr.system = !params->hdr_lcl; + rule_hdr->u.hdr.proc_ctx = 0; + ipa_assert_on(params->hdr_ofst & 3); + rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 2; + break; + case IPAHAL_RT_RULE_HDR_NONE: + rule_hdr->u.hdr.system = !params->hdr_lcl; + rule_hdr->u.hdr.proc_ctx = 0; + rule_hdr->u.hdr.hdr_offset = 0; + break; + default: + WARN(1, "Invalid HDR type %d\n", params->hdr_type); + return -EINVAL; + }; + + ipa_assert_on(params->priority & ~0x3FF); + rule_hdr->u.hdr.priority = params->priority; + rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0; + ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + rule_hdr->u.hdr.rule_id = params->id; + + buf += sizeof(struct ipa3_0_rt_rule_hw_hdr); + + if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, ¶ms->rule->attrib, + &buf, &en_rule)) { + IPAHAL_ERR("fail to generate hw rule\n"); + return -EPERM; + } + rule_hdr->u.hdr.en_rule = en_rule; + + IPAHAL_DBG_LOW("en_rule 0x%x\n", en_rule); + ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr); + + if (*hw_len == 0) { + *hw_len = buf - start; + } else if (*hw_len != (buf - start)) { + IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n", + *hw_len, (buf - start)); + return -EPERM; + } + + return 0; +} + +static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params, + u32 *hw_len, u8 *buf) +{ + struct ipa3_0_flt_rule_hw_hdr *rule_hdr; + u8 *start; + u16 en_rule = 0; + + start = buf; + rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)buf; + + switch (params->rule->action) { + case IPA_PASS_TO_ROUTING: + rule_hdr->u.hdr.action = 0x0; + break; + case IPA_PASS_TO_SRC_NAT: + rule_hdr->u.hdr.action = 0x1; + break; + case IPA_PASS_TO_DST_NAT: + rule_hdr->u.hdr.action = 0x2; + break; + case IPA_PASS_TO_EXCEPTION: + rule_hdr->u.hdr.action = 0x3; + break; + default: + WARN(1, "Invalid Rule Action %d\n", params->rule->action); + return -EINVAL; + } + ipa_assert_on(params->rt_tbl_idx & ~0x1F); + rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx; + rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 
0x1 : 0x0; + rule_hdr->u.hdr.rsvd1 = 0; + rule_hdr->u.hdr.rsvd2 = 0; + rule_hdr->u.hdr.rsvd3 = 0; + + ipa_assert_on(params->priority & ~0x3FF); + rule_hdr->u.hdr.priority = params->priority; + ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + rule_hdr->u.hdr.rule_id = params->id; + + buf += sizeof(struct ipa3_0_flt_rule_hw_hdr); + + if (params->rule->eq_attrib_type) { + if (ipa_fltrt_generate_hw_rule_bdy_from_eq( + ¶ms->rule->eq_attrib, &buf)) { + IPAHAL_ERR("fail to generate hw rule from eq\n"); + return -EPERM; + } + en_rule = params->rule->eq_attrib.rule_eq_bitmap; + } else { + if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, + ¶ms->rule->attrib, &buf, &en_rule)) { + IPAHAL_ERR("fail to generate hw rule\n"); + return -EPERM; + } + } + rule_hdr->u.hdr.en_rule = en_rule; + + IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n", + en_rule, + rule_hdr->u.hdr.action, + rule_hdr->u.hdr.rt_tbl_idx, + rule_hdr->u.hdr.retain_hdr); + IPAHAL_DBG_LOW("priority=%d, rule_id=%d\n", + rule_hdr->u.hdr.priority, + rule_hdr->u.hdr.rule_id); + + ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr); + + if (*hw_len == 0) { + *hw_len = buf - start; + } else if (*hw_len != (buf - start)) { + IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n", + *hw_len, (buf - start)); + return -EPERM; + } + + return 0; +} + +static int ipa_flt_gen_hw_rule_ipav4(struct ipahal_flt_rule_gen_params *params, + u32 *hw_len, u8 *buf) +{ + struct ipa4_0_flt_rule_hw_hdr *rule_hdr; + u8 *start; + u16 en_rule = 0; + + start = buf; + rule_hdr = (struct ipa4_0_flt_rule_hw_hdr *)buf; + + switch (params->rule->action) { + case IPA_PASS_TO_ROUTING: + rule_hdr->u.hdr.action = 0x0; + break; + case IPA_PASS_TO_SRC_NAT: + rule_hdr->u.hdr.action = 0x1; + break; + case IPA_PASS_TO_DST_NAT: + rule_hdr->u.hdr.action = 0x2; + break; + case IPA_PASS_TO_EXCEPTION: + rule_hdr->u.hdr.action = 0x3; + break; + default: + WARN(1, "Invalid Rule Action %d\n", params->rule->action); + return -EINVAL; + } + + ipa_assert_on(params->rt_tbl_idx & ~0x1F); + rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx; + rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 
0x1 : 0x0; + + ipa_assert_on(params->rule->pdn_idx & ~0xF); + rule_hdr->u.hdr.pdn_idx = params->rule->pdn_idx; + rule_hdr->u.hdr.set_metadata = params->rule->set_metadata; + rule_hdr->u.hdr.rsvd2 = 0; + rule_hdr->u.hdr.rsvd3 = 0; + + ipa_assert_on(params->priority & ~0x3FF); + rule_hdr->u.hdr.priority = params->priority; + ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1)); + rule_hdr->u.hdr.rule_id = params->id; + + buf += sizeof(struct ipa4_0_flt_rule_hw_hdr); + + if (params->rule->eq_attrib_type) { + if (ipa_fltrt_generate_hw_rule_bdy_from_eq( + ¶ms->rule->eq_attrib, &buf)) { + IPAHAL_ERR("fail to generate hw rule from eq\n"); + return -EPERM; + } + en_rule = params->rule->eq_attrib.rule_eq_bitmap; + } else { + if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, + ¶ms->rule->attrib, &buf, &en_rule)) { + IPAHAL_ERR("fail to generate hw rule\n"); + return -EPERM; + } + } + rule_hdr->u.hdr.en_rule = en_rule; + + IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n", + en_rule, + rule_hdr->u.hdr.action, + rule_hdr->u.hdr.rt_tbl_idx, + rule_hdr->u.hdr.retain_hdr); + IPAHAL_DBG_LOW("priority=%d, rule_id=%d, pdn=%d, set_metadata=%d\n", + rule_hdr->u.hdr.priority, + rule_hdr->u.hdr.rule_id, + rule_hdr->u.hdr.pdn_idx, + rule_hdr->u.hdr.set_metadata); + + ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr); + + if (*hw_len == 0) { + *hw_len = buf - start; + } else if (*hw_len != (buf - start)) { + IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n", + *hw_len, (buf - start)); + return -EPERM; + } + + return 0; +} + +/* + * This array contains the FLT/RT info for IPAv3 and later. + * All the information on IPAv3 are statically defined below. + * If information is missing regarding on some IPA version, + * the init function will fill it with the information from the previous + * IPA version. + * Information is considered missing if all of the fields are 0. 
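+ * e.g. an entry for a later hw type that is left with all of its fields
+ * at zero would be filled at init time from the closest earlier defined
+ * entry (the IPA_HW_v3_0 entry below).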
+ */ +static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = { + /* IPAv3 */ + [IPA_HW_v3_0] = { + true, + IPA3_0_HW_TBL_WIDTH, + IPA3_0_HW_TBL_SYSADDR_ALIGNMENT, + IPA3_0_HW_TBL_LCLADDR_ALIGNMENT, + IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT, + IPA3_0_HW_RULE_START_ALIGNMENT, + IPA3_0_HW_TBL_HDR_WIDTH, + IPA3_0_HW_TBL_ADDR_MASK, + IPA3_0_RULE_MAX_PRIORITY, + IPA3_0_RULE_MIN_PRIORITY, + IPA3_0_LOW_RULE_ID, + IPA3_0_RULE_ID_BIT_LEN, + IPA3_0_HW_RULE_BUF_SIZE, + ipa_write_64, + ipa_fltrt_create_flt_bitmap, + ipa_fltrt_create_tbl_addr, + ipa_fltrt_parse_tbl_addr, + ipa_rt_gen_hw_rule, + ipa_flt_gen_hw_rule, + ipa_flt_generate_eq, + ipa_rt_parse_hw_rule, + ipa_flt_parse_hw_rule, + { + [IPA_TOS_EQ] = 0, + [IPA_PROTOCOL_EQ] = 1, + [IPA_TC_EQ] = 2, + [IPA_OFFSET_MEQ128_0] = 3, + [IPA_OFFSET_MEQ128_1] = 4, + [IPA_OFFSET_MEQ32_0] = 5, + [IPA_OFFSET_MEQ32_1] = 6, + [IPA_IHL_OFFSET_MEQ32_0] = 7, + [IPA_IHL_OFFSET_MEQ32_1] = 8, + [IPA_METADATA_COMPARE] = 9, + [IPA_IHL_OFFSET_RANGE16_0] = 10, + [IPA_IHL_OFFSET_RANGE16_1] = 11, + [IPA_IHL_OFFSET_EQ_32] = 12, + [IPA_IHL_OFFSET_EQ_16] = 13, + [IPA_FL_EQ] = 14, + [IPA_IS_FRAG] = 15, + }, + }, + + /* IPAv4 */ + [IPA_HW_v4_0] = { + true, + IPA3_0_HW_TBL_WIDTH, + IPA3_0_HW_TBL_SYSADDR_ALIGNMENT, + IPA3_0_HW_TBL_LCLADDR_ALIGNMENT, + IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT, + IPA3_0_HW_RULE_START_ALIGNMENT, + IPA3_0_HW_TBL_HDR_WIDTH, + IPA3_0_HW_TBL_ADDR_MASK, + IPA3_0_RULE_MAX_PRIORITY, + IPA3_0_RULE_MIN_PRIORITY, + IPA3_0_LOW_RULE_ID, + IPA3_0_RULE_ID_BIT_LEN, + IPA3_0_HW_RULE_BUF_SIZE, + ipa_write_64, + ipa_fltrt_create_flt_bitmap, + ipa_fltrt_create_tbl_addr, + ipa_fltrt_parse_tbl_addr, + ipa_rt_gen_hw_rule, + ipa_flt_gen_hw_rule_ipav4, + ipa_flt_generate_eq, + ipa_rt_parse_hw_rule, + ipa_flt_parse_hw_rule_ipav4, + { + [IPA_TOS_EQ] = 0, + [IPA_PROTOCOL_EQ] = 1, + [IPA_TC_EQ] = 2, + [IPA_OFFSET_MEQ128_0] = 3, + [IPA_OFFSET_MEQ128_1] = 4, + [IPA_OFFSET_MEQ32_0] = 5, + [IPA_OFFSET_MEQ32_1] = 6, + [IPA_IHL_OFFSET_MEQ32_0] = 7, + [IPA_IHL_OFFSET_MEQ32_1] = 8, + [IPA_METADATA_COMPARE] = 9, + [IPA_IHL_OFFSET_RANGE16_0] = 10, + [IPA_IHL_OFFSET_RANGE16_1] = 11, + [IPA_IHL_OFFSET_EQ_32] = 12, + [IPA_IHL_OFFSET_EQ_16] = 13, + [IPA_FL_EQ] = 14, + [IPA_IS_FRAG] = 15, + }, + }, +}; + +static int ipa_flt_generate_eq(enum ipa_ip_type ipt, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb) +{ + if (ipa_fltrt_rule_generation_err_check(ipt, attrib)) + return -EPERM; + + if (ipt == IPA_IP_v4) { + if (ipa_flt_generate_eq_ip4(ipt, attrib, eq_atrb)) { + IPAHAL_ERR("failed to build ipv4 flt eq rule\n"); + return -EPERM; + } + } else if (ipt == IPA_IP_v6) { + if (ipa_flt_generate_eq_ip6(ipt, attrib, eq_atrb)) { + IPAHAL_ERR("failed to build ipv6 flt eq rule\n"); + return -EPERM; + } + } else { + IPAHAL_ERR("unsupported ip %d\n", ipt); + return -EPERM; + } + + /* + * default "rule" means no attributes set -> map to + * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0 + */ + if (attrib->attrib_mask == 0) { + eq_atrb->rule_eq_bitmap = 0; + eq_atrb->rule_eq_bitmap |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_OFFSET_MEQ32_0); + eq_atrb->offset_meq_32[0].offset = 0; + eq_atrb->offset_meq_32[0].mask = 0; + eq_atrb->offset_meq_32[0].value = 0; + } + + return 0; +} + +static void ipa_fltrt_generate_mac_addr_hw_rule(u8 **extra, u8 **rest, + u8 hdr_mac_addr_offset, + const uint8_t mac_addr_mask[ETH_ALEN], + const uint8_t mac_addr[ETH_ALEN]) +{ + int i; + + *extra = ipa_write_8(hdr_mac_addr_offset, *extra); + + /* LSB MASK and ADDR */ + *rest = ipa_write_64(0, *rest); + *rest = 
ipa_write_64(0, *rest); + + /* MSB MASK and ADDR */ + *rest = ipa_write_16(0, *rest); + for (i = 5; i >= 0; i--) + *rest = ipa_write_8(mac_addr_mask[i], *rest); + *rest = ipa_write_16(0, *rest); + for (i = 5; i >= 0; i--) + *rest = ipa_write_8(mac_addr[i], *rest); +} + +static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule, + const struct ipa_rule_attrib *attrib, + u8 **extra_wrds, u8 **rest_wrds) +{ + u8 *extra = *extra_wrds; + u8 *rest = *rest_wrds; + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + int rc = 0; + + if (attrib->attrib_mask & IPA_FLT_TOS) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ); + extra = ipa_write_8(attrib->u.v4.tos, extra); + } + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ); + extra = ipa_write_8(attrib->u.v4.protocol, extra); + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -14, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -8, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -22, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -16, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* 0 => offset of TOS in v4 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_32((attrib->tos_mask << 16), rest); + rest = ipa_write_32((attrib->tos_value << 16), rest); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* 12 => offset of src ip in v4 header */ + extra = ipa_write_8(12, extra); + rest = ipa_write_32(attrib->u.v4.src_addr_mask, rest); + rest = 
ipa_write_32(attrib->u.v4.src_addr, rest); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* 16 => offset of dst ip in v4 header */ + extra = ipa_write_8(16, extra); + rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest); + rest = ipa_write_32(attrib->u.v4.dst_addr, rest); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* -2 => offset of ether type in L2 hdr */ + extra = ipa_write_8((u8)-2, extra); + rest = ipa_write_16(0, rest); + rest = ipa_write_16(htons(attrib->ether_type), rest); + rest = ipa_write_16(0, rest); + rest = ipa_write_16(htons(attrib->ether_type), rest); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 0 => offset of type after v4 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_32(0xFF, rest); + rest = ipa_write_32(attrib->type, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 1 => offset of code after v4 header */ + extra = ipa_write_8(1, extra); + rest = ipa_write_32(0xFF, rest); + rest = ipa_write_32(attrib->code, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 0 => offset of SPI after v4 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_32(0xFFFFFFFF, rest); + rest = ipa_write_32(attrib->spi, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate first ihl meq eq */ + extra = ipa_write_8(8, extra); + rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest); + rest = ipa_write_8(attrib->dst_mac_addr[3], rest); + rest = ipa_write_8(attrib->dst_mac_addr[2], rest); + rest = ipa_write_8(attrib->dst_mac_addr[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr[0], rest); + /* populate second ihl meq eq */ + extra = ipa_write_8(12, extra); + rest = ipa_write_16(0, rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest); + rest = ipa_write_16(0, rest); + rest = 
ipa_write_8(attrib->dst_mac_addr[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr[4], rest); + ihl_ofst_meq32 += 2; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE); + rest = ipa_write_32(attrib->meta_data_mask, rest); + rest = ipa_write_32(attrib->meta_data, rest); + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAHAL_ERR("bad src port range param\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 0 => offset of src port after v4 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_16(attrib->src_port_hi, rest); + rest = ipa_write_16(attrib->src_port_lo, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAHAL_ERR("bad dst port range param\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 2 => offset of dst port after v4 header */ + extra = ipa_write_8(2, extra); + rest = ipa_write_16(attrib->dst_port_hi, rest); + rest = ipa_write_16(attrib->dst_port_lo, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 0 => offset of src port after v4 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_16(attrib->src_port, rest); + rest = ipa_write_16(attrib->src_port, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 2 => offset of dst port after v4 header */ + extra = ipa_write_8(2, extra); + rest = ipa_write_16(attrib->dst_port, rest); + rest = ipa_write_16(attrib->dst_port, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG); + + goto done; + +err: + rc = -EPERM; +done: + *extra_wrds = extra; + *rest_wrds = rest; + return rc; +} + +static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule, + const struct ipa_rule_attrib *attrib, + u8 **extra_wrds, u8 **rest_wrds) +{ + u8 *extra = *extra_wrds; + u8 *rest = *rest_wrds; + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + int rc = 0; + + /* v6 code below assumes no extension headers TODO: fix this */ + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ); + extra = ipa_write_8(attrib->u.v6.next_hdr, extra); + } + + if (attrib->attrib_mask & IPA_FLT_TC) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ); + extra = ipa_write_8(attrib->u.v6.tc, extra); + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + 
ipa3_0_ofst_meq128[ofst_meq128]); + /* 8 => offset of src ip in v6 header */ + extra = ipa_write_8(8, extra); + rest = ipa_write_32(attrib->u.v6.src_addr_mask[3], rest); + rest = ipa_write_32(attrib->u.v6.src_addr_mask[2], rest); + rest = ipa_write_32(attrib->u.v6.src_addr[3], rest); + rest = ipa_write_32(attrib->u.v6.src_addr[2], rest); + rest = ipa_write_32(attrib->u.v6.src_addr_mask[1], rest); + rest = ipa_write_32(attrib->u.v6.src_addr_mask[0], rest); + rest = ipa_write_32(attrib->u.v6.src_addr[1], rest); + rest = ipa_write_32(attrib->u.v6.src_addr[0], rest); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + /* 24 => offset of dst ip in v6 header */ + extra = ipa_write_8(24, extra); + rest = ipa_write_32(attrib->u.v6.dst_addr_mask[3], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr_mask[2], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr[3], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr[2], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr_mask[1], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr_mask[0], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr[1], rest); + rest = ipa_write_32(attrib->u.v6.dst_addr[0], rest); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + /* 0 => offset of TOS in v6 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_64(0, rest); + rest = ipa_write_64(0, rest); + rest = ipa_write_32(0, rest); + rest = ipa_write_32((attrib->tos_mask << 20), rest); + rest = ipa_write_32(0, rest); + rest = ipa_write_32((attrib->tos_value << 20), rest); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -14, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -8, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -22, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + goto err; + } + *en_rule |= 
IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_fltrt_generate_mac_addr_hw_rule( + &extra, + &rest, + -16, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + /* -2 => offset of ether type in L2 hdr */ + extra = ipa_write_8((u8)-2, extra); + rest = ipa_write_16(0, rest); + rest = ipa_write_16(htons(attrib->ether_type), rest); + rest = ipa_write_16(0, rest); + rest = ipa_write_16(htons(attrib->ether_type), rest); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 0 => offset of type after v6 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_32(0xFF, rest); + rest = ipa_write_32(attrib->type, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 1 => offset of code after v6 header */ + extra = ipa_write_8(1, extra); + rest = ipa_write_32(0xFF, rest); + rest = ipa_write_32(attrib->code, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + /* 0 => offset of SPI after v6 header FIXME */ + extra = ipa_write_8(0, extra); + rest = ipa_write_32(0xFFFFFFFF, rest); + rest = ipa_write_32(attrib->spi, rest); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate first ihl meq eq */ + extra = ipa_write_8(8, extra); + rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest); + rest = ipa_write_8(attrib->dst_mac_addr[3], rest); + rest = ipa_write_8(attrib->dst_mac_addr[2], rest); + rest = ipa_write_8(attrib->dst_mac_addr[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr[0], rest); + /* populate second ihl meq eq */ + extra = ipa_write_8(12, extra); + rest = ipa_write_16(0, rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest); + rest = ipa_write_16(0, rest); + rest = ipa_write_8(attrib->dst_mac_addr[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr[4], rest); + ihl_ofst_meq32 += 2; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE); + rest = 
ipa_write_32(attrib->meta_data_mask, rest); + rest = ipa_write_32(attrib->meta_data, rest); + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 0 => offset of src port after v6 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_16(attrib->src_port, rest); + rest = ipa_write_16(attrib->src_port, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 2 => offset of dst port after v6 header */ + extra = ipa_write_8(2, extra); + rest = ipa_write_16(attrib->dst_port, rest); + rest = ipa_write_16(attrib->dst_port, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAHAL_ERR("bad src port range param\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 0 => offset of src port after v6 header */ + extra = ipa_write_8(0, extra); + rest = ipa_write_16(attrib->src_port_hi, rest); + rest = ipa_write_16(attrib->src_port_lo, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + goto err; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAHAL_ERR("bad dst port range param\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + /* 2 => offset of dst port after v6 header */ + extra = ipa_write_8(2, extra); + rest = ipa_write_16(attrib->dst_port_hi, rest); + rest = ipa_write_16(attrib->dst_port_lo, rest); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ); + rest = ipa_write_32(attrib->u.v6.flow_label & 0xFFFFF, + rest); + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG); + + goto done; + +err: + rc = -EPERM; +done: + *extra_wrds = extra; + *rest_wrds = rest; + return rc; +} + +static u8 *ipa_fltrt_copy_mem(u8 *src, u8 *dst, int cnt) +{ + while (cnt--) + *dst++ = *src++; + + return dst; +} + +/* + * ipa_fltrt_generate_hw_rule_bdy() - generate HW rule body (w/o header) + * @ip: IP address type + * @attrib: IPA rule attribute + * @buf: output buffer. 
Advance it after building the rule + * @en_rule: enable rule + * + * Return codes: + * 0: success + * -EPERM: wrong input + */ +static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt, + const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule) +{ + int sz; + int rc = 0; + u8 *extra_wrd_buf; + u8 *rest_wrd_buf; + u8 *extra_wrd_start; + u8 *rest_wrd_start; + u8 *extra_wrd_i; + u8 *rest_wrd_i; + + sz = IPA3_0_HW_TBL_WIDTH * 2 + IPA3_0_HW_RULE_START_ALIGNMENT; + extra_wrd_buf = kzalloc(sz, GFP_KERNEL); + if (!extra_wrd_buf) { + rc = -ENOMEM; + goto fail_extra_alloc; + } + + sz = IPA3_0_HW_RULE_BUF_SIZE + IPA3_0_HW_RULE_START_ALIGNMENT; + rest_wrd_buf = kzalloc(sz, GFP_KERNEL); + if (!rest_wrd_buf) { + rc = -ENOMEM; + goto fail_rest_alloc; + } + + extra_wrd_start = extra_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT; + extra_wrd_start = (u8 *)((long)extra_wrd_start & + ~IPA3_0_HW_RULE_START_ALIGNMENT); + + rest_wrd_start = rest_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT; + rest_wrd_start = (u8 *)((long)rest_wrd_start & + ~IPA3_0_HW_RULE_START_ALIGNMENT); + + extra_wrd_i = extra_wrd_start; + rest_wrd_i = rest_wrd_start; + + rc = ipa_fltrt_rule_generation_err_check(ipt, attrib); + if (rc) { + IPAHAL_ERR("rule generation err check failed\n"); + goto fail_err_check; + } + + if (ipt == IPA_IP_v4) { + if (ipa_fltrt_generate_hw_rule_bdy_ip4(en_rule, attrib, + &extra_wrd_i, &rest_wrd_i)) { + IPAHAL_ERR("failed to build ipv4 hw rule\n"); + rc = -EPERM; + goto fail_err_check; + } + + } else if (ipt == IPA_IP_v6) { + if (ipa_fltrt_generate_hw_rule_bdy_ip6(en_rule, attrib, + &extra_wrd_i, &rest_wrd_i)) { + IPAHAL_ERR("failed to build ipv6 hw rule\n"); + rc = -EPERM; + goto fail_err_check; + } + } else { + IPAHAL_ERR("unsupported ip %d\n", ipt); + goto fail_err_check; + } + + /* + * default "rule" means no attributes set -> map to + * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0 + */ + if (attrib->attrib_mask == 0) { + IPAHAL_DBG_LOW("building default rule\n"); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(ipa3_0_ofst_meq32[0]); + extra_wrd_i = ipa_write_8(0, extra_wrd_i); /* offset */ + rest_wrd_i = ipa_write_32(0, rest_wrd_i); /* mask */ + rest_wrd_i = ipa_write_32(0, rest_wrd_i); /* val */ + } + + IPAHAL_DBG_LOW("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start); + IPAHAL_DBG_LOW("extra_word_2 0x%llx\n", + *(u64 *)(extra_wrd_start + IPA3_0_HW_TBL_WIDTH)); + + extra_wrd_i = ipa_pad_to_64(extra_wrd_i); + sz = extra_wrd_i - extra_wrd_start; + IPAHAL_DBG_LOW("extra words params sz %d\n", sz); + *buf = ipa_fltrt_copy_mem(extra_wrd_start, *buf, sz); + + rest_wrd_i = ipa_pad_to_64(rest_wrd_i); + sz = rest_wrd_i - rest_wrd_start; + IPAHAL_DBG_LOW("non extra words params sz %d\n", sz); + *buf = ipa_fltrt_copy_mem(rest_wrd_start, *buf, sz); + +fail_err_check: + kfree(rest_wrd_buf); +fail_rest_alloc: + kfree(extra_wrd_buf); +fail_extra_alloc: + return rc; +} + + +/** + * ipa_fltrt_calc_extra_wrd_bytes()- Calculate the number of extra words for eq + * @attrib: equation attribute + * + * Return value: 0 on success, negative otherwise + */ +static int ipa_fltrt_calc_extra_wrd_bytes( + const struct ipa_ipfltri_rule_eq *attrib) +{ + int num = 0; + + if (attrib->tos_eq_present) + num++; + if (attrib->protocol_eq_present) + num++; + if (attrib->tc_eq_present) + num++; + num += attrib->num_offset_meq_128; + num += attrib->num_offset_meq_32; + num += attrib->num_ihl_offset_meq_32; + num += attrib->num_ihl_offset_range_16; + if (attrib->ihl_offset_eq_32_present) + num++; + if (attrib->ihl_offset_eq_16_present) + 
num++; + + IPAHAL_DBG_LOW("extra bytes number %d\n", num); + + return num; +} + +static int ipa_fltrt_generate_hw_rule_bdy_from_eq( + const struct ipa_ipfltri_rule_eq *attrib, u8 **buf) +{ + int num_offset_meq_32 = attrib->num_offset_meq_32; + int num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16; + int num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32; + int num_offset_meq_128 = attrib->num_offset_meq_128; + int i; + int extra_bytes; + u8 *extra; + u8 *rest; + + extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(attrib); + /* only 3 eq does not have extra word param, 13 out of 16 is the number + * of equations that needs extra word param + */ + if (extra_bytes > 13) { + IPAHAL_ERR("too much extra bytes\n"); + return -EPERM; + } else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) { + /* two extra words */ + extra = *buf; + rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH * 2; + } else if (extra_bytes > 0) { + /* single exra word */ + extra = *buf; + rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH; + } else { + /* no extra words */ + extra = NULL; + rest = *buf; + } + + if (attrib->tos_eq_present) + extra = ipa_write_8(attrib->tos_eq, extra); + + if (attrib->protocol_eq_present) + extra = ipa_write_8(attrib->protocol_eq, extra); + + if (attrib->tc_eq_present) + extra = ipa_write_8(attrib->tc_eq, extra); + + if (num_offset_meq_128) { + extra = ipa_write_8(attrib->offset_meq_128[0].offset, extra); + for (i = 0; i < 8; i++) + rest = ipa_write_8(attrib->offset_meq_128[0].mask[i], + rest); + for (i = 0; i < 8; i++) + rest = ipa_write_8(attrib->offset_meq_128[0].value[i], + rest); + for (i = 8; i < 16; i++) + rest = ipa_write_8(attrib->offset_meq_128[0].mask[i], + rest); + for (i = 8; i < 16; i++) + rest = ipa_write_8(attrib->offset_meq_128[0].value[i], + rest); + num_offset_meq_128--; + } + + if (num_offset_meq_128) { + extra = ipa_write_8(attrib->offset_meq_128[1].offset, extra); + for (i = 0; i < 8; i++) + rest = ipa_write_8(attrib->offset_meq_128[1].mask[i], + rest); + for (i = 0; i < 8; i++) + rest = ipa_write_8(attrib->offset_meq_128[1].value[i], + rest); + for (i = 8; i < 16; i++) + rest = ipa_write_8(attrib->offset_meq_128[1].mask[i], + rest); + for (i = 8; i < 16; i++) + rest = ipa_write_8(attrib->offset_meq_128[1].value[i], + rest); + num_offset_meq_128--; + } + + if (num_offset_meq_32) { + extra = ipa_write_8(attrib->offset_meq_32[0].offset, extra); + rest = ipa_write_32(attrib->offset_meq_32[0].mask, rest); + rest = ipa_write_32(attrib->offset_meq_32[0].value, rest); + num_offset_meq_32--; + } + + if (num_offset_meq_32) { + extra = ipa_write_8(attrib->offset_meq_32[1].offset, extra); + rest = ipa_write_32(attrib->offset_meq_32[1].mask, rest); + rest = ipa_write_32(attrib->offset_meq_32[1].value, rest); + num_offset_meq_32--; + } + + if (num_ihl_offset_meq_32) { + extra = ipa_write_8(attrib->ihl_offset_meq_32[0].offset, + extra); + + rest = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, rest); + rest = ipa_write_32(attrib->ihl_offset_meq_32[0].value, rest); + num_ihl_offset_meq_32--; + } + + if (num_ihl_offset_meq_32) { + extra = ipa_write_8(attrib->ihl_offset_meq_32[1].offset, + extra); + + rest = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, rest); + rest = ipa_write_32(attrib->ihl_offset_meq_32[1].value, rest); + num_ihl_offset_meq_32--; + } + + if (attrib->metadata_meq32_present) { + rest = ipa_write_32(attrib->metadata_meq32.mask, rest); + rest = ipa_write_32(attrib->metadata_meq32.value, rest); + } + + if (num_ihl_offset_range_16) { + extra = ipa_write_8(attrib->ihl_offset_range_16[0].offset, + 
extra); + + rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_high, + rest); + rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_low, + rest); + num_ihl_offset_range_16--; + } + + if (num_ihl_offset_range_16) { + extra = ipa_write_8(attrib->ihl_offset_range_16[1].offset, + extra); + + rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_high, + rest); + rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_low, + rest); + num_ihl_offset_range_16--; + } + + if (attrib->ihl_offset_eq_32_present) { + extra = ipa_write_8(attrib->ihl_offset_eq_32.offset, extra); + rest = ipa_write_32(attrib->ihl_offset_eq_32.value, rest); + } + + if (attrib->ihl_offset_eq_16_present) { + extra = ipa_write_8(attrib->ihl_offset_eq_16.offset, extra); + rest = ipa_write_16(attrib->ihl_offset_eq_16.value, rest); + rest = ipa_write_16(0, rest); + } + + if (attrib->fl_eq_present) + rest = ipa_write_32(attrib->fl_eq & 0xFFFFF, rest); + + extra = ipa_pad_to_64(extra); + rest = ipa_pad_to_64(rest); + *buf = rest; + + return 0; +} + +static void ipa_flt_generate_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb, + u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN], + const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128) +{ + int i; + + eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset; + + /* LSB MASK and ADDR */ + memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 8); + memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 8); + + /* MSB MASK and ADDR */ + memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 2); + for (i = 0; i <= 5; i++) + eq_atrb->offset_meq_128[ofst_meq128].mask[15 - i] = + mac_addr_mask[i]; + + memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 2); + for (i = 0; i <= 5; i++) + eq_atrb->offset_meq_128[ofst_meq128].value[15 - i] = + mac_addr[i]; +} + +static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb) +{ + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + u16 eq_bitmap = 0; + u16 *en_rule = &eq_bitmap; + + if (attrib->attrib_mask & IPA_FLT_TOS) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ); + eq_atrb->tos_eq_present = 1; + eq_atrb->tos_eq = attrib->u.v4.tos; + } + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ); + eq_atrb->protocol_eq_present = 1; + eq_atrb->protocol_eq = attrib->u.v4.protocol; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -14, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -8, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return 
-EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -22, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -16, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate the first ihl meq 32 eq */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + (attrib->dst_mac_addr_mask[3] & 0xFF) | + ((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + (attrib->dst_mac_addr[3] & 0xFF) | + ((attrib->dst_mac_addr[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr[0] << 24) & 0xFF000000); + /* populate the second ihl meq 32 eq */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask = + ((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value = + ((attrib->dst_mac_addr[5] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr[4] << 24) & 0xFF000000); + ihl_ofst_meq32 += 2; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + eq_atrb->offset_meq_32[ofst_meq32].offset = 0; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->tos_mask << 16; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->tos_value << 16; + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + eq_atrb->offset_meq_32[ofst_meq32].offset = 12; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->u.v4.src_addr_mask; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->u.v4.src_addr; + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + eq_atrb->offset_meq_32[ofst_meq32].offset = 16; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->u.v4.dst_addr_mask; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->u.v4.dst_addr; + 
ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + eq_atrb->offset_meq_32[ofst_meq32].offset = -2; + eq_atrb->offset_meq_32[ofst_meq32].mask = + htons(attrib->ether_type); + eq_atrb->offset_meq_32[ofst_meq32].value = + htons(attrib->ether_type); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->type; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->code; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xFFFFFFFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->spi; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_METADATA_COMPARE); + eq_atrb->metadata_meq32_present = 1; + eq_atrb->metadata_meq32.offset = 0; + eq_atrb->metadata_meq32.mask = attrib->meta_data_mask; + eq_atrb->metadata_meq32.value = attrib->meta_data; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAHAL_ERR("bad src port range param\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAHAL_ERR("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if 
(IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG); + eq_atrb->ipv4_frag_eq_present = 1; + } + + eq_atrb->rule_eq_bitmap = *en_rule; + eq_atrb->num_offset_meq_32 = ofst_meq32; + eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16; + eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32; + eq_atrb->num_offset_meq_128 = ofst_meq128; + + return 0; +} + +static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb) +{ + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + u16 eq_bitmap = 0; + u16 *en_rule = &eq_bitmap; + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_PROTOCOL_EQ); + eq_atrb->protocol_eq_present = 1; + eq_atrb->protocol_eq = attrib->u.v6.next_hdr; + } + + if (attrib->attrib_mask & IPA_FLT_TC) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_TC_EQ); + eq_atrb->tc_eq_present = 1; + eq_atrb->tc_eq = attrib->u.v6.tc; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + /* use the same word order as in ipa v2 */ + eq_atrb->offset_meq_128[ofst_meq128].offset = 8; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0) + = attrib->u.v6.src_addr_mask[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4) + = attrib->u.v6.src_addr_mask[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8) + = attrib->u.v6.src_addr_mask[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = attrib->u.v6.src_addr_mask[3]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0) + = attrib->u.v6.src_addr[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4) + = attrib->u.v6.src_addr[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8) + = attrib->u.v6.src_addr[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = attrib->u.v6.src_addr[3]; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + eq_atrb->offset_meq_128[ofst_meq128].offset = 24; + /* use the same word order as in ipa v2 */ + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0) + = attrib->u.v6.dst_addr_mask[0]; + *(u32 
*)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4) + = attrib->u.v6.dst_addr_mask[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8) + = attrib->u.v6.dst_addr_mask[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = attrib->u.v6.dst_addr_mask[3]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0) + = attrib->u.v6.dst_addr[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4) + = attrib->u.v6.dst_addr[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8) + = attrib->u.v6.dst_addr[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = attrib->u.v6.dst_addr[3]; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + eq_atrb->offset_meq_128[ofst_meq128].offset = 0; + memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 12); + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = attrib->tos_mask << 20; + memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 12); + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = attrib->tos_value << 20; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -14, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -8, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -22, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) { + IPAHAL_ERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq128[ofst_meq128]); + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_flt_generate_mac_addr_eq(eq_atrb, -16, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate the first ihl meq 32 eq */ + 
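+		/*
+		 * Layout sketch (editor note, not part of the original snapshot):
+		 * the six MAC-address bytes are split across the two 32-bit
+		 * equations - bytes 0..3 land in this first meq32 at ihl offset 8
+		 * and bytes 4..5 in the second meq32 at ihl offset 12, each
+		 * packed most-significant-byte first into the mask/value words
+		 * assigned below.
+		 */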
eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + (attrib->dst_mac_addr_mask[3] & 0xFF) | + ((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + (attrib->dst_mac_addr[3] & 0xFF) | + ((attrib->dst_mac_addr[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr[0] << 24) & 0xFF000000); + /* populate the second ihl meq 32 eq */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask = + ((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value = + ((attrib->dst_mac_addr[5] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr[4] << 24) & 0xFF000000); + ihl_ofst_meq32 += 2; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { + IPAHAL_ERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ofst_meq32[ofst_meq32]); + eq_atrb->offset_meq_32[ofst_meq32].offset = -2; + eq_atrb->offset_meq_32[ofst_meq32].mask = + htons(attrib->ether_type); + eq_atrb->offset_meq_32[ofst_meq32].value = + htons(attrib->ether_type); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->type; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->code; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xFFFFFFFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->spi; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_METADATA_COMPARE); + eq_atrb->metadata_meq32_present = 1; + eq_atrb->metadata_meq32.offset = 0; + eq_atrb->metadata_meq32.mask = attrib->meta_data_mask; + eq_atrb->metadata_meq32.value = attrib->meta_data; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + 
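+		/*
+		 * Editor note (illustrative): an exact port match is encoded as a
+		 * degenerate range, e.g. src_port 5060 becomes
+		 * range_low = range_high = 5060 at ihl offset 0, which is the
+		 * source-port field of TCP/UDP relative to the end of the IP
+		 * header.
+		 */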
eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAHAL_ERR("bad src port range param\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16, + ihl_ofst_rng16)) { + IPAHAL_ERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAHAL_ERR("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]); + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ); + eq_atrb->fl_eq_present = 1; + eq_atrb->fl_eq = attrib->u.v6.flow_label; + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + IPA_IS_FRAG); + eq_atrb->ipv4_frag_eq_present = 1; + } + + eq_atrb->rule_eq_bitmap = *en_rule; + eq_atrb->num_offset_meq_32 = ofst_meq32; + eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16; + eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32; + eq_atrb->num_offset_meq_128 = ofst_meq128; + + return 0; +} + +static int ipa_fltrt_parse_hw_rule_eq(u8 *addr, u32 hdr_sz, + struct ipa_ipfltri_rule_eq *atrb, u32 *rule_size) +{ + u16 eq_bitmap; + int extra_bytes; + u8 *extra; + u8 *rest; + int i; + u8 dummy_extra_wrd; + + if (!addr || !atrb || !rule_size) { + IPAHAL_ERR("Input error: addr=%pK atrb=%pK rule_size=%pK\n", + addr, atrb, rule_size); + return -EINVAL; + } + + eq_bitmap = atrb->rule_eq_bitmap; + + IPAHAL_DBG_LOW("eq_bitmap=0x%x\n", eq_bitmap); + + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ)) + atrb->tos_eq_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ)) + atrb->protocol_eq_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ)) + atrb->tc_eq_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_0)) + atrb->num_offset_meq_128++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_1)) + atrb->num_offset_meq_128++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_0)) + atrb->num_offset_meq_32++; + if 
(eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_1)) + atrb->num_offset_meq_32++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_0)) + atrb->num_ihl_offset_meq_32++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_1)) + atrb->num_ihl_offset_meq_32++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE)) + atrb->metadata_meq32_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_0)) + atrb->num_ihl_offset_range_16++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_1)) + atrb->num_ihl_offset_range_16++; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_32)) + atrb->ihl_offset_eq_32_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_16)) + atrb->ihl_offset_eq_16_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ)) + atrb->fl_eq_present = true; + if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG)) + atrb->ipv4_frag_eq_present = true; + + extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(atrb); + /* only 3 eq does not have extra word param, 13 out of 16 is the number + * of equations that needs extra word param + */ + if (extra_bytes > 13) { + IPAHAL_ERR("too much extra bytes\n"); + return -EPERM; + } else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) { + /* two extra words */ + extra = addr + hdr_sz; + rest = extra + IPA3_0_HW_TBL_HDR_WIDTH * 2; + } else if (extra_bytes > 0) { + /* single extra word */ + extra = addr + hdr_sz; + rest = extra + IPA3_0_HW_TBL_HDR_WIDTH; + } else { + /* no extra words */ + dummy_extra_wrd = 0; + extra = &dummy_extra_wrd; + rest = addr + hdr_sz; + } + IPAHAL_DBG_LOW("addr=0x%pK extra=0x%pK rest=0x%pK\n", + addr, extra, rest); + + if (atrb->tos_eq_present) + atrb->tos_eq = *extra++; + if (atrb->protocol_eq_present) + atrb->protocol_eq = *extra++; + if (atrb->tc_eq_present) + atrb->tc_eq = *extra++; + + if (atrb->num_offset_meq_128 > 0) { + atrb->offset_meq_128[0].offset = *extra++; + for (i = 0; i < 8; i++) + atrb->offset_meq_128[0].mask[i] = *rest++; + for (i = 0; i < 8; i++) + atrb->offset_meq_128[0].value[i] = *rest++; + for (i = 8; i < 16; i++) + atrb->offset_meq_128[0].mask[i] = *rest++; + for (i = 8; i < 16; i++) + atrb->offset_meq_128[0].value[i] = *rest++; + } + if (atrb->num_offset_meq_128 > 1) { + atrb->offset_meq_128[1].offset = *extra++; + for (i = 0; i < 8; i++) + atrb->offset_meq_128[1].mask[i] = *rest++; + for (i = 0; i < 8; i++) + atrb->offset_meq_128[1].value[i] = *rest++; + for (i = 8; i < 16; i++) + atrb->offset_meq_128[1].mask[i] = *rest++; + for (i = 8; i < 16; i++) + atrb->offset_meq_128[1].value[i] = *rest++; + } + + if (atrb->num_offset_meq_32 > 0) { + atrb->offset_meq_32[0].offset = *extra++; + atrb->offset_meq_32[0].mask = *((u32 *)rest); + rest += 4; + atrb->offset_meq_32[0].value = *((u32 *)rest); + rest += 4; + } + if (atrb->num_offset_meq_32 > 1) { + atrb->offset_meq_32[1].offset = *extra++; + atrb->offset_meq_32[1].mask = *((u32 *)rest); + rest += 4; + atrb->offset_meq_32[1].value = *((u32 *)rest); + rest += 4; + } + + if (atrb->num_ihl_offset_meq_32 > 0) { + atrb->ihl_offset_meq_32[0].offset = *extra++; + atrb->ihl_offset_meq_32[0].mask = *((u32 *)rest); + rest += 4; + atrb->ihl_offset_meq_32[0].value = *((u32 *)rest); + rest += 4; + } + if (atrb->num_ihl_offset_meq_32 > 1) { + atrb->ihl_offset_meq_32[1].offset = *extra++; + atrb->ihl_offset_meq_32[1].mask = *((u32 *)rest); + rest += 4; + atrb->ihl_offset_meq_32[1].value = *((u32 *)rest); + rest += 4; + } + + if 
(atrb->metadata_meq32_present) { + atrb->metadata_meq32.mask = *((u32 *)rest); + rest += 4; + atrb->metadata_meq32.value = *((u32 *)rest); + rest += 4; + } + + if (atrb->num_ihl_offset_range_16 > 0) { + atrb->ihl_offset_range_16[0].offset = *extra++; + atrb->ihl_offset_range_16[0].range_high = *((u16 *)rest); + rest += 2; + atrb->ihl_offset_range_16[0].range_low = *((u16 *)rest); + rest += 2; + } + if (atrb->num_ihl_offset_range_16 > 1) { + atrb->ihl_offset_range_16[1].offset = *extra++; + atrb->ihl_offset_range_16[1].range_high = *((u16 *)rest); + rest += 2; + atrb->ihl_offset_range_16[1].range_low = *((u16 *)rest); + rest += 2; + } + + if (atrb->ihl_offset_eq_32_present) { + atrb->ihl_offset_eq_32.offset = *extra++; + atrb->ihl_offset_eq_32.value = *((u32 *)rest); + rest += 4; + } + + if (atrb->ihl_offset_eq_16_present) { + atrb->ihl_offset_eq_16.offset = *extra++; + atrb->ihl_offset_eq_16.value = *((u16 *)rest); + rest += 4; + } + + if (atrb->fl_eq_present) { + atrb->fl_eq = *((u32 *)rest); + atrb->fl_eq &= 0xfffff; + rest += 4; + } + + IPAHAL_DBG_LOW("before rule alignment rest=0x%pK\n", rest); + rest = (u8 *)(((unsigned long)rest + IPA3_0_HW_RULE_START_ALIGNMENT) & + ~IPA3_0_HW_RULE_START_ALIGNMENT); + IPAHAL_DBG_LOW("after rule alignment rest=0x%pK\n", rest); + + *rule_size = rest - addr; + IPAHAL_DBG_LOW("rule_size=0x%x\n", *rule_size); + + return 0; +} + +static int ipa_rt_parse_hw_rule(u8 *addr, struct ipahal_rt_rule_entry *rule) +{ + struct ipa3_0_rt_rule_hw_hdr *rule_hdr; + struct ipa_ipfltri_rule_eq *atrb; + + IPAHAL_DBG_LOW("Entry\n"); + + rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)addr; + atrb = &rule->eq_attrib; + + IPAHAL_DBG_LOW("read hdr 0x%llx\n", rule_hdr->u.word); + + if (rule_hdr->u.word == 0) { + /* table termintator - empty table */ + rule->rule_size = 0; + return 0; + } + + rule->dst_pipe_idx = rule_hdr->u.hdr.pipe_dest_idx; + if (rule_hdr->u.hdr.proc_ctx) { + rule->hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX; + rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 5; + } else { + rule->hdr_type = IPAHAL_RT_RULE_HDR_RAW; + rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 2; + } + rule->hdr_lcl = !rule_hdr->u.hdr.system; + + rule->priority = rule_hdr->u.hdr.priority; + rule->retain_hdr = rule_hdr->u.hdr.retain_hdr; + rule->id = rule_hdr->u.hdr.rule_id; + + atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule; + return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr), + atrb, &rule->rule_size); +} + +static int ipa_flt_parse_hw_rule(u8 *addr, struct ipahal_flt_rule_entry *rule) +{ + struct ipa3_0_flt_rule_hw_hdr *rule_hdr; + struct ipa_ipfltri_rule_eq *atrb; + + IPAHAL_DBG_LOW("Entry\n"); + + rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)addr; + atrb = &rule->rule.eq_attrib; + + if (rule_hdr->u.word == 0) { + /* table termintator - empty table */ + rule->rule_size = 0; + return 0; + } + + switch (rule_hdr->u.hdr.action) { + case 0x0: + rule->rule.action = IPA_PASS_TO_ROUTING; + break; + case 0x1: + rule->rule.action = IPA_PASS_TO_SRC_NAT; + break; + case 0x2: + rule->rule.action = IPA_PASS_TO_DST_NAT; + break; + case 0x3: + rule->rule.action = IPA_PASS_TO_EXCEPTION; + break; + default: + IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action); + WARN_ON(1); + rule->rule.action = rule_hdr->u.hdr.action; + } + + rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx; + rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr; + rule->priority = rule_hdr->u.hdr.priority; + rule->id = rule_hdr->u.hdr.rule_id; + + atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule; + rule->rule.eq_attrib_type = 
1; + return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr), + atrb, &rule->rule_size); +} + +static int ipa_flt_parse_hw_rule_ipav4(u8 *addr, + struct ipahal_flt_rule_entry *rule) +{ + struct ipa4_0_flt_rule_hw_hdr *rule_hdr; + struct ipa_ipfltri_rule_eq *atrb; + + IPAHAL_DBG_LOW("Entry\n"); + + rule_hdr = (struct ipa4_0_flt_rule_hw_hdr *)addr; + atrb = &rule->rule.eq_attrib; + + if (rule_hdr->u.word == 0) { + /* table termintator - empty table */ + rule->rule_size = 0; + return 0; + } + + switch (rule_hdr->u.hdr.action) { + case 0x0: + rule->rule.action = IPA_PASS_TO_ROUTING; + break; + case 0x1: + rule->rule.action = IPA_PASS_TO_SRC_NAT; + break; + case 0x2: + rule->rule.action = IPA_PASS_TO_DST_NAT; + break; + case 0x3: + rule->rule.action = IPA_PASS_TO_EXCEPTION; + break; + default: + IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action); + WARN_ON(1); + rule->rule.action = rule_hdr->u.hdr.action; + } + + rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx; + rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr; + rule->priority = rule_hdr->u.hdr.priority; + rule->id = rule_hdr->u.hdr.rule_id; + rule->rule.pdn_idx = rule_hdr->u.hdr.pdn_idx; + rule->rule.set_metadata = rule_hdr->u.hdr.set_metadata; + + atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule; + rule->rule.eq_attrib_type = 1; + return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr), + atrb, &rule->rule_size); +} + +/* + * ipahal_fltrt_init() - Build the FLT/RT information table + * See ipahal_fltrt_objs[] comments + * + * Note: As global variables are initialized with zero, any un-overridden + * register entry will be zero. By this we recognize them. + */ +int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type) +{ + struct ipahal_fltrt_obj zero_obj; + int i; + struct ipa_mem_buffer *mem; + int rc = -EFAULT; + + IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type); + + if (ipa_hw_type >= IPA_HW_MAX) { + IPAHAL_ERR("Invalid H/W type\n"); + return -EFAULT; + } + + memset(&zero_obj, 0, sizeof(zero_obj)); + for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) { + if (!memcmp(&ipahal_fltrt_objs[i+1], &zero_obj, + sizeof(struct ipahal_fltrt_obj))) { + memcpy(&ipahal_fltrt_objs[i+1], + &ipahal_fltrt_objs[i], + sizeof(struct ipahal_fltrt_obj)); + } else { + /* + * explicitly overridden FLT RT info + * Check validity + */ + if (!ipahal_fltrt_objs[i+1].tbl_width) { + IPAHAL_ERR( + "Zero tbl width ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].sysaddr_alignment) { + IPAHAL_ERR( + "No tbl sysaddr alignment ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].lcladdr_alignment) { + IPAHAL_ERR( + "No tbl lcladdr alignment ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].blk_sz_alignment) { + IPAHAL_ERR( + "No blk sz alignment ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].rule_start_alignment) { + IPAHAL_ERR( + "No rule start alignment ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].tbl_hdr_width) { + IPAHAL_ERR( + "Zero tbl hdr width ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].tbl_addr_mask) { + IPAHAL_ERR( + "Zero tbl hdr width ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (ipahal_fltrt_objs[i+1].rule_id_bit_len < 2) { + IPAHAL_ERR( + "Too little bits for rule_id ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].rule_buf_size) { + IPAHAL_ERR( + "zero rule buf size ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].write_val_to_hdr) { + IPAHAL_ERR( + "No write_val_to_hdr CB 
ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].create_flt_bitmap) { + IPAHAL_ERR( + "No create_flt_bitmap CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].create_tbl_addr) { + IPAHAL_ERR( + "No create_tbl_addr CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].parse_tbl_addr) { + IPAHAL_ERR( + "No parse_tbl_addr CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].rt_generate_hw_rule) { + IPAHAL_ERR( + "No rt_generate_hw_rule CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].flt_generate_hw_rule) { + IPAHAL_ERR( + "No flt_generate_hw_rule CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].flt_generate_eq) { + IPAHAL_ERR( + "No flt_generate_eq CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].rt_parse_hw_rule) { + IPAHAL_ERR( + "No rt_parse_hw_rule CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + if (!ipahal_fltrt_objs[i+1].flt_parse_hw_rule) { + IPAHAL_ERR( + "No flt_parse_hw_rule CB ipaver=%d\n", + i+1); + WARN_ON(1); + } + } + } + + mem = &ipahal_ctx->empty_fltrt_tbl; + + /* setup an empty table in system memory; This will + * be used, for example, to delete a rt tbl safely + */ + mem->size = ipahal_fltrt_objs[ipa_hw_type].tbl_width; + mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAHAL_ERR("DMA buff alloc fail %d bytes for empty tbl\n", + mem->size); + return -ENOMEM; + } + + if (mem->phys_base & + ipahal_fltrt_objs[ipa_hw_type].sysaddr_alignment) { + IPAHAL_ERR("Empty table buf is not address aligned 0x%pad\n", + &mem->phys_base); + rc = -EFAULT; + goto clear_empty_tbl; + } + + memset(mem->base, 0, mem->size); + IPAHAL_DBG("empty table allocated in system memory"); + + return 0; + +clear_empty_tbl: + dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base, + mem->phys_base); + return rc; +} + +void ipahal_fltrt_destroy(void) +{ + IPAHAL_DBG("Entry\n"); + + if (ipahal_ctx && ipahal_ctx->empty_fltrt_tbl.base) + dma_free_coherent(ipahal_ctx->ipa_pdev, + ipahal_ctx->empty_fltrt_tbl.size, + ipahal_ctx->empty_fltrt_tbl.base, + ipahal_ctx->empty_fltrt_tbl.phys_base); +} + +/* Get the H/W table (flt/rt) header width */ +u32 ipahal_get_hw_tbl_hdr_width(void) +{ + return ipahal_fltrt_objs[ipahal_ctx->hw_type].tbl_hdr_width; +} + +/* Get the H/W local table (SRAM) address alignment + * Tables headers references to local tables via offsets in SRAM + * This function return the alignment of the offset that IPA expects + */ +u32 ipahal_get_lcl_tbl_addr_alignment(void) +{ + return ipahal_fltrt_objs[ipahal_ctx->hw_type].lcladdr_alignment; +} + +/* + * Rule priority is used to distinguish rules order + * at the integrated table consisting from hashable and + * non-hashable tables. Max priority are rules that once are + * scanned by IPA, IPA will not look for further rules and use it. + */ +int ipahal_get_rule_max_priority(void) +{ + return ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_max_prio; +} + +/* Given a priority, calc and return the next lower one if it is in + * legal range. + */ +int ipahal_rule_decrease_priority(int *prio) +{ + struct ipahal_fltrt_obj *obj; + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!prio) { + IPAHAL_ERR("Invalid Input\n"); + return -EINVAL; + } + + /* Priority logic is reverse. 
0 priority considred max priority */ + if (*prio > obj->rule_min_prio || *prio < obj->rule_max_prio) { + IPAHAL_ERR("Invalid given priority %d\n", *prio); + return -EINVAL; + } + + *prio += 1; + + if (*prio > obj->rule_min_prio) { + IPAHAL_ERR("Cannot decrease priority. Already on min\n"); + *prio -= 1; + return -EFAULT; + } + + return 0; +} + +/* Does the given ID represents rule miss? + * Rule miss ID, is always the max ID possible in the bit-pattern + */ +bool ipahal_is_rule_miss_id(u32 id) +{ + return (id == + ((1U << ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len) + -1)); +} + +/* Get rule ID with high bit only asserted + * Used e.g. to create groups of IDs according to this bit + */ +u32 ipahal_get_rule_id_hi_bit(void) +{ + return BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len - 1); +} + +/* Get the low value possible to be used for rule-id */ +u32 ipahal_get_low_rule_id(void) +{ + return ipahal_fltrt_objs[ipahal_ctx->hw_type].low_rule_id; +} + +/* + * ipahal_rt_generate_empty_img() - Generate empty route image + * Creates routing header buffer for the given tables number. + * For each table, make it point to the empty table on DDR. + * @tbls_num: Number of tables. For each will have an entry in the header + * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check + * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check + * @mem: mem object that points to DMA mem representing the hdr structure + * @atomic: should DMA allocation be executed with atomic flag + */ +int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size, + u32 nhash_hdr_size, struct ipa_mem_buffer *mem, bool atomic) +{ + int i; + u64 addr; + struct ipahal_fltrt_obj *obj; + int flag; + + IPAHAL_DBG("Entry\n"); + + flag = atomic ? GFP_ATOMIC : GFP_KERNEL; + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!tbls_num || !nhash_hdr_size || !mem) { + IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%pK\n", + tbls_num, nhash_hdr_size, mem); + return -EINVAL; + } + if (obj->support_hash && !hash_hdr_size) { + IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size); + return -EINVAL; + } + + if (nhash_hdr_size < (tbls_num * obj->tbl_hdr_width)) { + IPAHAL_ERR("No enough spc at non-hash hdr blk for all tbls\n"); + WARN_ON(1); + return -EINVAL; + } + if (obj->support_hash && + (hash_hdr_size < (tbls_num * obj->tbl_hdr_width))) { + IPAHAL_ERR("No enough spc at hash hdr blk for all tbls\n"); + WARN_ON(1); + return -EINVAL; + } + + mem->size = tbls_num * obj->tbl_hdr_width; + mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size, + &mem->phys_base, flag); + if (!mem->base) { + IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + addr = obj->create_tbl_addr(true, + ipahal_ctx->empty_fltrt_tbl.phys_base); + for (i = 0; i < tbls_num; i++) + obj->write_val_to_hdr(addr, + mem->base + i * obj->tbl_hdr_width); + + return 0; +} + +/* + * ipahal_flt_generate_empty_img() - Generate empty filter image + * Creates filter header buffer for the given tables number. + * For each table, make it point to the empty table on DDR. + * @tbls_num: Number of tables. For each will have an entry in the header + * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check + * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check + * @ep_bitmap: Bitmap representing the EP that has flt tables. 
The format + * should be: bit0->EP0, bit1->EP1 + * If bitmap is zero -> create tbl without bitmap entry + * @mem: mem object that points to DMA mem representing the hdr structure + * @atomic: should DMA allocation be executed with atomic flag + */ +int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size, + u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem, + bool atomic) +{ + int flt_spc; + u64 flt_bitmap; + int i; + u64 addr; + struct ipahal_fltrt_obj *obj; + int flag; + + IPAHAL_DBG("Entry - ep_bitmap 0x%llx\n", ep_bitmap); + + flag = atomic ? GFP_ATOMIC : GFP_KERNEL; + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!tbls_num || !nhash_hdr_size || !mem) { + IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%pK\n", + tbls_num, nhash_hdr_size, mem); + return -EINVAL; + } + if (obj->support_hash && !hash_hdr_size) { + IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size); + return -EINVAL; + } + + if (obj->support_hash) { + flt_spc = hash_hdr_size; + /* bitmap word */ + if (ep_bitmap) + flt_spc -= obj->tbl_hdr_width; + flt_spc /= obj->tbl_hdr_width; + if (tbls_num > flt_spc) { + IPAHAL_ERR("space for hash flt hdr is too small\n"); + WARN_ON(1); + return -EPERM; + } + } + + flt_spc = nhash_hdr_size; + /* bitmap word */ + if (ep_bitmap) + flt_spc -= obj->tbl_hdr_width; + flt_spc /= obj->tbl_hdr_width; + if (tbls_num > flt_spc) { + IPAHAL_ERR("space for non-hash flt hdr is too small\n"); + WARN_ON(1); + return -EPERM; + } + + mem->size = tbls_num * obj->tbl_hdr_width; + if (ep_bitmap) + mem->size += obj->tbl_hdr_width; + mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size, + &mem->phys_base, flag); + if (!mem->base) { + IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + if (ep_bitmap) { + flt_bitmap = obj->create_flt_bitmap(ep_bitmap); + IPAHAL_DBG("flt bitmap 0x%llx\n", flt_bitmap); + obj->write_val_to_hdr(flt_bitmap, mem->base); + } + + addr = obj->create_tbl_addr(true, + ipahal_ctx->empty_fltrt_tbl.phys_base); + + if (ep_bitmap) { + for (i = 1; i <= tbls_num; i++) + obj->write_val_to_hdr(addr, + mem->base + i * obj->tbl_hdr_width); + } else { + for (i = 0; i < tbls_num; i++) + obj->write_val_to_hdr(addr, + mem->base + i * obj->tbl_hdr_width); + } + + return 0; +} + +/* + * ipa_fltrt_alloc_init_tbl_hdr() - allocate and initialize buffers for + * flt/rt tables headers to be filled into sram. 
Init each table to point + * to empty system table + * @params: Allocate IN and OUT params + * + * Return: 0 on success, negative on failure + */ +static int ipa_fltrt_alloc_init_tbl_hdr( + struct ipahal_fltrt_alloc_imgs_params *params) +{ + u64 addr; + int i; + struct ipahal_fltrt_obj *obj; + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!params) { + IPAHAL_ERR("Input error: params=%pK\n", params); + return -EINVAL; + } + + params->nhash_hdr.size = params->tbls_num * obj->tbl_hdr_width; + params->nhash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, + params->nhash_hdr.size, + ¶ms->nhash_hdr.phys_base, GFP_KERNEL); + if (!params->nhash_hdr.base) { + IPAHAL_ERR("fail to alloc DMA buff of size %d\n", + params->nhash_hdr.size); + goto nhash_alloc_fail; + } + + if (obj->support_hash) { + params->hash_hdr.size = params->tbls_num * obj->tbl_hdr_width; + params->hash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, + params->hash_hdr.size, ¶ms->hash_hdr.phys_base, + GFP_KERNEL); + if (!params->hash_hdr.base) { + IPAHAL_ERR("fail to alloc DMA buff of size %d\n", + params->hash_hdr.size); + goto hash_alloc_fail; + } + } + + addr = obj->create_tbl_addr(true, + ipahal_ctx->empty_fltrt_tbl.phys_base); + for (i = 0; i < params->tbls_num; i++) { + obj->write_val_to_hdr(addr, + params->nhash_hdr.base + i * obj->tbl_hdr_width); + if (obj->support_hash) + obj->write_val_to_hdr(addr, + params->hash_hdr.base + + i * obj->tbl_hdr_width); + } + + return 0; + +hash_alloc_fail: + ipahal_free_dma_mem(¶ms->nhash_hdr); +nhash_alloc_fail: + return -ENOMEM; +} + +/* + * ipa_fltrt_alloc_lcl_bdy() - allocate and initialize buffers for + * local flt/rt tables bodies to be filled into sram + * @params: Allocate IN and OUT params + * + * Return: 0 on success, negative on failure + */ +static int ipa_fltrt_alloc_lcl_bdy( + struct ipahal_fltrt_alloc_imgs_params *params) +{ + struct ipahal_fltrt_obj *obj; + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + /* The HAL allocates larger sizes than the given effective ones + * for alignments and border indications + */ + IPAHAL_DBG_LOW("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n", + params->total_sz_lcl_hash_tbls, + params->total_sz_lcl_nhash_tbls); + + IPAHAL_DBG_LOW("lcl tbl bdy count: hash=%u nhash=%u\n", + params->num_lcl_hash_tbls, + params->num_lcl_nhash_tbls); + + /* Align the sizes to coop with termination word + * and H/W local table start offset alignment + */ + if (params->nhash_bdy.size) { + params->nhash_bdy.size = params->total_sz_lcl_nhash_tbls; + /* for table terminator */ + params->nhash_bdy.size += obj->tbl_width * + params->num_lcl_nhash_tbls; + /* align the start of local rule-set */ + params->nhash_bdy.size += obj->lcladdr_alignment * + params->num_lcl_nhash_tbls; + /* SRAM block size alignment */ + params->nhash_bdy.size += obj->blk_sz_alignment; + params->nhash_bdy.size &= ~(obj->blk_sz_alignment); + + IPAHAL_DBG_LOW("nhash lcl tbl bdy total h/w size = %u\n", + params->nhash_bdy.size); + + params->nhash_bdy.base = dma_alloc_coherent( + ipahal_ctx->ipa_pdev, params->nhash_bdy.size, + ¶ms->nhash_bdy.phys_base, GFP_KERNEL); + if (!params->nhash_bdy.base) { + IPAHAL_ERR("fail to alloc DMA buff of size %d\n", + params->nhash_bdy.size); + return -ENOMEM; + } + memset(params->nhash_bdy.base, 0, params->nhash_bdy.size); + } + + if (!obj->support_hash && params->hash_bdy.size) { + IPAHAL_ERR("No HAL Hash tbls support - Will be ignored\n"); + WARN_ON(1); + } + + if (obj->support_hash && params->hash_bdy.size) { + 
params->hash_bdy.size = params->total_sz_lcl_hash_tbls; + /* for table terminator */ + params->hash_bdy.size += obj->tbl_width * + params->num_lcl_hash_tbls; + /* align the start of local rule-set */ + params->hash_bdy.size += obj->lcladdr_alignment * + params->num_lcl_hash_tbls; + /* SRAM block size alignment */ + params->hash_bdy.size += obj->blk_sz_alignment; + params->hash_bdy.size &= ~(obj->blk_sz_alignment); + + IPAHAL_DBG_LOW("hash lcl tbl bdy total h/w size = %u\n", + params->hash_bdy.size); + + params->hash_bdy.base = dma_alloc_coherent( + ipahal_ctx->ipa_pdev, params->hash_bdy.size, + ¶ms->hash_bdy.phys_base, GFP_KERNEL); + if (!params->hash_bdy.base) { + IPAHAL_ERR("fail to alloc DMA buff of size %d\n", + params->hash_bdy.size); + goto hash_bdy_fail; + } + memset(params->hash_bdy.base, 0, params->hash_bdy.size); + } + + return 0; + +hash_bdy_fail: + if (params->nhash_bdy.size) + ipahal_free_dma_mem(¶ms->nhash_bdy); + + return -ENOMEM; +} + +/* + * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures + * Used usually during commit. + * Allocates header structures and init them to point to empty DDR table + * Allocate body strucutres for local bodies tables + * @params: Parameters for IN and OUT regard the allocation. + */ +int ipahal_fltrt_allocate_hw_tbl_imgs( + struct ipahal_fltrt_alloc_imgs_params *params) +{ + IPAHAL_DBG_LOW("Entry\n"); + + /* Input validation */ + if (!params) { + IPAHAL_ERR("Input err: no params\n"); + return -EINVAL; + } + if (params->ipt >= IPA_IP_MAX) { + IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt); + return -EINVAL; + } + + if (ipa_fltrt_alloc_init_tbl_hdr(params)) { + IPAHAL_ERR("fail to alloc and init tbl hdr\n"); + return -ENOMEM; + } + + if (ipa_fltrt_alloc_lcl_bdy(params)) { + IPAHAL_ERR("fail to alloc tbl bodies\n"); + goto bdy_alloc_fail; + } + + return 0; + +bdy_alloc_fail: + ipahal_free_dma_mem(¶ms->nhash_hdr); + if (params->hash_hdr.size) + ipahal_free_dma_mem(¶ms->hash_hdr); + return -ENOMEM; +} + +/* + * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl + * @tbl_mem: IN/OUT param. size for effective table size. Pointer, for the + * allocated memory. + * + * The size is adapted for needed alignments/borders. + */ +int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem) +{ + struct ipahal_fltrt_obj *obj; + + IPAHAL_DBG_LOW("Entry\n"); + + if (!tbl_mem) { + IPAHAL_ERR("Input err\n"); + return -EINVAL; + } + + if (!tbl_mem->size) { + IPAHAL_ERR("Input err: zero table size\n"); + return -EINVAL; + } + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + /* add word for rule-set terminator */ + tbl_mem->size += obj->tbl_width; + + tbl_mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, tbl_mem->size, + &tbl_mem->phys_base, GFP_KERNEL); + if (!tbl_mem->base) { + IPAHAL_ERR("fail to alloc DMA buf of size %d\n", + tbl_mem->size); + return -ENOMEM; + } + if (tbl_mem->phys_base & obj->sysaddr_alignment) { + IPAHAL_ERR("sys rt tbl address is not aligned\n"); + goto align_err; + } + + memset(tbl_mem->base, 0, tbl_mem->size); + + return 0; + +align_err: + ipahal_free_dma_mem(tbl_mem); + return -EPERM; +} + +/* + * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address + * Given table addr/offset, adapt it to IPA H/W format and write it + * to given header index. 
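+ * Illustrative usage (editor sketch, variable names hypothetical): after
+ * allocating a system table with ipahal_fltrt_allocate_hw_sys_tbl(&tbl), its
+ * physical address can be written into slot hdr_idx of a non-hashable header
+ * image with
+ *   ipahal_fltrt_write_addr_to_hdr(tbl.phys_base, nhash_hdr.base, hdr_idx, true);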
+ * @addr: Address or offset to be used + * @hdr_base: base address of header structure to write the address + * @hdr_idx: index of the address in the header structure + * @is_sys: Is it system address or local offset + */ +int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx, + bool is_sys) +{ + struct ipahal_fltrt_obj *obj; + u64 hwaddr; + u8 *hdr; + + IPAHAL_DBG_LOW("Entry\n"); + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!addr || !hdr_base) { + IPAHAL_ERR("Input err: addr=0x%llx hdr_base=%pK\n", + addr, hdr_base); + return -EINVAL; + } + + hdr = (u8 *)hdr_base; + hdr += hdr_idx * obj->tbl_hdr_width; + hwaddr = obj->create_tbl_addr(is_sys, addr); + obj->write_val_to_hdr(hwaddr, hdr); + + return 0; +} + +/* + * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read it's + * content (physical address or offset) and parse it. + * @hdr_base: base sram address of the header structure. + * @hdr_idx: index of the header entry line in the header structure. + * @addr: The parsed address - Out parameter + * @is_sys: Is this system or local address - Out parameter + */ +int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr, + bool *is_sys) +{ + struct ipahal_fltrt_obj *obj; + u64 hwaddr; + u8 *hdr; + + IPAHAL_DBG_LOW("Entry\n"); + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (!addr || !hdr_base || !is_sys) { + IPAHAL_ERR("Input err: addr=%pK hdr_base=%pK is_sys=%pK\n", + addr, hdr_base, is_sys); + return -EINVAL; + } + + hdr = (u8 *)hdr_base; + hdr += hdr_idx * obj->tbl_hdr_width; + hwaddr = *((u64 *)hdr); + obj->parse_tbl_addr(hwaddr, addr, is_sys); + return 0; +} + +/* + * ipahal_rt_generate_hw_rule() - generates the routing hardware rule + * @params: Params for the rule creation. + * @hw_len: Size of the H/W rule to be returned + * @buf: Buffer to build the rule in. If buf is NULL, then the rule will + * be built in internal temp buf. This is used e.g. to get the rule size + * only. + */ +int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params, + u32 *hw_len, u8 *buf) +{ + struct ipahal_fltrt_obj *obj; + u8 *tmp = NULL; + int rc; + + IPAHAL_DBG_LOW("Entry\n"); + + if (!params || !hw_len) { + IPAHAL_ERR("Input err: params=%pK hw_len=%pK\n", + params, hw_len); + return -EINVAL; + } + if (!params->rule) { + IPAHAL_ERR("Input err: invalid rule\n"); + return -EINVAL; + } + if (params->ipt >= IPA_IP_MAX) { + IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt); + return -EINVAL; + } + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (buf == NULL) { + tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + buf = tmp; + } else + if ((long)buf & obj->rule_start_alignment) { + IPAHAL_ERR("buff is not rule rule start aligned\n"); + return -EPERM; + } + + rc = ipahal_fltrt_objs[ipahal_ctx->hw_type].rt_generate_hw_rule( + params, hw_len, buf); + if (!tmp && !rc) { + /* write the rule-set terminator */ + memset(buf + *hw_len, 0, obj->tbl_width); + } + + kfree(tmp); + + return rc; +} + +/* + * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule. + * @params: Params for the rule creation. + * @hw_len: Size of the H/W rule to be returned + * @buf: Buffer to build the rule in. If buf is NULL, then the rule will + * be built in internal temp buf. This is used e.g. to get the rule size + * only. 
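+ * A common two-pass usage (editor note, illustrative): call once with
+ * buf == NULL so that hw_len returns the rule size, then call again with a
+ * rule-start-aligned position inside the table image to emit the rule there.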
+ */ +int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params, + u32 *hw_len, u8 *buf) +{ + struct ipahal_fltrt_obj *obj; + u8 *tmp = NULL; + int rc; + + IPAHAL_DBG_LOW("Entry\n"); + + if (!params || !hw_len) { + IPAHAL_ERR("Input err: params=%pK hw_len=%pK\n", + params, hw_len); + return -EINVAL; + } + if (!params->rule) { + IPAHAL_ERR("Input err: invalid rule\n"); + return -EINVAL; + } + if (params->ipt >= IPA_IP_MAX) { + IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt); + return -EINVAL; + } + + obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type]; + + if (buf == NULL) { + tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL); + if (!tmp) { + IPAHAL_ERR("failed to alloc %u bytes\n", + obj->rule_buf_size); + return -ENOMEM; + } + buf = tmp; + } else + if ((long)buf & obj->rule_start_alignment) { + IPAHAL_ERR("buff is not rule rule start aligned\n"); + return -EPERM; + } + + rc = ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_generate_hw_rule( + params, hw_len, buf); + if (!tmp && !rc) { + /* write the rule-set terminator */ + memset(buf + *hw_len, 0, obj->tbl_width); + } + + kfree(tmp); + + return rc; + +} + +/* + * ipahal_flt_generate_equation() - generate flt rule in equation form + * Will build equation form flt rule from given info. + * @ipt: IP family + * @attrib: Rule attribute to be generated + * @eq_atrb: Equation form generated rule + * Note: Usage example: Pass the generated form to other sub-systems + * for inter-subsystems rules exchange. + */ +int ipahal_flt_generate_equation(enum ipa_ip_type ipt, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb) +{ + IPAHAL_DBG_LOW("Entry\n"); + + if (ipt >= IPA_IP_MAX) { + IPAHAL_ERR("Input err: Invalid ip type %d\n", ipt); + return -EINVAL; + } + + if (!attrib || !eq_atrb) { + IPAHAL_ERR("Input err: attrib=%pK eq_atrb=%pK\n", + attrib, eq_atrb); + return -EINVAL; + } + + return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_generate_eq(ipt, + attrib, eq_atrb); + +} + +/* + * ipahal_rt_parse_hw_rule() - Parse H/W formated rt rule + * Given the rule address, read the rule info from H/W and parse it. + * @rule_addr: Rule address (virtual memory) + * @rule: Out parameter for parsed rule info + */ +int ipahal_rt_parse_hw_rule(u8 *rule_addr, + struct ipahal_rt_rule_entry *rule) +{ + IPAHAL_DBG_LOW("Entry\n"); + + if (!rule_addr || !rule) { + IPAHAL_ERR("Input err: rule_addr=%pK rule=%pK\n", + rule_addr, rule); + return -EINVAL; + } + + return ipahal_fltrt_objs[ipahal_ctx->hw_type].rt_parse_hw_rule( + rule_addr, rule); +} + +/* + * ipahal_flt_parse_hw_rule() - Parse H/W formated flt rule + * Given the rule address, read the rule info from H/W and parse it. + * @rule_addr: Rule address (virtual memory) + * @rule: Out parameter for parsed rule info + */ +int ipahal_flt_parse_hw_rule(u8 *rule_addr, + struct ipahal_flt_rule_entry *rule) +{ + IPAHAL_DBG_LOW("Entry\n"); + + if (!rule_addr || !rule) { + IPAHAL_ERR("Input err: rule_addr=%pK rule=%pK\n", + rule_addr, rule); + return -EINVAL; + } + + return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_parse_hw_rule( + rule_addr, rule); +} + diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h new file mode 100644 index 000000000000..3ee883b6fb20 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h @@ -0,0 +1,291 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPAHAL_FLTRT_H_ +#define _IPAHAL_FLTRT_H_ + +/* + * struct ipahal_fltrt_alloc_imgs_params - Params for tbls imgs allocations + * The allocation logic will allocate DMA memory representing the header. + * If the bodies are local (SRAM) the allocation will allocate + * a DMA buffers that would contain the content of these local tables in raw + * @ipt: IP version type + * @tbls_num: Number of tables to represent by the header + * @num_lcl_hash_tbls: Number of local (sram) hashable tables + * @num_lcl_nhash_tbls: Number of local (sram) non-hashable tables + * @total_sz_lcl_hash_tbls: Total size of local hashable tables + * @total_sz_lcl_nhash_tbls: Total size of local non-hashable tables + * @hash_hdr/nhash_hdr: OUT params for the header structures + * @hash_bdy/nhash_bdy: OUT params for the local body structures + */ +struct ipahal_fltrt_alloc_imgs_params { + enum ipa_ip_type ipt; + u32 tbls_num; + u32 num_lcl_hash_tbls; + u32 num_lcl_nhash_tbls; + u32 total_sz_lcl_hash_tbls; + u32 total_sz_lcl_nhash_tbls; + + /* OUT PARAMS */ + struct ipa_mem_buffer hash_hdr; + struct ipa_mem_buffer nhash_hdr; + struct ipa_mem_buffer hash_bdy; + struct ipa_mem_buffer nhash_bdy; +}; + +/* + * enum ipahal_rt_rule_hdr_type - Header type used in rt rules + * @IPAHAL_RT_RULE_HDR_NONE: No header is used + * @IPAHAL_RT_RULE_HDR_RAW: Raw header is used + * @IPAHAL_RT_RULE_HDR_PROC_CTX: Header Processing context is used + */ +enum ipahal_rt_rule_hdr_type { + IPAHAL_RT_RULE_HDR_NONE, + IPAHAL_RT_RULE_HDR_RAW, + IPAHAL_RT_RULE_HDR_PROC_CTX, +}; + +/* + * struct ipahal_rt_rule_gen_params - Params for generating rt rule + * @ipt: IP family version + * @dst_pipe_idx: Destination pipe index + * @hdr_type: Header type to be used + * @hdr_lcl: Does header on local or system table? + * @hdr_ofst: Offset of the header in the header table + * @priority: Rule priority + * @id: Rule ID + * @rule: Rule info + */ +struct ipahal_rt_rule_gen_params { + enum ipa_ip_type ipt; + int dst_pipe_idx; + enum ipahal_rt_rule_hdr_type hdr_type; + bool hdr_lcl; + u32 hdr_ofst; + u32 priority; + u32 id; + const struct ipa_rt_rule *rule; +}; + +/* + * struct ipahal_rt_rule_entry - Rt rule info parsed from H/W + * @dst_pipe_idx: Destination pipe index + * @hdr_lcl: Does the references header located in sram or system mem? 
+ * @hdr_ofst: Offset of the header in the header table + * @hdr_type: Header type to be used + * @priority: Rule priority + * @retain_hdr: to retain the removed header in header removal + * @id: Rule ID + * @eq_attrib: Equations and their params in the rule + * @rule_size: Rule size in memory + */ +struct ipahal_rt_rule_entry { + int dst_pipe_idx; + bool hdr_lcl; + u32 hdr_ofst; + enum ipahal_rt_rule_hdr_type hdr_type; + u32 priority; + bool retain_hdr; + u32 id; + struct ipa_ipfltri_rule_eq eq_attrib; + u32 rule_size; +}; + +/* + * struct ipahal_flt_rule_gen_params - Params for generating flt rule + * @ipt: IP family version + * @rt_tbl_idx: Routing table the rule pointing to + * @priority: Rule priority + * @id: Rule ID + * @rule: Rule info + */ +struct ipahal_flt_rule_gen_params { + enum ipa_ip_type ipt; + u32 rt_tbl_idx; + u32 priority; + u32 id; + const struct ipa_flt_rule *rule; +}; + +/* + * struct ipahal_flt_rule_entry - Flt rule info parsed from H/W + * @rule: Rule info + * @priority: Rule priority + * @id: Rule ID + * @rule_size: Rule size in memory + */ +struct ipahal_flt_rule_entry { + struct ipa_flt_rule rule; + u32 priority; + u32 id; + u32 rule_size; +}; + +/* Get the H/W table (flt/rt) header width */ +u32 ipahal_get_hw_tbl_hdr_width(void); + +/* Get the H/W local table (SRAM) address alignment + * Tables headers references to local tables via offsets in SRAM + * This function return the alignment of the offset that IPA expects + */ +u32 ipahal_get_lcl_tbl_addr_alignment(void); + +/* + * Rule priority is used to distinguish rules order + * at the integrated table consisting from hashable and + * non-hashable tables. Max priority are rules that once are + * scanned by IPA, IPA will not look for further rules and use it. + */ +int ipahal_get_rule_max_priority(void); + +/* Given a priority, calc and return the next lower one if it is in + * legal range. + */ +int ipahal_rule_decrease_priority(int *prio); + +/* Does the given ID represents rule miss? */ +bool ipahal_is_rule_miss_id(u32 id); + +/* Get rule ID with high bit only asserted + * Used e.g. to create groups of IDs according to this bit + */ +u32 ipahal_get_rule_id_hi_bit(void); + +/* Get the low value possible to be used for rule-id */ +u32 ipahal_get_low_rule_id(void); + +/* + * ipahal_rt_generate_empty_img() - Generate empty route image + * Creates routing header buffer for the given tables number. + * For each table, make it point to the empty table on DDR. + * @tbls_num: Number of tables. For each will have an entry in the header + * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check + * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check + * @mem: mem object that points to DMA mem representing the hdr structure + * @atomic: should DMA allocation be executed with atomic flag + */ +int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size, + u32 nhash_hdr_size, struct ipa_mem_buffer *mem, bool atomic); + +/* + * ipahal_flt_generate_empty_img() - Generate empty filter image + * Creates filter header buffer for the given tables number. + * For each table, make it point to the empty table on DDR. + * @tbls_num: Number of tables. For each will have an entry in the header + * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check + * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check + * @ep_bitmap: Bitmap representing the EP that has flt tables. 
The format + * should be: bit0->EP0, bit1->EP1 + * @mem: mem object that points to DMA mem representing the hdr structure + * @atomic: should DMA allocation be executed with atomic flag + */ +int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size, + u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem, + bool atomic); + +/* + * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures + * Used usually during commit. + * Allocates header structures and inits them to point to the empty DDR table + * Allocates body structures for local body tables + * @params: Parameters for IN and OUT regarding the allocation. + */ +int ipahal_fltrt_allocate_hw_tbl_imgs( + struct ipahal_fltrt_alloc_imgs_params *params); + +/* + * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl + * @tbl_mem: IN/OUT param. Size for effective table size. Pointer for the + * allocated memory. + * + * The size is adapted for needed alignments/borders. + */ +int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem); + +/* + * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address + * Given table addr/offset, adapt it to IPA H/W format and write it + * to given header index. + * @addr: Address or offset to be used + * @hdr_base: base address of header structure to write the address + * @hdr_idx: index of the address in the header structure + * @is_sys: Is it a system address or a local offset + */ +int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx, + bool is_sys); + +/* + * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read its + * content (physical address or offset) and parse it. + * @hdr_base: base sram address of the header structure. + * @hdr_idx: index of the header entry line in the header structure. + * @addr: The parsed address - Out parameter + * @is_sys: Is this system or local address - Out parameter + */ +int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr, + bool *is_sys); + +/* + * ipahal_rt_generate_hw_rule() - generates the routing hardware rule. + * @params: Params for the rule creation. + * @hw_len: Size of the H/W rule to be returned + * @buf: Buffer to build the rule in. If buf is NULL, then the rule will + * be built in internal temp buf. This is used e.g. to get the rule size + * only. + */ +int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params, + u32 *hw_len, u8 *buf); + +/* + * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule. + * @params: Params for the rule creation. + * @hw_len: Size of the H/W rule to be returned + * @buf: Buffer to build the rule in. If buf is NULL, then the rule will + * be built in internal temp buf. This is used e.g. to get the rule size + * only. + */ +int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params, + u32 *hw_len, u8 *buf); + +/* + * ipahal_flt_generate_equation() - generate flt rule in equation form + * Will build equation form flt rule from given info. + * @ipt: IP family + * @attrib: Rule attribute to be generated + * @eq_atrb: Equation form generated rule + * Note: Usage example: Pass the generated form to other sub-systems + * for inter-subsystem rules exchange. + */ +int ipahal_flt_generate_equation(enum ipa_ip_type ipt, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb); + +/* + * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule + * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory) + * @rule: Out parameter for parsed rule info + */ +int ipahal_rt_parse_hw_rule(u8 *rule_addr, + struct ipahal_rt_rule_entry *rule); + +/* + * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule + * Given the rule address, read the rule info from H/W and parse it. + * @rule_addr: Rule address (virtual memory) + * @rule: Out parameter for parsed rule info + */ +int ipahal_flt_parse_hw_rule(u8 *rule_addr, + struct ipahal_flt_rule_entry *rule); + + +#endif /* _IPAHAL_FLTRT_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h new file mode 100644 index 000000000000..645383a8f1cf --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h @@ -0,0 +1,180 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPAHAL_FLTRT_I_H_ +#define _IPAHAL_FLTRT_I_H_ + +/* + * enum ipa_fltrt_equations - RULE equations + * These are named values for the equations that can be used + * The HAL layer holds the mapping between these names and the H/W + * presentation. + */ +enum ipa_fltrt_equations { + IPA_TOS_EQ, + IPA_PROTOCOL_EQ, + IPA_TC_EQ, + IPA_OFFSET_MEQ128_0, + IPA_OFFSET_MEQ128_1, + IPA_OFFSET_MEQ32_0, + IPA_OFFSET_MEQ32_1, + IPA_IHL_OFFSET_MEQ32_0, + IPA_IHL_OFFSET_MEQ32_1, + IPA_METADATA_COMPARE, + IPA_IHL_OFFSET_RANGE16_0, + IPA_IHL_OFFSET_RANGE16_1, + IPA_IHL_OFFSET_EQ_32, + IPA_IHL_OFFSET_EQ_16, + IPA_FL_EQ, + IPA_IS_FRAG, + IPA_EQ_MAX, +}; + +/* Width and Alignment values for H/W structures. + * Specific for IPA version. + */ +#define IPA3_0_HW_TBL_SYSADDR_ALIGNMENT (127) +#define IPA3_0_HW_TBL_LCLADDR_ALIGNMENT (7) +#define IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT (127) +#define IPA3_0_HW_TBL_WIDTH (8) +#define IPA3_0_HW_TBL_HDR_WIDTH (8) +#define IPA3_0_HW_TBL_ADDR_MASK (127) +#define IPA3_0_HW_RULE_BUF_SIZE (256) +#define IPA3_0_HW_RULE_START_ALIGNMENT (7) + + +/* + * Rules Priority. + * Needed due to rules classification into hashable and non-hashable. + * Higher priority is lower in number. i.e. 0 is highest priority + */ +#define IPA3_0_RULE_MAX_PRIORITY (0) +#define IPA3_0_RULE_MIN_PRIORITY (1023) + +/* + * RULE ID, bit length (e.g. 10 bits). + */ +#define IPA3_0_RULE_ID_BIT_LEN (10) +#define IPA3_0_LOW_RULE_ID (1) + +/** + * struct ipa3_0_rt_rule_hw_hdr - HW header of IPA routing rule + * @word: routing rule header properties + * @en_rule: enable rule - Equation bit fields + * @pipe_dest_idx: destination pipe index + * @system: Is the referenced header in lcl or sys memory + * @hdr_offset: header offset + * @proc_ctx: whether hdr_offset points to header table or to + * header processing context table + * @priority: Rule priority. Added to distinguish rules order + * at the integrated table consisting from hashable and + * non-hashable parts + * @rsvd1: reserved + * @retain_hdr: added to add back to the packet the header removed + * as part of header removal. This will be done as part of + * header insertion block.
+ * @rule_id: rule ID that will be returned in the packet status + * @rsvd2: reserved bits + */ +struct ipa3_0_rt_rule_hw_hdr { + union { + u64 word; + struct { + u64 en_rule:16; + u64 pipe_dest_idx:5; + u64 system:1; + u64 hdr_offset:9; + u64 proc_ctx:1; + u64 priority:10; + u64 rsvd1:5; + u64 retain_hdr:1; + u64 rule_id:10; + u64 rsvd2:6; + } hdr; + } u; +}; + +/** + * struct ipa3_0_flt_rule_hw_hdr - HW header of IPA filter rule + * @word: filtering rule properties + * @en_rule: enable rule + * @action: post filtering action + * @rt_tbl_idx: index in routing table + * @retain_hdr: added to add back to the packet the header removed + * as part of header removal. This will be done as part of + * header insertion block. + * @rsvd1: reserved bits + * @priority: Rule priority. Added to distinguish rules order + * at the integrated table consisting from hashable and + * non-hashable parts + * @rsvd2: reserved bits + * @rule_id: rule ID that will be returned in the packet status + * @rsvd3: reserved bits + */ +struct ipa3_0_flt_rule_hw_hdr { + union { + u64 word; + struct { + u64 en_rule:16; + u64 action:5; + u64 rt_tbl_idx:5; + u64 retain_hdr:1; + u64 rsvd1:5; + u64 priority:10; + u64 rsvd2:6; + u64 rule_id:10; + u64 rsvd3:6; + } hdr; + } u; +}; + +/** + * struct ipa4_0_flt_rule_hw_hdr - HW header of IPA filter rule + * @word: filtering rule properties + * @en_rule: enable rule + * @action: post filtering action + * @rt_tbl_idx: index in routing table + * @retain_hdr: added to add back to the packet the header removed + * as part of header removal. This will be done as part of + * header insertion block. + * @pdn_idx: in case of go to src nat action possible to input the pdn index to + * the NAT block + * @set_metadata: enable metadata replacement in the NAT block + * @priority: Rule priority. Added to distinguish rules order + * at the integrated table consisting from hashable and + * non-hashable parts + * @rsvd2: reserved bits + * @rule_id: rule ID that will be returned in the packet status + * @rsvd3: reserved bits + */ +struct ipa4_0_flt_rule_hw_hdr { + union { + u64 word; + struct { + u64 en_rule : 16; + u64 action : 5; + u64 rt_tbl_idx : 5; + u64 retain_hdr : 1; + u64 pdn_idx : 4; + u64 set_metadata : 1; + u64 priority : 10; + u64 rsvd2 : 6; + u64 rule_id : 10; + u64 rsvd3 : 6; + } hdr; + } u; +}; + +int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type); +void ipahal_fltrt_destroy(void); + +#endif /* _IPAHAL_FLTRT_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c new file mode 100644 index 000000000000..9c6cd10aa68a --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c @@ -0,0 +1,547 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "ipahal_hw_stats.h" +#include "ipahal_hw_stats_i.h" +#include "ipahal_i.h" + +struct ipahal_hw_stats_obj { + struct ipahal_stats_init_pyld *(*generate_init_pyld)(void *params, + bool is_atomic_ctx); + int (*get_offset)(void *params, struct ipahal_stats_offset *out); + int (*parse_stats)(void *init_params, void *raw_stats, + void *parsed_stats); +}; + +static int _count_ones(u32 number) +{ + int count = 0; + + while (number) { + count++; + number = number & (number - 1); + } + + return count; +} + +static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_quota( + void *params, bool is_atomic_ctx) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_stats_init_quota *in = + (struct ipahal_stats_init_quota *)params; + int entries = _count_ones(in->enabled_bitmask); + + IPAHAL_DBG_LOW("entries = %d\n", entries); + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + + entries * sizeof(struct ipahal_stats_quota_hw), is_atomic_ctx); + if (!pyld) { + IPAHAL_ERR("no mem\n"); + return NULL; + } + + pyld->len = entries * sizeof(struct ipahal_stats_quota_hw); + return pyld; +} + +static int ipahal_get_offset_quota(void *params, + struct ipahal_stats_offset *out) +{ + struct ipahal_stats_get_offset_quota *in = + (struct ipahal_stats_get_offset_quota *)params; + int entries = _count_ones(in->init.enabled_bitmask); + + IPAHAL_DBG_LOW("\n"); + out->offset = 0; + out->size = entries * sizeof(struct ipahal_stats_quota_hw); + + return 0; +} + +static int ipahal_parse_stats_quota(void *init_params, void *raw_stats, + void *parsed_stats) +{ + struct ipahal_stats_init_quota *init = + (struct ipahal_stats_init_quota *)init_params; + struct ipahal_stats_quota_hw *raw_hw = + (struct ipahal_stats_quota_hw *)raw_stats; + struct ipahal_stats_quota_all *out = + (struct ipahal_stats_quota_all *)parsed_stats; + int stat_idx = 0; + int i; + + memset(out, 0, sizeof(*out)); + IPAHAL_DBG_LOW("\n"); + for (i = 0; i < IPAHAL_MAX_PIPES; i++) { + if (init->enabled_bitmask & (1 << i)) { + IPAHAL_DBG_LOW("pipe %d stat_idx %d\n", i, stat_idx); + out->stats[i].num_ipv4_bytes = + raw_hw[stat_idx].num_ipv4_bytes; + out->stats[i].num_ipv4_pkts = + raw_hw[stat_idx].num_ipv4_pkts; + out->stats[i].num_ipv6_pkts = + raw_hw[stat_idx].num_ipv6_pkts; + out->stats[i].num_ipv6_bytes = + raw_hw[stat_idx].num_ipv6_bytes; + stat_idx++; + } + } + + return 0; +} + +static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_tethering( + void *params, bool is_atomic_ctx) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_stats_init_tethering *in = + (struct ipahal_stats_init_tethering *)params; + int hdr_entries = _count_ones(in->prod_bitmask); + int entries = 0; + int i; + void *pyld_ptr; + u32 incremental_offset; + + IPAHAL_DBG_LOW("prod entries = %d\n", hdr_entries); + for (i = 0; i < sizeof(in->prod_bitmask) * 8; i++) { + if (in->prod_bitmask & (1 << i)) { + if (in->cons_bitmask[i] == 0) { + IPAHAL_ERR("no cons bitmask for prod %d\n", i); + return NULL; + } + entries += _count_ones(in->cons_bitmask[i]); + } + } + IPAHAL_DBG_LOW("sum all entries = %d\n", entries); + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + + hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) + + entries * sizeof(struct ipahal_stats_tethering_hw), + is_atomic_ctx); + if (!pyld) + return NULL; + + pyld->len = hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) + + entries * sizeof(struct ipahal_stats_tethering_hw); + + pyld_ptr = pyld->data; + incremental_offset = + (hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw)) + / 8; + for 
(i = 0; i < sizeof(in->prod_bitmask) * 8; i++) { + if (in->prod_bitmask & (1 << i)) { + struct ipahal_stats_tethering_hdr_hw *hdr = pyld_ptr; + + hdr->dst_mask = in->cons_bitmask[i]; + hdr->offset = incremental_offset; + IPAHAL_DBG_LOW("hdr->dst_mask=0x%x\n", hdr->dst_mask); + IPAHAL_DBG_LOW("hdr->offset=0x%x\n", hdr->offset); + /* add the stats entry */ + incremental_offset += _count_ones(in->cons_bitmask[i]) * + sizeof(struct ipahal_stats_tethering_hw) / 8; + pyld_ptr += sizeof(*hdr); + } + } + + return pyld; +} + +static int ipahal_get_offset_tethering(void *params, + struct ipahal_stats_offset *out) +{ + struct ipahal_stats_get_offset_tethering *in = + (struct ipahal_stats_get_offset_tethering *)params; + int entries = 0; + int i; + + for (i = 0; i < sizeof(in->init.prod_bitmask) * 8; i++) { + if (in->init.prod_bitmask & (1 << i)) { + if (in->init.cons_bitmask[i] == 0) { + IPAHAL_ERR("no cons bitmask for prod %d\n", i); + return -EPERM; + } + entries += _count_ones(in->init.cons_bitmask[i]); + } + } + IPAHAL_DBG_LOW("sum all entries = %d\n", entries); + + /* skip the header */ + out->offset = _count_ones(in->init.prod_bitmask) * + sizeof(struct ipahal_stats_tethering_hdr_hw); + out->size = entries * sizeof(struct ipahal_stats_tethering_hw); + + return 0; +} + +static int ipahal_parse_stats_tethering(void *init_params, void *raw_stats, + void *parsed_stats) +{ + struct ipahal_stats_init_tethering *init = + (struct ipahal_stats_init_tethering *)init_params; + struct ipahal_stats_tethering_hw *raw_hw = + (struct ipahal_stats_tethering_hw *)raw_stats; + struct ipahal_stats_tethering_all *out = + (struct ipahal_stats_tethering_all *)parsed_stats; + int i, j; + int stat_idx = 0; + + memset(out, 0, sizeof(*out)); + IPAHAL_DBG_LOW("\n"); + for (i = 0; i < IPAHAL_MAX_PIPES; i++) { + for (j = 0; j < IPAHAL_MAX_PIPES; j++) { + if ((init->prod_bitmask & (1 << i)) && + init->cons_bitmask[i] & (1 << j)) { + IPAHAL_DBG_LOW("prod %d cons %d\n", i, j); + IPAHAL_DBG_LOW("stat_idx %d\n", stat_idx); + out->stats[i][j].num_ipv4_bytes = + raw_hw[stat_idx].num_ipv4_bytes; + IPAHAL_DBG_LOW("num_ipv4_bytes %lld\n", + out->stats[i][j].num_ipv4_bytes); + out->stats[i][j].num_ipv4_pkts = + raw_hw[stat_idx].num_ipv4_pkts; + IPAHAL_DBG_LOW("num_ipv4_pkts %lld\n", + out->stats[i][j].num_ipv4_pkts); + out->stats[i][j].num_ipv6_pkts = + raw_hw[stat_idx].num_ipv6_pkts; + IPAHAL_DBG_LOW("num_ipv6_pkts %lld\n", + out->stats[i][j].num_ipv6_pkts); + out->stats[i][j].num_ipv6_bytes = + raw_hw[stat_idx].num_ipv6_bytes; + IPAHAL_DBG_LOW("num_ipv6_bytes %lld\n", + out->stats[i][j].num_ipv6_bytes); + stat_idx++; + } + } + } + + return 0; +} + +static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_flt_rt( + void *params, bool is_atomic_ctx) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_stats_init_flt_rt *in = + (struct ipahal_stats_init_flt_rt *)params; + int hdr_entries; + int num_rules = 0; + int i, start_entry; + void *pyld_ptr; + u32 incremental_offset; + + for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) + num_rules += _count_ones(in->rule_id_bitmask[i]); + + if (num_rules == 0) { + IPAHAL_ERR("no rule ids provided\n"); + return NULL; + } + IPAHAL_DBG_LOW("num_rules = %d\n", num_rules); + + hdr_entries = IPAHAL_MAX_RULE_ID_32; + for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) { + if (in->rule_id_bitmask[i] != 0) + break; + hdr_entries--; + } + start_entry = i; + + for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= start_entry; i--) { + if (in->rule_id_bitmask[i] != 0) + break; + hdr_entries--; + } + 
IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries); + + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + + hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) + + num_rules * sizeof(struct ipahal_stats_flt_rt_hw), + is_atomic_ctx); + if (!pyld) { + IPAHAL_ERR("no mem\n"); + return NULL; + } + + pyld->len = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) + + num_rules * sizeof(struct ipahal_stats_flt_rt_hw); + + pyld_ptr = pyld->data; + incremental_offset = + (hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw)) + / 8; + for (i = start_entry; i < hdr_entries; i++) { + struct ipahal_stats_flt_rt_hdr_hw *hdr = pyld_ptr; + + hdr->en_mask = in->rule_id_bitmask[i]; + hdr->cnt_offset = incremental_offset; + /* add the stats entry */ + incremental_offset += _count_ones(in->rule_id_bitmask[i]) * + sizeof(struct ipahal_stats_flt_rt_hw) / 8; + pyld_ptr += sizeof(*hdr); + } + + return pyld; +} + +static int ipahal_get_offset_flt_rt(void *params, + struct ipahal_stats_offset *out) +{ + struct ipahal_stats_get_offset_flt_rt *in = + (struct ipahal_stats_get_offset_flt_rt *)params; + int i; + int hdr_entries; + int skip_rules = 0; + int start_entry; + int rule_bit = in->rule_id % 32; + int rule_idx = in->rule_id / 32; + + if (rule_idx >= IPAHAL_MAX_RULE_ID_32) { + IPAHAL_ERR("invalid rule_id %d\n", in->rule_id); + return -EPERM; + } + + hdr_entries = IPAHAL_MAX_RULE_ID_32; + for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) { + if (in->init.rule_id_bitmask[i] != 0) + break; + hdr_entries--; + } + + if (hdr_entries == 0) { + IPAHAL_ERR("no rule ids provided\n"); + return -EPERM; + } + start_entry = i; + + for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= 0; i--) { + if (in->init.rule_id_bitmask[i] != 0) + break; + hdr_entries--; + } + IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries); + + /* skip the header */ + out->offset = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw); + + /* skip the previous rules */ + for (i = start_entry; i < rule_idx; i++) + skip_rules += _count_ones(in->init.rule_id_bitmask[i]); + + for (i = 0; i < rule_bit; i++) + if (in->init.rule_id_bitmask[rule_idx] & (1 << i)) + skip_rules++; + + out->offset += skip_rules * sizeof(struct ipahal_stats_flt_rt_hw); + out->size = sizeof(struct ipahal_stats_flt_rt_hw); + + return 0; +} + +static int ipahal_parse_stats_flt_rt(void *init_params, void *raw_stats, + void *parsed_stats) +{ + struct ipahal_stats_flt_rt_hw *raw_hw = + (struct ipahal_stats_flt_rt_hw *)raw_stats; + struct ipahal_stats_flt_rt *out = + (struct ipahal_stats_flt_rt *)parsed_stats; + + memset(out, 0, sizeof(*out)); + IPAHAL_DBG_LOW("\n"); + out->num_packets = raw_hw->num_packets; + out->num_packets_hash = raw_hw->num_packets_hash; + + return 0; +} + +static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_drop( + void *params, bool is_atomic_ctx) +{ + struct ipahal_stats_init_pyld *pyld; + struct ipahal_stats_init_drop *in = + (struct ipahal_stats_init_drop *)params; + int entries = _count_ones(in->enabled_bitmask); + + IPAHAL_DBG_LOW("entries = %d\n", entries); + pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + + entries * sizeof(struct ipahal_stats_drop_hw), is_atomic_ctx); + if (!pyld) + return NULL; + + pyld->len = entries * sizeof(struct ipahal_stats_drop_hw); + + return pyld; +} + +static int ipahal_get_offset_drop(void *params, + struct ipahal_stats_offset *out) +{ + struct ipahal_stats_get_offset_drop *in = + (struct ipahal_stats_get_offset_drop *)params; + int entries = _count_ones(in->init.enabled_bitmask); + + IPAHAL_DBG_LOW("\n"); + out->offset = 0; + out->size = 
entries * sizeof(struct ipahal_stats_drop_hw); + + return 0; +} + +static int ipahal_parse_stats_drop(void *init_params, void *raw_stats, + void *parsed_stats) +{ + struct ipahal_stats_init_drop *init = + (struct ipahal_stats_init_drop *)init_params; + struct ipahal_stats_drop_hw *raw_hw = + (struct ipahal_stats_drop_hw *)raw_stats; + struct ipahal_stats_drop_all *out = + (struct ipahal_stats_drop_all *)parsed_stats; + int stat_idx = 0; + int i; + + memset(out, 0, sizeof(*out)); + IPAHAL_DBG_LOW("\n"); + for (i = 0; i < IPAHAL_MAX_PIPES; i++) { + if (init->enabled_bitmask & (1 << i)) { + out->stats[i].drop_byte_cnt = + raw_hw[stat_idx].drop_byte_cnt; + out->stats[i].drop_packet_cnt = + raw_hw[stat_idx].drop_packet_cnt; + stat_idx++; + } + } + + return 0; +} + +static struct ipahal_hw_stats_obj + ipahal_hw_stats_objs[IPA_HW_MAX][IPAHAL_HW_STATS_MAX] = { + /* IPAv4 */ + [IPA_HW_v4_0][IPAHAL_HW_STATS_QUOTA] = { + ipahal_generate_init_pyld_quota, + ipahal_get_offset_quota, + ipahal_parse_stats_quota + }, + [IPA_HW_v4_0][IPAHAL_HW_STATS_TETHERING] = { + ipahal_generate_init_pyld_tethering, + ipahal_get_offset_tethering, + ipahal_parse_stats_tethering + }, + [IPA_HW_v4_0][IPAHAL_HW_STATS_FNR] = { + ipahal_generate_init_pyld_flt_rt, + ipahal_get_offset_flt_rt, + ipahal_parse_stats_flt_rt + }, + [IPA_HW_v4_0][IPAHAL_HW_STATS_DROP] = { + ipahal_generate_init_pyld_drop, + ipahal_get_offset_drop, + ipahal_parse_stats_drop + }, +}; + +int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type) +{ + int i; + int j; + struct ipahal_hw_stats_obj zero_obj; + struct ipahal_hw_stats_obj *hw_stat_ptr; + + IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type); + + if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) { + IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type); + return -EINVAL; + } + + memset(&zero_obj, 0, sizeof(zero_obj)); + for (i = IPA_HW_v4_0 ; i < ipa_hw_type ; i++) { + for (j = 0; j < IPAHAL_HW_STATS_MAX; j++) { + if (!memcmp(&ipahal_hw_stats_objs[i + 1][j], &zero_obj, + sizeof(struct ipahal_hw_stats_obj))) { + memcpy(&ipahal_hw_stats_objs[i + 1][j], + &ipahal_hw_stats_objs[i][j], + sizeof(struct ipahal_hw_stats_obj)); + } else { + /* + * explicitly overridden stat. 
+ * Check validity + */ + hw_stat_ptr = &ipahal_hw_stats_objs[i + 1][j]; + if (!hw_stat_ptr->get_offset) { + IPAHAL_ERR( + "stat=%d get_offset null ver=%d\n", + j, i+1); + WARN_ON(1); + } + if (!hw_stat_ptr->parse_stats) { + IPAHAL_ERR( + "stat=%d parse_stats null ver=%d\n", + j, i + 1); + WARN_ON(1); + } + } + } + } + + return 0; +} + +int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params, + struct ipahal_stats_offset *out) +{ + if (type < 0 || type >= IPAHAL_HW_STATS_MAX) { + IPAHAL_ERR("Invalid type stat=%d\n", type); + WARN_ON(1); + return -EFAULT; + } + + if (!params || !out) { + IPAHAL_ERR("Null arg\n"); + WARN_ON(1); + return -EFAULT; + } + + return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].get_offset( + params, out); +} + +struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld( + enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx) +{ + struct ipahal_hw_stats_obj *hw_obj_ptr; + + if (type < 0 || type >= IPAHAL_HW_STATS_MAX) { + IPAHAL_ERR("Invalid type stat=%d\n", type); + WARN_ON(1); + return NULL; + } + + if (WARN(!params, "Null arg\n")) + return NULL; + + hw_obj_ptr = &ipahal_hw_stats_objs[ipahal_ctx->hw_type][type]; + return hw_obj_ptr->generate_init_pyld(params, is_atomic_ctx); +} + +int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params, + void *raw_stats, void *parsed_stats) +{ + if (WARN((type < 0 || type >= IPAHAL_HW_STATS_MAX), + "Invalid type stat = %d\n", type)) + return -EFAULT; + + if (WARN((!raw_stats || !parsed_stats), "Null arg\n")) + return -EFAULT; + + return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].parse_stats( + init_params, raw_stats, parsed_stats); +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h new file mode 100644 index 000000000000..cbb1dc302b57 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h @@ -0,0 +1,248 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _IPAHAL_HW_STATS_H_ +#define _IPAHAL_HW_STATS_H_ + +#include + +#define IPAHAL_MAX_PIPES 32 +#define IPAHAL_MAX_RULE_ID_32 (1024 / 32) /* 10 bits of rule id */ + +enum ipahal_hw_stats_type { + IPAHAL_HW_STATS_QUOTA, + IPAHAL_HW_STATS_TETHERING, + IPAHAL_HW_STATS_FNR, + IPAHAL_HW_STATS_DROP, + IPAHAL_HW_STATS_MAX +}; + +/* + * struct ipahal_stats_init_pyld - Statistics initialization payload + * @len: length of payload + * @data: actual payload data + */ +struct ipahal_stats_init_pyld { + u16 len; + u16 reserved; + u8 data[0]; +}; + +/* + * struct ipahal_stats_offset - Statistics offset parameters + * @offset: offset of the statistic from beginning of stats table + * @size: size of the statistics + */ +struct ipahal_stats_offset { + u32 offset; + u16 size; +}; + +/* + * struct ipahal_stats_init_quota - Initializations parameters for quota + * @enabled_bitmask: bit mask of pipes to be monitored + */ +struct ipahal_stats_init_quota { + u32 enabled_bitmask; +}; + +/* + * struct ipahal_stats_get_offset_quota - Get offset parameters for quota + * @init: initialization parameters used in initialization of stats + */ +struct ipahal_stats_get_offset_quota { + struct ipahal_stats_init_quota init; +}; + +/* + * struct ipahal_stats_quota - Quota statistics + * @num_ipv4_bytes: IPv4 bytes + * @num_ipv6_bytes: IPv6 bytes + * @num_ipv4_pkts: IPv4 packets + * @num_ipv6_pkts: IPv6 packets + */ +struct ipahal_stats_quota { + u64 num_ipv4_bytes; + u64 num_ipv6_bytes; + u64 num_ipv4_pkts; + u64 num_ipv6_pkts; +}; + +/* + * struct ipahal_stats_quota_all - Quota statistics for all pipes + * @stats: array of statistics per pipe + */ +struct ipahal_stats_quota_all { + struct ipahal_stats_quota stats[IPAHAL_MAX_PIPES]; +}; + +/* + * struct ipahal_stats_init_tethering - Initializations parameters for tethering + * @prod_bitmask: bit mask of producer pipes to be monitored + * @cons_bitmask: bit mask of consumer pipes to be monitored per producer + */ +struct ipahal_stats_init_tethering { + u32 prod_bitmask; + u32 cons_bitmask[IPAHAL_MAX_PIPES]; +}; + +/* + * struct ipahal_stats_get_offset_tethering - Get offset parameters for + * tethering + * @init: initialization parameters used in initialization of stats + */ +struct ipahal_stats_get_offset_tethering { + struct ipahal_stats_init_tethering init; +}; + +/* + * struct ipahal_stats_tethering - Tethering statistics + * @num_ipv4_bytes: IPv4 bytes + * @num_ipv6_bytes: IPv6 bytes + * @num_ipv4_pkts: IPv4 packets + * @num_ipv6_pkts: IPv6 packets + */ +struct ipahal_stats_tethering { + u64 num_ipv4_bytes; + u64 num_ipv6_bytes; + u64 num_ipv4_pkts; + u64 num_ipv6_pkts; +}; + +/* + * struct ipahal_stats_tethering_all - Tethering statistics for all pipes + * @stats: matrix of statistics per pair of pipes + */ +struct ipahal_stats_tethering_all { + struct ipahal_stats_tethering + stats[IPAHAL_MAX_PIPES][IPAHAL_MAX_PIPES]; +}; + +/* + * struct ipahal_stats_init_flt_rt - Initializations parameters for flt_rt + * @rule_id_bitmask: array describes which rule ids to monitor. 
+ * rule_id bit is determined by: + * index to the array => rule_id / 32 + * bit to enable => rule_id % 32 + */ +struct ipahal_stats_init_flt_rt { + u32 rule_id_bitmask[IPAHAL_MAX_RULE_ID_32]; +}; + +/* + * struct ipahal_stats_get_offset_flt_rt - Get offset parameters for flt_rt + * @init: initialization parameters used in initialization of stats + * @rule_id: rule_id to get the offset for + */ +struct ipahal_stats_get_offset_flt_rt { + struct ipahal_stats_init_flt_rt init; + u32 rule_id; +}; + +/* + * struct ipahal_stats_flt_rt - flt_rt statistics + * @num_packets: Total number of packets hit this rule + * @num_packets_hash: Total number of packets hit this rule in hash table + */ +struct ipahal_stats_flt_rt { + u32 num_packets; + u32 num_packets_hash; +}; + +/* + * struct ipahal_stats_init_drop - Initializations parameters for Drop + * @enabled_bitmask: bit mask of pipes to be monitored + */ +struct ipahal_stats_init_drop { + u32 enabled_bitmask; +}; + +/* + * struct ipahal_stats_get_offset_drop - Get offset parameters for Drop + * @init: initialization parameters used in initialization of stats + */ +struct ipahal_stats_get_offset_drop { + struct ipahal_stats_init_drop init; +}; + +/* + * struct ipahal_stats_drop - Packet Drop statistics + * @drop_packet_cnt: number of packets dropped + * @drop_byte_cnt: number of bytes dropped + */ +struct ipahal_stats_drop { + u32 drop_packet_cnt; + u32 drop_byte_cnt; +}; + +/* + * struct ipahal_stats_drop_all - Drop statistics for all pipes + * @stats: array of statistics per pipes + */ +struct ipahal_stats_drop_all { + struct ipahal_stats_drop stats[IPAHAL_MAX_PIPES]; +}; + +/* + * ipahal_stats_generate_init_pyld - Generate the init payload for stats + * @type: type of stats + * @params: init_pyld parameters based of stats type + * @is_atomic_ctx: is calling context atomic ? + * + * This function will generate the initialization payload for a particular + * statistic in hardware. IPA driver is expected to use this payload to + * initialize the SRAM. + * + * Return: pointer to ipahal_stats_init_pyld on success or NULL on failure. + */ +struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld( + enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx); + +/* + * ipahal_destroy_stats_init_pyld() - Destroy/Release bulk that was built + * by the ipahal_stats_generate_init_pyld function. + */ +static inline void ipahal_destroy_stats_init_pyld( + struct ipahal_stats_init_pyld *pyld) +{ + kfree(pyld); +} + +/* + * ipahal_stats_get_offset - Get the offset / size of payload for stats + * @type: type of stats + * @params: get_offset parameters based of stats type + * @out: out parameter for the offset and size. + * + * This function will return the offset of the counter from beginning of + * the table.IPA driver is expected to read this portion in SRAM and pass + * it to ipahal_parse_stats() to interprete the stats. 
+ * + * Return: 0 on success and negative on failure + */ +int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params, + struct ipahal_stats_offset *out); + +/* + * ipahal_parse_stats - parse statistics + * @type: type of stats + * @init_params: init_pyld parameters used on init + * @raw_stats: stats read from IPA SRAM + * @parsed_stats: pointer to parsed stats based on type + * + * Return: 0 on success and negative on failure + */ +int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params, + void *raw_stats, void *parsed_stats); + + +#endif /* _IPAHAL_HW_STATS_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h new file mode 100644 index 000000000000..3bb761da5a66 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h @@ -0,0 +1,55 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPAHAL_HW_STATS_I_H_ +#define _IPAHAL_HW_STATS_I_H_ + +#include "ipahal_hw_stats.h" + +int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type); + +struct ipahal_stats_quota_hw { + u64 num_ipv4_bytes; + u64 num_ipv4_pkts:32; + u64 num_ipv6_pkts:32; + u64 num_ipv6_bytes; +}; + +struct ipahal_stats_tethering_hdr_hw { + u64 dst_mask:32; + u64 offset:32; +}; + +struct ipahal_stats_tethering_hw { + u64 num_ipv4_bytes; + u64 num_ipv4_pkts:32; + u64 num_ipv6_pkts:32; + u64 num_ipv6_bytes; +}; + +struct ipahal_stats_flt_rt_hdr_hw { + u64 en_mask:32; + u64 reserved:16; + u64 cnt_offset:16; +}; + +struct ipahal_stats_flt_rt_hw { + u64 num_packets_hash:32; + u64 num_packets:32; +}; + +struct ipahal_stats_drop_hw { + u64 drop_byte_cnt:40; + u64 drop_packet_cnt:24; +}; + +#endif /* _IPAHAL_HW_STATS_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h new file mode 100644 index 000000000000..5eb1aef1fb19 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h @@ -0,0 +1,679 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPAHAL_I_H_ +#define _IPAHAL_I_H_ + +#include +#include "../../ipa_common_i.h" + +#define IPAHAL_DRV_NAME "ipahal" + +#define IPAHAL_DBG(fmt, args...) \ + do { \ + pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAHAL_DBG_LOW(fmt, args...) 
\ + do { \ + pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAHAL_ERR(fmt, args...) \ + do { \ + pr_err(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \ + ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \ + (kzalloc((__size), ((__is_atomic_ctx) ? GFP_ATOMIC : GFP_KERNEL))) + +/* + * struct ipahal_context - HAL global context data + * @hw_type: IPA H/W type/version. + * @base: Base address to be used for accessing IPA memory. This is + * I/O memory mapped address. + * Controlled by debugfs. default is off + * @dent: Debugfs folder dir entry + * @ipa_pdev: IPA Platform Device. Will be used for DMA memory + * @empty_fltrt_tbl: Empty table to be used at tables init. + */ +struct ipahal_context { + enum ipa_hw_type hw_type; + void __iomem *base; + struct dentry *dent; + struct device *ipa_pdev; + struct ipa_mem_buffer empty_fltrt_tbl; +}; + +extern struct ipahal_context *ipahal_ctx; + + + +/* Immediate commands H/W structures */ + +/* + * struct ipa_imm_cmd_hw_ip_v4_filter_init - IP_V4_FILTER_INIT command payload + * in H/W format. + * Inits IPv4 filter block. + * @hash_rules_addr: Addr in system mem where ipv4 hashable flt rules starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should + * be copied to + * @rsvd: reserved + * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts + */ +struct ipa_imm_cmd_hw_ip_v4_filter_init { + u64 hash_rules_addr:64; + u64 hash_rules_size:12; + u64 hash_local_addr:16; + u64 nhash_rules_size:12; + u64 nhash_local_addr:16; + u64 rsvd:8; + u64 nhash_rules_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_ip_v6_filter_init - IP_V6_FILTER_INIT command payload + * in H/W format. + * Inits IPv6 filter block. + * @hash_rules_addr: Addr in system mem where ipv6 hashable flt rules starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should + * be copied to + * @rsvd: reserved + * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts + */ +struct ipa_imm_cmd_hw_ip_v6_filter_init { + u64 hash_rules_addr:64; + u64 hash_rules_size:12; + u64 hash_local_addr:16; + u64 nhash_rules_size:12; + u64 nhash_local_addr:16; + u64 rsvd:8; + u64 nhash_rules_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_ip_v4_nat_init - IP_V4_NAT_INIT command payload + * in H/W format. + * Inits IPv4 NAT block. Initiates the NAT table with its dimensions, location, + * cache address and other related parameters. + * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start + * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT + * table starts. IPv4 NAT rules that result in NAT collision are located + * in this table.
+ * @index_table_addr: Addr in sys/shared mem where index table, which points + * to NAT table starts + * @index_table_expansion_addr: Addr in sys/shared mem where expansion index + * table starts + * @table_index: For future support of multiple NAT tables + * @rsvd1: reserved + * @ipv4_rules_addr_type: ipv4_rules_addr in sys or shared mem + * @ipv4_expansion_rules_addr_type: ipv4_expansion_rules_addr in + * sys or shared mem + * @index_table_addr_type: index_table_addr in sys or shared mem + * @index_table_expansion_addr_type: index_table_expansion_addr in + * sys or shared mem + * @size_base_tables: Num of entries in NAT tbl and idx tbl (each) + * @size_expansion_tables: Num of entries in NAT expantion tbl and expantion + * idx tbl (each) + * @rsvd2: reserved + * @public_ip_addr: public IP address. for IPAv4 this is the PDN config table + * offset in SMEM + */ +struct ipa_imm_cmd_hw_ip_v4_nat_init { + u64 ipv4_rules_addr:64; + u64 ipv4_expansion_rules_addr:64; + u64 index_table_addr:64; + u64 index_table_expansion_addr:64; + u64 table_index:3; + u64 rsvd1:1; + u64 ipv4_rules_addr_type:1; + u64 ipv4_expansion_rules_addr_type:1; + u64 index_table_addr_type:1; + u64 index_table_expansion_addr_type:1; + u64 size_base_tables:12; + u64 size_expansion_tables:10; + u64 rsvd2:2; + u64 public_ip_addr:32; +}; + +/* + * struct ipa_imm_cmd_hw_ip_v4_routing_init - IP_V4_ROUTING_INIT command payload + * in H/W format. + * Inits IPv4 routing table/structure - with the rules and other related params + * @hash_rules_addr: Addr in system mem where ipv4 hashable rt rules starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should + * be copied to + * @rsvd: reserved + * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts + */ +struct ipa_imm_cmd_hw_ip_v4_routing_init { + u64 hash_rules_addr:64; + u64 hash_rules_size:12; + u64 hash_local_addr:16; + u64 nhash_rules_size:12; + u64 nhash_local_addr:16; + u64 rsvd:8; + u64 nhash_rules_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_ip_v6_routing_init - IP_V6_ROUTING_INIT command payload + * in H/W format. + * Inits IPv6 routing table/structure - with the rules and other related params + * @hash_rules_addr: Addr in system mem where ipv6 hashable rt rules starts + * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem + * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should + * be copied to + * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem + * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should + * be copied to + * @rsvd: reserved + * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts + */ +struct ipa_imm_cmd_hw_ip_v6_routing_init { + u64 hash_rules_addr:64; + u64 hash_rules_size:12; + u64 hash_local_addr:16; + u64 nhash_rules_size:12; + u64 nhash_local_addr:16; + u64 rsvd:8; + u64 nhash_rules_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_hdr_init_local - HDR_INIT_LOCAL command payload + * in H/W format. + * Inits hdr table within local mem with the hdrs and their length. 
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC) + * @size_hdr_table: Size of the above (in bytes) + * @hdr_addr: header address in IPA sram (used as DST for memory copy) + * @rsvd: reserved + */ +struct ipa_imm_cmd_hw_hdr_init_local { + u64 hdr_table_addr:64; + u64 size_hdr_table:12; + u64 hdr_addr:16; + u64 rsvd:4; +}; + +/* + * struct ipa_imm_cmd_hw_nat_dma - NAT_DMA command payload + * in H/W format + * Perform DMA operation on NAT related mem addressess. Copy data into + * different locations within NAT associated tbls. (For add/remove NAT rules) + * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op. + * @rsvd1: reserved + * @base_addr: Base addr to which the DMA operation should be performed. + * @rsvd2: reserved + * @offset: offset in bytes from base addr to write 'data' to + * @data: data to be written + * @rsvd3: reserved + */ +struct ipa_imm_cmd_hw_nat_dma { + u64 table_index:3; + u64 rsvd1:1; + u64 base_addr:2; + u64 rsvd2:2; + u64 offset:32; + u64 data:16; + u64 rsvd3:8; +}; + +/* + * struct ipa_imm_cmd_hw_table_dma_ipav4 - TABLE_DMA command payload + * in H/W format + * Perform DMA operation on NAT and ipv6 connection tracking related mem + * addresses. Copy data into different locations within NAT associated tbls + * (For add/remove NAT rules) + * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op. + * @rsvd1: reserved + * @base_addr: Base addr to which the DMA operation should be performed. + * @rsvd2: reserved + * @offset: offset in bytes from base addr to write 'data' to + * @data: data to be written + * @rsvd3: reserved + */ +struct ipa_imm_cmd_hw_table_dma_ipav4 { + u64 table_index : 3; + u64 rsvd1 : 1; + u64 base_addr : 3; + u64 rsvd2 : 1; + u64 offset : 32; + u64 data : 16; + u64 rsvd3 : 8; +}; + +/* + * struct ipa_imm_cmd_hw_hdr_init_system - HDR_INIT_SYSTEM command payload + * in H/W format. + * Inits hdr table within sys mem with the hdrs and their length. + * @hdr_table_addr: Word address in system memory where the hdrs tbl starts. + */ +struct ipa_imm_cmd_hw_hdr_init_system { + u64 hdr_table_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_ip_packet_init - IP_PACKET_INIT command payload + * in H/W format. + * Configuration for specific IP pkt. Shall be called prior to an IP pkt + * data. Pkt will not go through IP pkt processing. + * @destination_pipe_index: Destination pipe index (in case routing + * is enabled, this field will overwrite the rt rule) + * @rsvd: reserved + */ +struct ipa_imm_cmd_hw_ip_packet_init { + u64 destination_pipe_index:5; + u64 rsv1:59; +}; + +/* + * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload + * in H/W format. + * Write value to register. Allows reg changes to be synced with data packet + * and other immediate command. Can be used to access the sram + * @sw_rsvd: Ignored by H/W. May be used by S/W + * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 
1 don't wait + * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr + * @value: value to write to register + * @value_mask: mask specifying which value bits to write to the register + * @pipeline_clear_options: options for pipeline to clear + * 0: HPS - no pkt inside HPS (not grp specific) + * 1: source group - The immediate cmd src grp does not use any pkt ctxs + * 2: Wait until no pkt reside inside IPA pipeline + * 3: reserved + * @rsvd: reserved - should be set to zero + */ +struct ipa_imm_cmd_hw_register_write { + u64 sw_rsvd:15; + u64 skip_pipeline_clear:1; + u64 offset:16; + u64 value:32; + u64 value_mask:32; + u64 pipeline_clear_options:2; + u64 rsvd:30; +}; + +/* + * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload + * in H/W format. + * Write value to register. Allows reg changes to be synced with data packet + * and other immediate command. Can be used to access the sram + * @sw_rsvd: Ignored by H/W. May be used by S/W + * @offset_high: high bits of the Offset field - bits 17-20 + * @rsvd: reserved - should be set to zero + * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr + * @value: value to write to register + * @value_mask: mask specifying which value bits to write to the register + * @rsvd2: reserved - should be set to zero + */ +struct ipa_imm_cmd_hw_register_write_v_4_0 { + u64 sw_rsvd:11; + u64 offset_high:4; + u64 rsvd:1; + u64 offset:16; + u64 value:32; + u64 value_mask:32; + u64 rsvd2:32; +}; + +/* + * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload + * in H/W format. + * Perform mem copy into or out of the SW area of IPA local mem + * @sw_rsvd: Ignored by H/W. My be used by S/W + * @size: Size in bytes of data to copy. Expected size is up to 2K bytes + * @local_addr: Address in IPA local memory + * @direction: Read or write? + * 0: IPA write, Write to local address from system address + * 1: IPA read, Read from local address to system address + * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait + * @pipeline_clear_options: options for pipeline to clear + * 0: HPS - no pkt inside HPS (not grp specific) + * 1: source group - The immediate cmd src grp does npt use any pkt ctxs + * 2: Wait until no pkt reside inside IPA pipeline + * 3: reserved + * @rsvd: reserved - should be set to zero + * @system_addr: Address in system memory + */ +struct ipa_imm_cmd_hw_dma_shared_mem { + u64 sw_rsvd:16; + u64 size:16; + u64 local_addr:16; + u64 direction:1; + u64 skip_pipeline_clear:1; + u64 pipeline_clear_options:2; + u64 rsvd:12; + u64 system_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload + * in H/W format. + * Perform mem copy into or out of the SW area of IPA local mem + * @sw_rsvd: Ignored by H/W. My be used by S/W + * @size: Size in bytes of data to copy. Expected size is up to 2K bytes + * @clear_after_read: Clear local memory at the end of a read operation allows + * atomic read and clear if HPS is clear. Ignore for writes. + * @local_addr: Address in IPA local memory + * @direction: Read or write? 
+ * 0: IPA write, Write to local address from system address + * 1: IPA read, Read from local address to system address + * @rsvd: reserved - should be set to zero + * @system_addr: Address in system memory + */ +struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 { + u64 sw_rsvd:15; + u64 clear_after_read:1; + u64 size:16; + u64 local_addr:16; + u64 direction:1; + u64 rsvd:15; + u64 system_addr:64; +}; + +/* + * struct ipa_imm_cmd_hw_ip_packet_tag_status - + * IP_PACKET_TAG_STATUS command payload in H/W format. + * This cmd is used to allow SW to track HW processing by setting a TAG + * value that is passed back to SW inside Packet Status information. + * TAG info will be provided as part of Packet Status info generated for + * the next pkt transferred over the pipe. + * This immediate command must be followed by a packet in the same transfer. + * @sw_rsvd: Ignored by H/W. May be used by S/W + * @tag: Tag that is provided back to SW + */ +struct ipa_imm_cmd_hw_ip_packet_tag_status { + u64 sw_rsvd:16; + u64 tag:48; +}; + +/* + * struct ipa_imm_cmd_hw_dma_task_32b_addr - + * IPA_DMA_TASK_32B_ADDR command payload in H/W format. + * Used by clients using 32bit addresses. Used to perform DMA operation on + * multiple descriptors. + * The Opcode is dynamic, where it holds the number of buffers to process + * @sw_rsvd: Ignored by H/W. May be used by S/W + * @cmplt: Complete flag: When asserted IPA will interrupt SW when the entire + * DMA related data was completely xfered to its destination. + * @eof: End Of Frame flag: When asserted IPA will assert the EOT to the + * dest client. This is used for aggr sequence + * @flsh: Flush flag: When asserted, pkt will go through the IPA blocks but + * will not be xfered to dest client but rather will be discarded + * @lock: Lock pipe flag: When asserted, IPA will stop processing descriptors + * from other EPs in the same src grp (RX queue) + * @unlock: Unlock pipe flag: When asserted, IPA will stop exclusively + * servicing current EP out of the src EPs of the grp (RX queue) + * @size1: Size of buffer1 data + * @addr1: Pointer to buffer1 data + * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs, + * only the first one needs to have this field set. It will be ignored + * in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK + * must contain this field (2 or more buffers) or EOT. + */ +struct ipa_imm_cmd_hw_dma_task_32b_addr { + u64 sw_rsvd:11; + u64 cmplt:1; + u64 eof:1; + u64 flsh:1; + u64 lock:1; + u64 unlock:1; + u64 size1:16; + u64 addr1:32; + u64 packet_size:16; +}; + + + +/* IPA Status packet H/W structures and info */ + +/* + * struct ipa_status_pkt_hw - IPA status packet payload in H/W format. + * This structure describes the status packet H/W structure for the + * following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET, + * IPA_STATUS_SUSPENDED_PACKET. + * Other status types have a different status packet structure. + * @status_opcode: The Type of the status (Opcode). + * @exception: (not bitmask) - the first exception that took place. + * In case of exception, src endp and pkt len are always valid. + * @status_mask: Bit mask specifying on which H/W blocks the pkt was processed. + * @pkt_len: Pkt pyld len including hdr, including retained hdr if used. Does + * not include padding or checksum trailer len. + * @endp_src_idx: Source end point index. + * @rsvd1: reserved + * @endp_dest_idx: Destination end point index.
+ * Not valid in case of exception + * @rsvd2: reserved + * @metadata: meta data value used by packet + * @flt_local: Filter table location flag: Does the matching flt rule belong to + * a flt tbl that resides in lcl memory? (if not, then system mem) + * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl? + * @flt_global: Global filter rule flag: Does the matching flt rule belong to + * the global flt tbl? (if not, then the per endp tables) + * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule + * specify to retain the header? + * @flt_rule_id: The ID of the matching filter rule. This info can be combined + * with endp_src_idx to locate the exact rule. ID=0x3FF reserved to specify + * flt miss. In case of miss, all flt info to be ignored + * @rt_local: Route table location flag: Does the matching rt rule belong to + * an rt tbl that resides in lcl memory? (if not, then system mem) + * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl? + * @ucp: UC Processing flag. + * @rt_tbl_idx: Index of the rt tbl that contains the matched rule + * @rt_rule_id: The ID of the matching rt rule. This info can be combined + * with rt_tbl_idx to locate the exact rule. ID=0x3FF reserved to specify + * rt miss. In case of miss, all rt info to be ignored + * @nat_hit: NAT hit flag: Was there a NAT hit? + * @nat_entry_idx: Index of the NAT entry used for NAT processing + * @nat_type: Defines the type of the NAT operation: + * 00: No NAT + * 01: Source NAT + * 10: Destination NAT + * 11: Reserved + * @tag_info: S/W defined value provided via immediate command + * @seq_num: Per source endp unique packet sequence number + * @time_of_day_ctr: running counter from IPA clock + * @hdr_local: Header table location flag: In header insertion, was the header + * taken from the table that resides in local memory? (If no, then system mem) + * @hdr_offset: Offset of used header in the header table + * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit + * @hw_specific: H/W specific reserved value + */ +struct ipa_pkt_status_hw { + u64 status_opcode:8; + u64 exception:8; + u64 status_mask:16; + u64 pkt_len:16; + u64 endp_src_idx:5; + u64 rsvd1:3; + u64 endp_dest_idx:5; + u64 rsvd2:3; + u64 metadata:32; + u64 flt_local:1; + u64 flt_hash:1; + u64 flt_global:1; + u64 flt_ret_hdr:1; + u64 flt_rule_id:10; + u64 rt_local:1; + u64 rt_hash:1; + u64 ucp:1; + u64 rt_tbl_idx:5; + u64 rt_rule_id:10; + u64 nat_hit:1; + u64 nat_entry_idx:13; + u64 nat_type:2; + u64 tag_info:48; + u64 seq_num:8; + u64 time_of_day_ctr:24; + u64 hdr_local:1; + u64 hdr_offset:10; + u64 frag_hit:1; + u64 frag_rule:4; + u64 hw_specific:16; +}; + +/* Size of H/W Packet Status */ +#define IPA3_0_PKT_STATUS_SIZE 32 + +/* Headers and processing context H/W structures and definitions */ + +/* uCP command numbers */ +#define IPA_HDR_UCP_802_3_TO_802_3 6 +#define IPA_HDR_UCP_802_3_TO_ETHII 7 +#define IPA_HDR_UCP_ETHII_TO_802_3 8 +#define IPA_HDR_UCP_ETHII_TO_ETHII 9 +#define IPA_HDR_UCP_L2TP_HEADER_ADD 10 +#define IPA_HDR_UCP_L2TP_HEADER_REMOVE 11 + +/* Processing context TLV type */ +#define IPA_PROC_CTX_TLV_TYPE_END 0 +#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1 +#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3 + +/** + * struct ipa_hw_hdr_proc_ctx_tlv - + * HW structure of IPA processing context header - TLV part + * @type: 0 - end type + * 1 - header addition type + * 3 - processing command type + * @length: number of bytes after tlv + * for type: + * 0 - needs to be 0 + * 1 - header addition length + * 3 - number of 32B including type and length. + * @value: specific value for type + * for type: + * 0 - needs to be 0 + * 1 - header length + * 3 - command ID (see IPA_HDR_UCP_* definitions) + */ +struct ipa_hw_hdr_proc_ctx_tlv { + u32 type:8; + u32 length:8; + u32 value:16; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_hdr_add - + * HW structure of IPA processing context - add header tlv + * @tlv: IPA processing context TLV + * @hdr_addr: processing context header address + */ +struct ipa_hw_hdr_proc_ctx_hdr_add { + struct ipa_hw_hdr_proc_ctx_tlv tlv; + u32 hdr_addr; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr - + * HW structure of IPA processing context - add l2tp header tlv + * @tlv: IPA processing context TLV + * @l2tp_params: l2tp parameters + */ +struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr { + struct ipa_hw_hdr_proc_ctx_tlv tlv; + struct ipa_l2tp_header_add_procparams l2tp_params; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr - + * HW structure of IPA processing context - remove l2tp header tlv + * @tlv: IPA processing context TLV + * @l2tp_params: l2tp parameters + */ +struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr { + struct ipa_hw_hdr_proc_ctx_tlv tlv; + struct ipa_l2tp_header_remove_procparams l2tp_params; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_add_hdr_seq - + * IPA processing context header - add header sequence + * @hdr_add: add header command + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hw_hdr_proc_ctx_add_hdr_seq { + struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hw_hdr_proc_ctx_tlv end; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq - + * IPA processing context header - process command sequence + * @hdr_add: add header command + * @cmd: tlv processing command (cmd.type must be 3) + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq { + struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hw_hdr_proc_ctx_tlv 
cmd; + struct ipa_hw_hdr_proc_ctx_tlv end; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq - + * IPA processing context header - process command sequence + * @hdr_add: add header command + * @l2tp_params: l2tp params for header addition + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq { + struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr l2tp_params; + struct ipa_hw_hdr_proc_ctx_tlv end; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq - + * IPA processing context header - process command sequence + * @hdr_add: add header command + * @l2tp_params: l2tp params for header removal + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq { + struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr l2tp_params; + struct ipa_hw_hdr_proc_ctx_tlv end; +}; + +/* IPA HW DPS/HPS image memory sizes */ +#define IPA_HW_DPS_IMG_MEM_SIZE_V3_0 128 +#define IPA_HW_HPS_IMG_MEM_SIZE_V3_0 320 + +#endif /* _IPAHAL_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c new file mode 100644 index 000000000000..6aa6082798d9 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c @@ -0,0 +1,1966 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include "ipahal_i.h" +#include "ipahal_reg.h" +#include "ipahal_reg_i.h" + +static const char *ipareg_name_to_str[IPA_REG_MAX] = { + __stringify(IPA_ROUTE), + __stringify(IPA_IRQ_STTS_EE_n), + __stringify(IPA_IRQ_EN_EE_n), + __stringify(IPA_IRQ_CLR_EE_n), + __stringify(IPA_IRQ_SUSPEND_INFO_EE_n), + __stringify(IPA_SUSPEND_IRQ_EN_EE_n), + __stringify(IPA_SUSPEND_IRQ_CLR_EE_n), + __stringify(IPA_BCR), + __stringify(IPA_ENABLED_PIPES), + __stringify(IPA_COMP_SW_RESET), + __stringify(IPA_VERSION), + __stringify(IPA_TAG_TIMER), + __stringify(IPA_COMP_HW_VERSION), + __stringify(IPA_SPARE_REG_1), + __stringify(IPA_SPARE_REG_2), + __stringify(IPA_COMP_CFG), + __stringify(IPA_STATE_AGGR_ACTIVE), + __stringify(IPA_ENDP_INIT_HDR_n), + __stringify(IPA_ENDP_INIT_HDR_EXT_n), + __stringify(IPA_ENDP_INIT_AGGR_n), + __stringify(IPA_AGGR_FORCE_CLOSE), + __stringify(IPA_ENDP_INIT_ROUTE_n), + __stringify(IPA_ENDP_INIT_MODE_n), + __stringify(IPA_ENDP_INIT_NAT_n), + __stringify(IPA_ENDP_INIT_CONN_TRACK_n), + __stringify(IPA_ENDP_INIT_CTRL_n), + __stringify(IPA_ENDP_INIT_CTRL_SCND_n), + __stringify(IPA_ENDP_INIT_HOL_BLOCK_EN_n), + __stringify(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n), + __stringify(IPA_ENDP_INIT_DEAGGR_n), + __stringify(IPA_ENDP_INIT_SEQ_n), + __stringify(IPA_DEBUG_CNT_REG_n), + __stringify(IPA_ENDP_INIT_CFG_n), + __stringify(IPA_IRQ_EE_UC_n), + __stringify(IPA_ENDP_INIT_HDR_METADATA_MASK_n), + __stringify(IPA_ENDP_INIT_HDR_METADATA_n), + __stringify(IPA_ENDP_INIT_RSRC_GRP_n), + __stringify(IPA_SHARED_MEM_SIZE), + __stringify(IPA_SRAM_DIRECT_ACCESS_n), + __stringify(IPA_DEBUG_CNT_CTRL_n), + __stringify(IPA_UC_MAILBOX_m_n), + __stringify(IPA_FILT_ROUT_HASH_FLUSH), + __stringify(IPA_SINGLE_NDP_MODE), + __stringify(IPA_QCNCM), + __stringify(IPA_SYS_PKT_PROC_CNTXT_BASE), + __stringify(IPA_LOCAL_PKT_PROC_CNTXT_BASE), + __stringify(IPA_ENDP_STATUS_n), + __stringify(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n), + __stringify(IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n), + __stringify(IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n), + __stringify(IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n), + __stringify(IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n), + __stringify(IPA_DST_RSRC_GRP_01_RSRC_TYPE_n), + __stringify(IPA_DST_RSRC_GRP_23_RSRC_TYPE_n), + __stringify(IPA_DST_RSRC_GRP_45_RSRC_TYPE_n), + __stringify(IPA_DST_RSRC_GRP_67_RSRC_TYPE_n), + __stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_0), + __stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_1), + __stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0), + __stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_1), + __stringify(IPA_HPS_FTCH_ARB_QUEUE_WEIGHT), + __stringify(IPA_QSB_MAX_WRITES), + __stringify(IPA_QSB_MAX_READS), + __stringify(IPA_TX_CFG), + __stringify(IPA_IDLE_INDICATION_CFG), + __stringify(IPA_DPS_SEQUENCER_FIRST), + __stringify(IPA_HPS_SEQUENCER_FIRST), + __stringify(IPA_CLKON_CFG), + __stringify(IPA_STAT_QUOTA_BASE_n), + __stringify(IPA_STAT_QUOTA_MASK_n), + __stringify(IPA_STAT_TETHERING_BASE_n), + __stringify(IPA_STAT_TETHERING_MASK_n), + __stringify(IPA_STAT_FILTER_IPV4_BASE), + __stringify(IPA_STAT_FILTER_IPV6_BASE), + __stringify(IPA_STAT_ROUTER_IPV4_BASE), + __stringify(IPA_STAT_ROUTER_IPV6_BASE), + __stringify(IPA_STAT_FILTER_IPV4_START_ID), + __stringify(IPA_STAT_FILTER_IPV6_START_ID), + __stringify(IPA_STAT_ROUTER_IPV4_START_ID), + __stringify(IPA_STAT_ROUTER_IPV6_START_ID), + __stringify(IPA_STAT_FILTER_IPV4_END_ID), + __stringify(IPA_STAT_FILTER_IPV6_END_ID), + __stringify(IPA_STAT_ROUTER_IPV4_END_ID), + __stringify(IPA_STAT_ROUTER_IPV6_END_ID), + __stringify(IPA_STAT_DROP_CNT_BASE_n), 
+ __stringify(IPA_STAT_DROP_CNT_MASK_n), +}; + +static void ipareg_construct_dummy(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + IPAHAL_ERR("No construct function for %s\n", + ipahal_reg_name_str(reg)); + WARN(1, "invalid register operation"); +} + +static void ipareg_parse_dummy(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + IPAHAL_ERR("No parse function for %s\n", + ipahal_reg_name_str(reg)); + WARN(1, "invalid register operation"); +} + +static void ipareg_construct_rx_hps_clients_depth1( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rx_hps_clients *clients = + (struct ipahal_reg_rx_hps_clients *)fields; + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1)); +} + +static void ipareg_construct_rx_hps_clients_depth0( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rx_hps_clients *clients = + (struct ipahal_reg_rx_hps_clients *)fields; + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(2)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(3)); +} + +static void ipareg_construct_rx_hps_clients_depth0_v3_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rx_hps_clients *clients = + (struct ipahal_reg_rx_hps_clients *)fields; + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(0)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(1)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(2)); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3), + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(3)); +} + +static void ipareg_construct_rsrg_grp_xy( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rsrc_grp_cfg *grp = + (struct ipahal_reg_rsrc_grp_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, grp->x_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK); + IPA_SETFIELD_IN_REG(*val, grp->x_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK); + IPA_SETFIELD_IN_REG(*val, grp->y_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK); + IPA_SETFIELD_IN_REG(*val, grp->y_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK); +} + +static void 
ipareg_construct_rsrg_grp_xy_v3_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rsrc_grp_cfg *grp = + (struct ipahal_reg_rsrc_grp_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, grp->x_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5); + IPA_SETFIELD_IN_REG(*val, grp->x_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5); + + /* DST_23 register has only X fields at ipa V3_5 */ + if (reg == IPA_DST_RSRC_GRP_23_RSRC_TYPE_n) + return; + + IPA_SETFIELD_IN_REG(*val, grp->y_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5); + IPA_SETFIELD_IN_REG(*val, grp->y_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5); +} + +static void ipareg_construct_hash_cfg_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_fltrt_hash_tuple *tuple = + (struct ipahal_reg_fltrt_hash_tuple *)fields; + + IPA_SETFIELD_IN_REG(*val, tuple->flt.src_id, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.src_ip_addr, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_ip_addr, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.src_port, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_port, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.protocol, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->flt.meta_data, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->undefined1, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.src_id, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.src_ip_addr, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_ip_addr, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.src_port, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_port, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.protocol, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT, + 
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->rt.meta_data, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK); + IPA_SETFIELD_IN_REG(*val, tuple->undefined2, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK); +} + +static void ipareg_parse_hash_cfg_n( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_fltrt_hash_tuple *tuple = + (struct ipahal_reg_fltrt_hash_tuple *)fields; + + tuple->flt.src_id = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK); + tuple->flt.src_ip_addr = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK); + tuple->flt.dst_ip_addr = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK); + tuple->flt.src_port = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK); + tuple->flt.dst_port = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK); + tuple->flt.protocol = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK); + tuple->flt.meta_data = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK); + tuple->undefined1 = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK); + tuple->rt.src_id = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK); + tuple->rt.src_ip_addr = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK); + tuple->rt.dst_ip_addr = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK); + tuple->rt.src_port = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK); + tuple->rt.dst_port = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK); + tuple->rt.protocol = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK); + tuple->rt.meta_data = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK); + tuple->undefined2 = + IPA_GETFIELD_FROM_REG(val, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK); +} + +static void ipareg_construct_endp_status_n( + enum 
ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_ep_cfg_status *ep_status = + (struct ipahal_reg_ep_cfg_status *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_status->status_en, + IPA_ENDP_STATUS_n_STATUS_EN_SHFT, + IPA_ENDP_STATUS_n_STATUS_EN_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_status->status_ep, + IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT, + IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_status->status_location, + IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT, + IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK); +} + +static void ipareg_construct_endp_status_n_v4_0( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_ep_cfg_status *ep_status = + (struct ipahal_reg_ep_cfg_status *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_status->status_en, + IPA_ENDP_STATUS_n_STATUS_EN_SHFT, + IPA_ENDP_STATUS_n_STATUS_EN_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_status->status_ep, + IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT, + IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_status->status_location, + IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT, + IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_status->status_pkt_suppress, + IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT, + IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK); +} + +static void ipareg_construct_qcncm( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_qcncm *qcncm = + (struct ipahal_reg_qcncm *)fields; + + IPA_SETFIELD_IN_REG(*val, qcncm->mode_en ? 1 : 0, + IPA_QCNCM_MODE_EN_SHFT, + IPA_QCNCM_MODE_EN_BMSK); + IPA_SETFIELD_IN_REG(*val, qcncm->mode_val, + IPA_QCNCM_MODE_VAL_SHFT, + IPA_QCNCM_MODE_VAL_BMSK); + IPA_SETFIELD_IN_REG(*val, qcncm->undefined, + 0, IPA_QCNCM_MODE_VAL_BMSK); +} + +static void ipareg_parse_qcncm( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_qcncm *qcncm = + (struct ipahal_reg_qcncm *)fields; + + memset(qcncm, 0, sizeof(struct ipahal_reg_qcncm)); + qcncm->mode_en = IPA_GETFIELD_FROM_REG(val, + IPA_QCNCM_MODE_EN_SHFT, + IPA_QCNCM_MODE_EN_BMSK); + qcncm->mode_val = IPA_GETFIELD_FROM_REG(val, + IPA_QCNCM_MODE_VAL_SHFT, + IPA_QCNCM_MODE_VAL_BMSK); + qcncm->undefined = IPA_GETFIELD_FROM_REG(val, + 0, IPA_QCNCM_UNDEFINED1_BMSK); + qcncm->undefined |= IPA_GETFIELD_FROM_REG(val, + 0, IPA_QCNCM_MODE_UNDEFINED2_BMSK); +} + +static void ipareg_construct_single_ndp_mode( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_single_ndp_mode *mode = + (struct ipahal_reg_single_ndp_mode *)fields; + + IPA_SETFIELD_IN_REG(*val, mode->single_ndp_en ? 
1 : 0, + IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT, + IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK); + + IPA_SETFIELD_IN_REG(*val, mode->undefined, + IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT, + IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK); +} + +static void ipareg_parse_single_ndp_mode( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_single_ndp_mode *mode = + (struct ipahal_reg_single_ndp_mode *)fields; + + memset(mode, 0, sizeof(struct ipahal_reg_single_ndp_mode)); + mode->single_ndp_en = IPA_GETFIELD_FROM_REG(val, + IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT, + IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK); + mode->undefined = IPA_GETFIELD_FROM_REG(val, + IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT, + IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK); +} + +static void ipareg_construct_debug_cnt_ctrl_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_debug_cnt_ctrl *dbg_cnt_ctrl = + (struct ipahal_reg_debug_cnt_ctrl *)fields; + u8 type; + + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->en ? 1 : 0, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK); + + switch (dbg_cnt_ctrl->type) { + case DBG_CNT_TYPE_IPV4_FLTR: + type = 0x0; + if (!dbg_cnt_ctrl->rule_idx_pipe_rule) { + IPAHAL_ERR("No FLT global rules\n"); + WARN_ON(1); + } + break; + case DBG_CNT_TYPE_IPV4_ROUT: + type = 0x1; + break; + case DBG_CNT_TYPE_GENERAL: + type = 0x2; + break; + case DBG_CNT_TYPE_IPV6_FLTR: + type = 0x4; + if (!dbg_cnt_ctrl->rule_idx_pipe_rule) { + IPAHAL_ERR("No FLT global rules\n"); + WARN_ON(1); + } + break; + case DBG_CNT_TYPE_IPV6_ROUT: + type = 0x5; + break; + default: + IPAHAL_ERR("Invalid dbg_cnt_ctrl type (%d) for %s\n", + dbg_cnt_ctrl->type, ipahal_reg_name_str(reg)); + WARN_ON(1); + return; + + }; + + IPA_SETFIELD_IN_REG(*val, type, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK); + + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->product ? 
1 : 0, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK); + + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->src_pipe, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK); + + if (ipahal_ctx->hw_type <= IPA_HW_v3_1) { + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK); + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx_pipe_rule, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK + ); + } else { + IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT, + IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5); + } +} + +static void ipareg_parse_shared_mem_size( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_shared_mem_size *smem_sz = + (struct ipahal_reg_shared_mem_size *)fields; + + memset(smem_sz, 0, sizeof(struct ipahal_reg_shared_mem_size)); + smem_sz->shared_mem_sz = IPA_GETFIELD_FROM_REG(val, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK); + + smem_sz->shared_mem_baddr = IPA_GETFIELD_FROM_REG(val, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK); +} + +static void ipareg_construct_endp_init_rsrc_grp_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp = + (struct ipahal_reg_endp_init_rsrc_grp *)fields; + + IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp, + IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT, + IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK); +} + +static void ipareg_construct_endp_init_rsrc_grp_n_v3_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp = + (struct ipahal_reg_endp_init_rsrc_grp *)fields; + + IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp, + IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5, + IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5); +} + +static void ipareg_construct_endp_init_hdr_metadata_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_metadata *metadata = + (struct ipa_ep_cfg_metadata *)fields; + + IPA_SETFIELD_IN_REG(*val, metadata->qmap_id, + IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT, + IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK); +} + +static void ipareg_construct_endp_init_hdr_metadata_mask_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_metadata_mask *metadata_mask = + (struct ipa_ep_cfg_metadata_mask *)fields; + + IPA_SETFIELD_IN_REG(*val, metadata_mask->metadata_mask, + IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT, + IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK); +} + +static void ipareg_construct_endp_init_cfg_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_cfg *cfg = + (struct ipa_ep_cfg_cfg *)fields; + u32 cs_offload_en; + + switch (cfg->cs_offload_en) { + case IPA_DISABLE_CS_OFFLOAD: + cs_offload_en = 0; + break; + case IPA_ENABLE_CS_OFFLOAD_UL: + cs_offload_en = 1; + break; + case IPA_ENABLE_CS_OFFLOAD_DL: + cs_offload_en = 2; + break; + default: + IPAHAL_ERR("Invalid cs_offload_en value for %s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return; + } + + IPA_SETFIELD_IN_REG(*val, cfg->frag_offload_en ? 
1 : 0, + IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT, + IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK); + IPA_SETFIELD_IN_REG(*val, cs_offload_en, + IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT, + IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK); + IPA_SETFIELD_IN_REG(*val, cfg->cs_metadata_hdr_offset, + IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT, + IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK); + IPA_SETFIELD_IN_REG(*val, cfg->gen_qmb_master_sel, + IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT, + IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK); + +} + +static void ipareg_construct_endp_init_deaggr_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_deaggr *ep_deaggr = + (struct ipa_ep_cfg_deaggr *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_deaggr->deaggr_hdr_len, + IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT, + IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_valid, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_location, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_deaggr->max_packet_len, + IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT, + IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK); +} + +static void ipareg_construct_endp_init_hol_block_en_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_holb *ep_holb = + (struct ipa_ep_cfg_holb *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_holb->en, + IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT, + IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK); +} + +static void ipareg_construct_endp_init_hol_block_timer_n( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_holb *ep_holb = + (struct ipa_ep_cfg_holb *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_holb->tmr_val, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK); +} + +static void ipareg_construct_endp_init_ctrl_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_ctrl *ep_ctrl = + (struct ipa_ep_cfg_ctrl *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_suspend, + IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT, + IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay, + IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT, + IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK); +} + +static void ipareg_parse_endp_init_ctrl_n(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + struct ipa_ep_cfg_ctrl *ep_ctrl = + (struct ipa_ep_cfg_ctrl *)fields; + + ep_ctrl->ipa_ep_suspend = + ((val & IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK) >> + IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT); + + ep_ctrl->ipa_ep_delay = + ((val & IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK) >> + IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT); +} + +static void ipareg_construct_endp_init_ctrl_n_v4_0(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_ctrl *ep_ctrl = + (struct ipa_ep_cfg_ctrl *)fields; + + WARN_ON(ep_ctrl->ipa_ep_suspend); + + IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay, + IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT, + IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK); +} + +static void ipareg_construct_endp_init_ctrl_scnd_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_ep_cfg_ctrl_scnd *ep_ctrl_scnd = + (struct ipahal_ep_cfg_ctrl_scnd *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_ctrl_scnd->endp_delay, + 
IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_SHFT, + IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_BMSK); +} + +static void ipareg_construct_endp_init_nat_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_nat *ep_nat = + (struct ipa_ep_cfg_nat *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_nat->nat_en, + IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT, + IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK); +} + +static void ipareg_construct_endp_init_conn_track_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_conn_track *ep_ipv6ct = + (struct ipa_ep_cfg_conn_track *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_ipv6ct->conn_track_en, + IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_SHFT, + IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_BMSK); +} + +static void ipareg_construct_endp_init_mode_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_endp_init_mode *init_mode = + (struct ipahal_reg_endp_init_mode *)fields; + + IPA_SETFIELD_IN_REG(*val, init_mode->ep_mode.mode, + IPA_ENDP_INIT_MODE_n_MODE_SHFT, + IPA_ENDP_INIT_MODE_n_MODE_BMSK); + + IPA_SETFIELD_IN_REG(*val, init_mode->dst_pipe_number, + IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT, + IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK); +} + +static void ipareg_construct_endp_init_route_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_endp_init_route *ep_init_rt = + (struct ipahal_reg_endp_init_route *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_init_rt->route_table_index, + IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT, + IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK); + +} + +static void ipareg_parse_endp_init_aggr_n(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + struct ipa_ep_cfg_aggr *ep_aggr = + (struct ipa_ep_cfg_aggr *)fields; + + memset(ep_aggr, 0, sizeof(struct ipa_ep_cfg_aggr)); + + ep_aggr->aggr_en = + (((val & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT) + == IPA_ENABLE_AGGR); + ep_aggr->aggr = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT); + ep_aggr->aggr_byte_limit = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT); + ep_aggr->aggr_time_limit = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT); + ep_aggr->aggr_pkt_limit = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT); + ep_aggr->aggr_sw_eof_active = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK) >> + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT); + ep_aggr->aggr_hard_byte_limit_en = + ((val & IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK) + >> + IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT); +} + +static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_aggr *ep_aggr = + (struct ipa_ep_cfg_aggr *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en, + IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr, + IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_byte_limit, + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_time_limit, + IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_pkt_limit, + 
IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_sw_eof_active, + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK); + + /* At IPAv3 hard_byte_limit is not supported */ + ep_aggr->aggr_hard_byte_limit_en = 0; + IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_hard_byte_limit_en, + IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK); +} + +static void ipareg_construct_endp_init_hdr_ext_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_hdr_ext *ep_hdr_ext; + u8 hdr_endianness; + + ep_hdr_ext = (struct ipa_ep_cfg_hdr_ext *)fields; + hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1; + + IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_pad_to_alignment, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0); + + IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_offset, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_payload_len_inc_padding, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_valid, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK); + + IPA_SETFIELD_IN_REG(*val, hdr_endianness, + IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK); +} + +static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipa_ep_cfg_hdr *ep_hdr; + + ep_hdr = (struct ipa_ep_cfg_hdr *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_metadata_reg_valid, + IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2, + IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional, + IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2, + IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux, + IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size_valid, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_additional_const_len, + IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata_valid, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK); + + IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_len, + IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT, + IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK); +} + +static void ipareg_construct_route(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + 
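+	/*
+	 * Note (pattern shared by the construct callbacks in this file):
+	 * each abstracted field is packed into its SHFT offset and BMSK
+	 * mask of the raw register word via IPA_SETFIELD_IN_REG(); callers
+	 * such as ipahal_write_reg_n_fields() pass in *val pre-initialized
+	 * to zero, so successive calls simply accumulate the fields.
+	 */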
struct ipahal_reg_route *route; + + route = (struct ipahal_reg_route *)fields; + + IPA_SETFIELD_IN_REG(*val, route->route_dis, + IPA_ROUTE_ROUTE_DIS_SHFT, + IPA_ROUTE_ROUTE_DIS_BMSK); + + IPA_SETFIELD_IN_REG(*val, route->route_def_pipe, + IPA_ROUTE_ROUTE_DEF_PIPE_SHFT, + IPA_ROUTE_ROUTE_DEF_PIPE_BMSK); + + IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_table, + IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT, + IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK); + + IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_ofst, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK); + + IPA_SETFIELD_IN_REG(*val, route->route_frag_def_pipe, + IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT, + IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK); + + IPA_SETFIELD_IN_REG(*val, route->route_def_retain_hdr, + IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT, + IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK); +} + +static void ipareg_construct_qsb_max_writes(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_qsb_max_writes *max_writes; + + max_writes = (struct ipahal_reg_qsb_max_writes *)fields; + + IPA_SETFIELD_IN_REG(*val, max_writes->qmb_0_max_writes, + IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT, + IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK); + IPA_SETFIELD_IN_REG(*val, max_writes->qmb_1_max_writes, + IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT, + IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK); +} + +static void ipareg_construct_qsb_max_reads(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_qsb_max_reads *max_reads; + + max_reads = (struct ipahal_reg_qsb_max_reads *)fields; + + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_reads, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK); + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_reads, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK); +} + +static void ipareg_construct_qsb_max_reads_v4_0(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_qsb_max_reads *max_reads; + + max_reads = (struct ipahal_reg_qsb_max_reads *)fields; + + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_reads, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK); + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_reads, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK); + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_read_beats, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_SHFT_V4_0, + IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_BMSK_V4_0); + IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_read_beats, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_SHFT_V4_0, + IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_BMSK_V4_0); +} + +static void ipareg_parse_tx_cfg(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + struct ipahal_reg_tx_cfg *tx_cfg; + + tx_cfg = (struct ipahal_reg_tx_cfg *)fields; + + tx_cfg->tx0_prefetch_disable = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5, + IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5); + + tx_cfg->tx1_prefetch_disable = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5, + IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5); + + tx_cfg->tx0_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5); + + tx_cfg->tx1_prefetch_almost_empty_size = + tx_cfg->tx0_prefetch_almost_empty_size; +} + +static void 
ipareg_parse_tx_cfg_v4_0(enum ipahal_reg_name reg, + void *fields, u32 val) +{ + struct ipahal_reg_tx_cfg *tx_cfg; + + tx_cfg = (struct ipahal_reg_tx_cfg *)fields; + + tx_cfg->tx0_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0); + + tx_cfg->tx1_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0); + + tx_cfg->dmaw_scnd_outsd_pred_en = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0); + + tx_cfg->dmaw_scnd_outsd_pred_threshold = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0); + + tx_cfg->dmaw_max_beats_256_dis = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0, + IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0); + + tx_cfg->pa_mask_en = IPA_GETFIELD_FROM_REG(val, + IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0, + IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0); +} + +static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_tx_cfg *tx_cfg; + + tx_cfg = (struct ipahal_reg_tx_cfg *)fields; + + if (tx_cfg->tx0_prefetch_almost_empty_size != + tx_cfg->tx1_prefetch_almost_empty_size) + ipa_assert(); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_disable, + IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5, + IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_disable, + IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5, + IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_almost_empty_size, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5); +} + +static void ipareg_construct_tx_cfg_v4_0(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_tx_cfg *tx_cfg; + + tx_cfg = (struct ipahal_reg_tx_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_almost_empty_size, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_almost_empty_size, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0, + IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_scnd_outsd_pred_threshold, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_max_beats_256_dis, + IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0, + IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_scnd_outsd_pred_en, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0, + IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0); + + IPA_SETFIELD_IN_REG(*val, tx_cfg->pa_mask_en, + IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0, + IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0); +} + +static void ipareg_construct_idle_indication_cfg(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_idle_indication_cfg *idle_indication_cfg; + + idle_indication_cfg = (struct ipahal_reg_idle_indication_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, + idle_indication_cfg->enter_idle_debounce_thresh, + IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5, + 
IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5); + + IPA_SETFIELD_IN_REG(*val, + idle_indication_cfg->const_non_idle_enable, + IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5, + IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5); +} + +static void ipareg_construct_hps_queue_weights(enum ipahal_reg_name reg, + const void *fields, u32 *val) +{ + struct ipahal_reg_rx_hps_weights *hps_weights; + + hps_weights = (struct ipahal_reg_rx_hps_weights *)fields; + + IPA_SETFIELD_IN_REG(*val, + hps_weights->hps_queue_weight_0, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK); + + IPA_SETFIELD_IN_REG(*val, + hps_weights->hps_queue_weight_1, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK); + + IPA_SETFIELD_IN_REG(*val, + hps_weights->hps_queue_weight_2, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK); + + IPA_SETFIELD_IN_REG(*val, + hps_weights->hps_queue_weight_3, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK); +} + +static void ipareg_parse_hps_queue_weights( + enum ipahal_reg_name reg, void *fields, u32 val) +{ + struct ipahal_reg_rx_hps_weights *hps_weights = + (struct ipahal_reg_rx_hps_weights *)fields; + + memset(hps_weights, 0, sizeof(struct ipahal_reg_rx_hps_weights)); + + hps_weights->hps_queue_weight_0 = IPA_GETFIELD_FROM_REG(val, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK); + + hps_weights->hps_queue_weight_1 = IPA_GETFIELD_FROM_REG(val, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK); + + hps_weights->hps_queue_weight_2 = IPA_GETFIELD_FROM_REG(val, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK); + + hps_weights->hps_queue_weight_3 = IPA_GETFIELD_FROM_REG(val, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK); +} + +/* + * struct ipahal_reg_obj - Register H/W information for specific IPA version + * @construct - CB to construct register value from abstracted structure + * @parse - CB to parse register value to abstracted structure + * @offset - register offset relative to base address + * @n_ofst - N parameterized register sub-offset + */ +struct ipahal_reg_obj { + void (*construct)(enum ipahal_reg_name reg, const void *fields, + u32 *val); + void (*parse)(enum ipahal_reg_name reg, void *fields, + u32 val); + u32 offset; + u32 n_ofst; +}; + +/* + * This table contains the info regarding each register for IPAv3 and later. + * Information like: offset and construct/parse functions. + * All the information on the register on IPAv3 are statically defined below. + * If information is missing regarding some register on some IPA version, + * the init function will fill it with the information from the previous + * IPA version. + * Information is considered missing if all of the fields are 0. + * If offset is -1, this means that the register is removed on the + * specific version. 
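+ * For example, in the table below IPA_ROUTE is only listed under
+ * IPA_HW_v3_0, so IPA_HW_v3_5 and IPA_HW_v4_0 inherit its v3.0 entry
+ * (offset 0x48) unchanged, while IPA_QCNCM is listed with offset -1
+ * under IPA_HW_v4_0 to mark it as removed starting at that version.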
+ */ +static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { + /* IPAv3 */ + [IPA_HW_v3_0][IPA_ROUTE] = { + ipareg_construct_route, ipareg_parse_dummy, + 0x00000048, 0}, + [IPA_HW_v3_0][IPA_IRQ_STTS_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003008, 0x1000}, + [IPA_HW_v3_0][IPA_IRQ_EN_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000300c, 0x1000}, + [IPA_HW_v3_0][IPA_IRQ_CLR_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003010, 0x1000}, + [IPA_HW_v3_0][IPA_IRQ_SUSPEND_INFO_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003098, 0x1000}, + [IPA_HW_v3_0][IPA_BCR] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000001D0, 0}, + [IPA_HW_v3_0][IPA_ENABLED_PIPES] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000038, 0}, + [IPA_HW_v3_0][IPA_COMP_SW_RESET] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000040, 0}, + [IPA_HW_v3_0][IPA_VERSION] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000034, 0}, + [IPA_HW_v3_0][IPA_TAG_TIMER] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000060, 0 }, + [IPA_HW_v3_0][IPA_COMP_HW_VERSION] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000030, 0}, + [IPA_HW_v3_0][IPA_SPARE_REG_1] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00005090, 0}, + [IPA_HW_v3_0][IPA_SPARE_REG_2] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00005094, 0}, + [IPA_HW_v3_0][IPA_COMP_CFG] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000003C, 0}, + [IPA_HW_v3_0][IPA_STATE_AGGR_ACTIVE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000010C, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_n] = { + ipareg_construct_endp_init_hdr_n, ipareg_parse_dummy, + 0x00000810, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_EXT_n] = { + ipareg_construct_endp_init_hdr_ext_n, ipareg_parse_dummy, + 0x00000814, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_INIT_AGGR_n] = { + ipareg_construct_endp_init_aggr_n, + ipareg_parse_endp_init_aggr_n, + 0x00000824, 0x70}, + [IPA_HW_v3_0][IPA_AGGR_FORCE_CLOSE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000001EC, 0}, + [IPA_HW_v3_0][IPA_ENDP_INIT_ROUTE_n] = { + ipareg_construct_endp_init_route_n, ipareg_parse_dummy, + 0x00000828, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_INIT_MODE_n] = { + ipareg_construct_endp_init_mode_n, ipareg_parse_dummy, + 0x00000820, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_INIT_NAT_n] = { + ipareg_construct_endp_init_nat_n, ipareg_parse_dummy, + 0x0000080C, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_n] = { + ipareg_construct_endp_init_ctrl_n, + ipareg_parse_endp_init_ctrl_n, + 0x00000800, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_SCND_n] = { + ipareg_construct_endp_init_ctrl_scnd_n, ipareg_parse_dummy, + 0x00000804, 0x70 }, + [IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = { + ipareg_construct_endp_init_hol_block_en_n, + ipareg_parse_dummy, + 0x0000082c, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = { + ipareg_construct_endp_init_hol_block_timer_n, + ipareg_parse_dummy, + 0x00000830, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_INIT_DEAGGR_n] = { + ipareg_construct_endp_init_deaggr_n, + ipareg_parse_dummy, + 0x00000834, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_INIT_SEQ_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000083C, 0x70}, + [IPA_HW_v3_0][IPA_DEBUG_CNT_REG_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000600, 0x4}, + [IPA_HW_v3_0][IPA_ENDP_INIT_CFG_n] = { + ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy, + 0x00000808, 0x70}, + [IPA_HW_v3_0][IPA_IRQ_EE_UC_n] = { + ipareg_construct_dummy, 
ipareg_parse_dummy, + 0x0000301c, 0x1000}, + [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = { + ipareg_construct_endp_init_hdr_metadata_mask_n, + ipareg_parse_dummy, + 0x00000818, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_n] = { + ipareg_construct_endp_init_hdr_metadata_n, + ipareg_parse_dummy, + 0x0000081c, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_INIT_RSRC_GRP_n] = { + ipareg_construct_endp_init_rsrc_grp_n, + ipareg_parse_dummy, + 0x00000838, 0x70}, + [IPA_HW_v3_0][IPA_SHARED_MEM_SIZE] = { + ipareg_construct_dummy, ipareg_parse_shared_mem_size, + 0x00000054, 0}, + [IPA_HW_v3_0][IPA_SRAM_DIRECT_ACCESS_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00007000, 0x4}, + [IPA_HW_v3_0][IPA_DEBUG_CNT_CTRL_n] = { + ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy, + 0x00000640, 0x4}, + [IPA_HW_v3_0][IPA_UC_MAILBOX_m_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00032000, 0x4}, + [IPA_HW_v3_0][IPA_FILT_ROUT_HASH_FLUSH] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000090, 0}, + [IPA_HW_v3_0][IPA_SINGLE_NDP_MODE] = { + ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode, + 0x00000068, 0}, + [IPA_HW_v3_0][IPA_QCNCM] = { + ipareg_construct_qcncm, ipareg_parse_qcncm, + 0x00000064, 0}, + [IPA_HW_v3_0][IPA_SYS_PKT_PROC_CNTXT_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000001e0, 0}, + [IPA_HW_v3_0][IPA_LOCAL_PKT_PROC_CNTXT_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000001e8, 0}, + [IPA_HW_v3_0][IPA_ENDP_STATUS_n] = { + ipareg_construct_endp_status_n, ipareg_parse_dummy, + 0x00000840, 0x70}, + [IPA_HW_v3_0][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = { + ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n, + 0x0000085C, 0x70}, + [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000400, 0x20}, + [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000404, 0x20}, + [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000408, 0x20}, + [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x0000040C, 0x20}, + [IPA_HW_v3_0][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000500, 0x20}, + [IPA_HW_v3_0][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000504, 0x20}, + [IPA_HW_v3_0][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x00000508, 0x20}, + [IPA_HW_v3_0][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy, + 0x0000050c, 0x20}, + [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy, + 0x000023C4, 0}, + [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = { + ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy, + 0x000023C8, 0}, + [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy, + 0x000023CC, 0}, + [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = { + ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy, + 0x000023D0, 0}, + [IPA_HW_v3_0][IPA_QSB_MAX_WRITES] = { + ipareg_construct_qsb_max_writes, ipareg_parse_dummy, + 0x00000074, 0}, + [IPA_HW_v3_0][IPA_QSB_MAX_READS] = { + ipareg_construct_qsb_max_reads, ipareg_parse_dummy, + 0x00000078, 0}, + [IPA_HW_v3_0][IPA_DPS_SEQUENCER_FIRST] = { + ipareg_construct_dummy, 
ipareg_parse_dummy, + 0x0001e000, 0}, + [IPA_HW_v3_0][IPA_HPS_SEQUENCER_FIRST] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0001e080, 0}, + + + /* IPAv3.1 */ + [IPA_HW_v3_1][IPA_IRQ_SUSPEND_INFO_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003030, 0x1000}, + [IPA_HW_v3_1][IPA_SUSPEND_IRQ_EN_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003034, 0x1000}, + [IPA_HW_v3_1][IPA_SUSPEND_IRQ_CLR_EE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00003038, 0x1000}, + + + /* IPAv3.5 */ + [IPA_HW_v3_5][IPA_TX_CFG] = { + ipareg_construct_tx_cfg, ipareg_parse_tx_cfg, + 0x000001FC, 0}, + [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy, + 0x00000400, 0x20}, + [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy, + 0x00000404, 0x20}, + [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0}, + [IPA_HW_v3_5][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0}, + [IPA_HW_v3_5][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy, + 0x00000500, 0x20}, + [IPA_HW_v3_5][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy, + 0x00000504, 0x20}, + [IPA_HW_v3_5][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0}, + [IPA_HW_v3_5][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0}, + [IPA_HW_v3_5][IPA_ENDP_INIT_RSRC_GRP_n] = { + ipareg_construct_endp_init_rsrc_grp_n_v3_5, + ipareg_parse_dummy, + 0x00000838, 0x70}, + [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0_v3_5, + ipareg_parse_dummy, + 0x000023C4, 0}, + [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0}, + [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0_v3_5, + ipareg_parse_dummy, + 0x000023CC, 0}, + [IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0}, + [IPA_HW_v3_5][IPA_SPARE_REG_1] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00002780, 0}, + [IPA_HW_v3_5][IPA_SPARE_REG_2] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00002784, 0}, + [IPA_HW_v3_5][IPA_IDLE_INDICATION_CFG] = { + ipareg_construct_idle_indication_cfg, ipareg_parse_dummy, + 0x00000220, 0}, + [IPA_HW_v3_5][IPA_HPS_FTCH_ARB_QUEUE_WEIGHT] = { + ipareg_construct_hps_queue_weights, + ipareg_parse_hps_queue_weights, 0x000005a4, 0}, + + /* IPAv4.0 */ + [IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_n] = { + ipareg_construct_endp_init_ctrl_n_v4_0, ipareg_parse_dummy, + 0x00000800, 0x70 }, + [IPA_HW_v4_0][IPA_TX_CFG] = { + ipareg_construct_tx_cfg_v4_0, ipareg_parse_tx_cfg_v4_0, + 0x000001FC, 0}, + [IPA_HW_v4_0][IPA_DEBUG_CNT_REG_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + -1, 0}, + [IPA_HW_v4_0][IPA_DEBUG_CNT_CTRL_n] = { + ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy, + -1, 0}, + [IPA_HW_v4_0][IPA_QCNCM] = { + ipareg_construct_qcncm, ipareg_parse_qcncm, + -1, 0}, + [IPA_HW_v4_0][IPA_SINGLE_NDP_MODE] = { + ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode, + -1, 0}, + [IPA_HW_v4_0][IPA_QSB_MAX_READS] = { + ipareg_construct_qsb_max_reads_v4_0, ipareg_parse_dummy, + 0x00000078, 0}, + [IPA_HW_v4_0][IPA_FILT_ROUT_HASH_FLUSH] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 
0x0000014c, 0}, + [IPA_HW_v4_0][IPA_STATE_AGGR_ACTIVE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000000b4, 0}, + [IPA_HW_v4_0][IPA_ENDP_INIT_ROUTE_n] = { + ipareg_construct_endp_init_route_n, ipareg_parse_dummy, + -1, 0}, + [IPA_HW_v4_0][IPA_ENDP_STATUS_n] = { + ipareg_construct_endp_status_n_v4_0, ipareg_parse_dummy, + 0x00000840, 0x70}, + [IPA_HW_v4_0][IPA_CLKON_CFG] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000044, 0}, + [IPA_HW_v4_0][IPA_ENDP_INIT_CONN_TRACK_n] = { + ipareg_construct_endp_init_conn_track_n, + ipareg_parse_dummy, + 0x00000850, 0x70}, + [IPA_HW_v4_0][IPA_STAT_QUOTA_BASE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000700, 0x4 }, + [IPA_HW_v4_0][IPA_STAT_QUOTA_MASK_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000708, 0x4 }, + [IPA_HW_v4_0][IPA_STAT_TETHERING_BASE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000710, 0x4 }, + [IPA_HW_v4_0][IPA_STAT_TETHERING_MASK_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000718, 0x4 }, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000720, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000724, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000728, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_BASE] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000072C, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000730, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000734, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000738, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_START_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000073C, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000740, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000744, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000748, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_END_ID] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x0000074C, 0x0 }, + [IPA_HW_v4_0][IPA_STAT_DROP_CNT_BASE_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000750, 0x4 }, + [IPA_HW_v4_0][IPA_STAT_DROP_CNT_MASK_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000758, 0x4 }, +}; + +/* + * ipahal_reg_init() - Build the registers information table + * See ipahal_reg_objs[][] comments + * + * Note: As global variables are initialized with zero, any un-overridden + * register entry will be zero. By this we recognize them. 
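+ *
+ * As a minimal illustration, overriding a single register for a newer
+ * HW version while inheriting everything else needs only one new table
+ * entry, e.g. (IPA_HW_vX_Y is a placeholder, not a real enum value):
+ *
+ *	[IPA_HW_vX_Y][IPA_BCR] = {
+ *		ipareg_construct_dummy, ipareg_parse_dummy,
+ *		0x000001D0, 0},
+ *
+ * Every register left out of IPA_HW_vX_Y keeps the entry copied from
+ * the previous version by the loop below.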
+ */ +int ipahal_reg_init(enum ipa_hw_type ipa_hw_type) +{ + int i; + int j; + struct ipahal_reg_obj zero_obj; + + IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type); + + if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) { + IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type); + return -EINVAL; + } + + memset(&zero_obj, 0, sizeof(zero_obj)); + for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) { + for (j = 0; j < IPA_REG_MAX ; j++) { + if (!memcmp(&ipahal_reg_objs[i+1][j], &zero_obj, + sizeof(struct ipahal_reg_obj))) { + memcpy(&ipahal_reg_objs[i+1][j], + &ipahal_reg_objs[i][j], + sizeof(struct ipahal_reg_obj)); + } else { + /* + * explicitly overridden register. + * Check validity + */ + if (!ipahal_reg_objs[i+1][j].offset) { + IPAHAL_ERR( + "reg=%s with zero offset ipa_ver=%d\n", + ipahal_reg_name_str(j), i+1); + WARN_ON(1); + } + if (!ipahal_reg_objs[i+1][j].construct) { + IPAHAL_ERR( + "reg=%s with NULL construct func ipa_ver=%d\n", + ipahal_reg_name_str(j), i+1); + WARN_ON(1); + } + if (!ipahal_reg_objs[i+1][j].parse) { + IPAHAL_ERR( + "reg=%s with NULL parse func ipa_ver=%d\n", + ipahal_reg_name_str(j), i+1); + WARN_ON(1); + } + } + } + } + + return 0; +} + +/* + * ipahal_reg_name_str() - returns string that represent the register + * @reg_name: [in] register name + */ +const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name) +{ + if (reg_name < 0 || reg_name >= IPA_REG_MAX) { + IPAHAL_ERR("requested name of invalid reg=%d\n", reg_name); + return "Invalid Register"; + } + + return ipareg_name_to_str[reg_name]; +} + +/* + * ipahal_read_reg_n() - Get n parameterized reg value + */ +u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n) +{ + u32 offset; + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return -EINVAL; + } + + IPAHAL_DBG_LOW("read from %s n=%u\n", + ipahal_reg_name_str(reg), n); + + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Read access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return -EPERM; + } + offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n; + return ioread32(ipahal_ctx->base + offset); +} + +/* + * ipahal_read_reg_mn() - Get mn parameterized reg value + */ +u32 ipahal_read_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n) +{ + u32 offset; + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return -EINVAL; + } + + IPAHAL_DBG_LOW("read %s m=%u n=%u\n", + ipahal_reg_name_str(reg), m, n); + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Read access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON_ONCE(1); + return -EPERM; + } + /* + * Currently there is one register with m and n parameters + * IPA_UC_MAILBOX_m_n. The m value of it is 0x80. + * If more such registers will be added in the future, + * we can move the m parameter to the table above. 
+ */ + offset += 0x80 * m; + offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n; + return ioread32(ipahal_ctx->base + offset); +} + +/* + * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value + */ +void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val) +{ + u32 offset; + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return; + } + + IPAHAL_DBG_LOW("write to %s m=%u n=%u val=%u\n", + ipahal_reg_name_str(reg), m, n, val); + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Write access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return; + } + /* + * Currently there is one register with m and n parameters + * IPA_UC_MAILBOX_m_n. The m value of it is 0x80. + * If more such registers will be added in the future, + * we can move the m parameter to the table above. + */ + offset += 0x80 * m; + offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n; + iowrite32(val, ipahal_ctx->base + offset); +} + +/* + * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg + */ +u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields) +{ + u32 val = 0; + u32 offset; + + if (!fields) { + IPAHAL_ERR("Input error fields\n"); + WARN_ON(1); + return -EINVAL; + } + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return -EINVAL; + } + + IPAHAL_DBG_LOW("read from %s n=%u and parse it\n", + ipahal_reg_name_str(reg), n); + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Read access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return -EPERM; + } + offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n; + val = ioread32(ipahal_ctx->base + offset); + ipahal_reg_objs[ipahal_ctx->hw_type][reg].parse(reg, fields, val); + + return val; +} + +/* + * ipahal_write_reg_n_fields() - Write to n parameterized reg a prased value + */ +void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n, + const void *fields) +{ + u32 val = 0; + u32 offset; + + if (!fields) { + IPAHAL_ERR("Input error fields=%pK\n", fields); + WARN_ON(1); + return; + } + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return; + } + + IPAHAL_DBG_LOW("write to %s n=%u after constructing it\n", + ipahal_reg_name_str(reg), n); + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Write access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return; + } + offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n; + ipahal_reg_objs[ipahal_ctx->hw_type][reg].construct(reg, fields, &val); + + iowrite32(val, ipahal_ctx->base + offset); +} + +/* + * Get the offset of a m/n parameterized register + */ +u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n) +{ + u32 offset; + + if (reg >= IPA_REG_MAX) { + IPAHAL_ERR("Invalid register reg=%u\n", reg); + WARN_ON(1); + return -EINVAL; + } + + IPAHAL_DBG_LOW("get offset of %s m=%u n=%u\n", + ipahal_reg_name_str(reg), m, n); + offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; + if (offset == -1) { + IPAHAL_ERR("Access to obsolete reg=%s\n", + ipahal_reg_name_str(reg)); + WARN_ON(1); + return -EPERM; + } + /* + * Currently there is one register with m and n parameters + * IPA_UC_MAILBOX_m_n. The m value of it is 0x80. 
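+ * For instance (editor's illustration): the m=2 instance of the mailbox
+ * register block starts 0x80 * 2 = 0x100 bytes past the m=0 instance,
+ * before the per-n step (n_ofst * n) is added.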
+ * If more such registers will be added in the future,
+ * we can move the m parameter to the table above.
+ */
+	offset += 0x80 * m;
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+
+	return offset;
+}
+
+u32 ipahal_get_reg_base(void)
+{
+	return 0x00040000;
+}
+
+
+/*
+ * Specific functions
+ * These functions supply specific register values for specific operations
+ * that cannot be reached by generic functions.
+ * E.g. To disable aggregation, need to write to specific bits of the AGGR
+ * register. The other bits should be untouched. This operation is very
+ * specific and cannot be generically defined. For such operations we define
+ * these specific functions.
+ */
+
+u32 ipahal_aggr_get_max_byte_limit(void)
+{
+	return
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK >>
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT;
+}
+
+u32 ipahal_aggr_get_max_pkt_limit(void)
+{
+	return
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >>
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT;
+}
+
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+	struct ipahal_reg_valmask *valmask)
+{
+	u32 shft;
+	u32 bmsk;
+
+	if (!valmask) {
+		IPAHAL_ERR("Input error\n");
+		return;
+	}
+
+	if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+		shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
+		bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK;
+	} else if (ipahal_ctx->hw_type <= IPA_HW_v3_5_1) {
+		shft =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5;
+		bmsk =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5;
+	} else {
+		shft =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_0;
+		bmsk =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_0;
+	}
+
+	if (ep_idx > (sizeof(valmask->val) * 8 - 1)) {
+		IPAHAL_ERR("too big ep_idx %d\n", ep_idx);
+		ipa_assert();
+		return;
+	}
+	IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
+	valmask->mask = bmsk << shft;
+}
+
+void ipahal_get_fltrt_hash_flush_valmask(
+	struct ipahal_reg_fltrt_hash_flush *flush,
+	struct ipahal_reg_valmask *valmask)
+{
+	if (!flush || !valmask) {
+		IPAHAL_ERR("Input error: flush=%pK ; valmask=%pK\n",
+			flush, valmask);
+		return;
+	}
+
+	memset(valmask, 0, sizeof(struct ipahal_reg_valmask));
+
+	if (flush->v6_rt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT);
+	if (flush->v6_flt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT);
+	if (flush->v4_rt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT);
+	if (flush->v4_flt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT);
+
+	valmask->mask = valmask->val;
+}
+
+void ipahal_get_status_ep_valmask(int pipe_num,
+	struct ipahal_reg_valmask *valmask)
+{
+	if (!valmask) {
+		IPAHAL_ERR("Input error\n");
+		return;
+	}
+
+	valmask->val =
+		(pipe_num & IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
+		IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+
+	valmask->mask =
+		IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
+		IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
new file mode 100644
index 000000000000..ceb5dfda7eed
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -0,0 +1,547 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the + * GNU General Public License for more details. + */ + +#ifndef _IPAHAL_REG_H_ +#define _IPAHAL_REG_H_ + +#include + +/* + * Registers names + * + * NOTE:: Any change to this enum, need to change to ipareg_name_to_str + * array as well. + */ +enum ipahal_reg_name { + IPA_ROUTE, + IPA_IRQ_STTS_EE_n, + IPA_IRQ_EN_EE_n, + IPA_IRQ_CLR_EE_n, + IPA_IRQ_SUSPEND_INFO_EE_n, + IPA_SUSPEND_IRQ_EN_EE_n, + IPA_SUSPEND_IRQ_CLR_EE_n, + IPA_BCR, + IPA_ENABLED_PIPES, + IPA_COMP_SW_RESET, + IPA_VERSION, + IPA_TAG_TIMER, + IPA_COMP_HW_VERSION, + IPA_SPARE_REG_1, + IPA_SPARE_REG_2, + IPA_COMP_CFG, + IPA_STATE_AGGR_ACTIVE, + IPA_ENDP_INIT_HDR_n, + IPA_ENDP_INIT_HDR_EXT_n, + IPA_ENDP_INIT_AGGR_n, + IPA_AGGR_FORCE_CLOSE, + IPA_ENDP_INIT_ROUTE_n, + IPA_ENDP_INIT_MODE_n, + IPA_ENDP_INIT_NAT_n, + IPA_ENDP_INIT_CONN_TRACK_n, + IPA_ENDP_INIT_CTRL_n, + IPA_ENDP_INIT_CTRL_SCND_n, + IPA_ENDP_INIT_HOL_BLOCK_EN_n, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, + IPA_ENDP_INIT_DEAGGR_n, + IPA_ENDP_INIT_SEQ_n, + IPA_DEBUG_CNT_REG_n, + IPA_ENDP_INIT_CFG_n, + IPA_IRQ_EE_UC_n, + IPA_ENDP_INIT_HDR_METADATA_MASK_n, + IPA_ENDP_INIT_HDR_METADATA_n, + IPA_ENDP_INIT_RSRC_GRP_n, + IPA_SHARED_MEM_SIZE, + IPA_SRAM_DIRECT_ACCESS_n, + IPA_DEBUG_CNT_CTRL_n, + IPA_UC_MAILBOX_m_n, + IPA_FILT_ROUT_HASH_FLUSH, + IPA_SINGLE_NDP_MODE, + IPA_QCNCM, + IPA_SYS_PKT_PROC_CNTXT_BASE, + IPA_LOCAL_PKT_PROC_CNTXT_BASE, + IPA_ENDP_STATUS_n, + IPA_ENDP_FILTER_ROUTER_HSH_CFG_n, + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n, + IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n, + IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n, + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + IPA_DST_RSRC_GRP_23_RSRC_TYPE_n, + IPA_DST_RSRC_GRP_45_RSRC_TYPE_n, + IPA_DST_RSRC_GRP_67_RSRC_TYPE_n, + IPA_RX_HPS_CLIENTS_MIN_DEPTH_0, + IPA_RX_HPS_CLIENTS_MIN_DEPTH_1, + IPA_RX_HPS_CLIENTS_MAX_DEPTH_0, + IPA_RX_HPS_CLIENTS_MAX_DEPTH_1, + IPA_HPS_FTCH_ARB_QUEUE_WEIGHT, + IPA_QSB_MAX_WRITES, + IPA_QSB_MAX_READS, + IPA_TX_CFG, + IPA_IDLE_INDICATION_CFG, + IPA_DPS_SEQUENCER_FIRST, + IPA_HPS_SEQUENCER_FIRST, + IPA_CLKON_CFG, + IPA_STAT_QUOTA_BASE_n, + IPA_STAT_QUOTA_MASK_n, + IPA_STAT_TETHERING_BASE_n, + IPA_STAT_TETHERING_MASK_n, + IPA_STAT_FILTER_IPV4_BASE, + IPA_STAT_FILTER_IPV6_BASE, + IPA_STAT_ROUTER_IPV4_BASE, + IPA_STAT_ROUTER_IPV6_BASE, + IPA_STAT_FILTER_IPV4_START_ID, + IPA_STAT_FILTER_IPV6_START_ID, + IPA_STAT_ROUTER_IPV4_START_ID, + IPA_STAT_ROUTER_IPV6_START_ID, + IPA_STAT_FILTER_IPV4_END_ID, + IPA_STAT_FILTER_IPV6_END_ID, + IPA_STAT_ROUTER_IPV4_END_ID, + IPA_STAT_ROUTER_IPV6_END_ID, + IPA_STAT_DROP_CNT_BASE_n, + IPA_STAT_DROP_CNT_MASK_n, + IPA_REG_MAX, +}; + +/* + * struct ipahal_reg_route - IPA route register + * @route_dis: route disable + * @route_def_pipe: route default pipe + * @route_def_hdr_table: route default header table + * @route_def_hdr_ofst: route default header offset table + * @route_frag_def_pipe: Default pipe to route fragmented exception + * packets and frag new rule statues, if source pipe does not have + * a notification status pipe defined. + * @route_def_retain_hdr: default value of retain header. It is used + * when no rule was hit + */ +struct ipahal_reg_route { + u32 route_dis; + u32 route_def_pipe; + u32 route_def_hdr_table; + u32 route_def_hdr_ofst; + u8 route_frag_def_pipe; + u32 route_def_retain_hdr; +}; + +/* + * struct ipahal_reg_endp_init_route - IPA ENDP_INIT_ROUTE_n register + * @route_table_index: Default index of routing table (IPA Consumer). 
+ */ +struct ipahal_reg_endp_init_route { + u32 route_table_index; +}; + +/* + * struct ipahal_reg_endp_init_rsrc_grp - IPA_ENDP_INIT_RSRC_GRP_n register + * @rsrc_grp: Index of group for this ENDP. If this ENDP is a source-ENDP, + * index is for source-resource-group. If destination ENPD, index is + * for destination-resoruce-group. + */ +struct ipahal_reg_endp_init_rsrc_grp { + u32 rsrc_grp; +}; + +/* + * struct ipahal_reg_endp_init_mode - IPA ENDP_INIT_MODE_n register + * @dst_pipe_number: This parameter specifies destination output-pipe-packets + * will be routed to. Valid for DMA mode only and for Input + * Pipes only (IPA Consumer) + */ +struct ipahal_reg_endp_init_mode { + u32 dst_pipe_number; + struct ipa_ep_cfg_mode ep_mode; +}; + +/* + * struct ipahal_reg_shared_mem_size - IPA SHARED_MEM_SIZE register + * @shared_mem_sz: Available size [in 8Bytes] of SW partition within + * IPA shared memory. + * @shared_mem_baddr: Offset of SW partition within IPA + * shared memory[in 8Bytes]. To get absolute address of SW partition, + * add this offset to IPA_SRAM_DIRECT_ACCESS_n baddr. + */ +struct ipahal_reg_shared_mem_size { + u32 shared_mem_sz; + u32 shared_mem_baddr; +}; + +/* + * struct ipahal_reg_ep_cfg_status - status configuration in IPA end-point + * @status_en: Determines if end point supports Status Indications. SW should + * set this bit in order to enable Statuses. Output Pipe - send + * Status indications only if bit is set. Input Pipe - forward Status + * indication to STATUS_ENDP only if bit is set. Valid for Input + * and Output Pipes (IPA Consumer and Producer) + * @status_ep: Statuses generated for this endpoint will be forwarded to the + * specified Status End Point. Status endpoint needs to be + * configured with STATUS_EN=1 Valid only for Input Pipes (IPA + * Consumer) + * @status_location: Location of PKT-STATUS on destination pipe. + * If set to 0 (default), PKT-STATUS will be appended before the packet + * for this endpoint. If set to 1, PKT-STATUS will be appended after the + * packet for this endpoint. 
Valid only for Output Pipes (IPA Producer) + * @status_pkt_suppress: + */ +struct ipahal_reg_ep_cfg_status { + bool status_en; + u8 status_ep; + bool status_location; + u8 status_pkt_suppress; +}; + +/* + * struct ipa_hash_tuple - Hash tuple members for flt and rt + * the fields tells if to be masked or not + * @src_id: pipe number for flt, table index for rt + * @src_ip_addr: IP source address + * @dst_ip_addr: IP destination address + * @src_port: L4 source port + * @dst_port: L4 destination port + * @protocol: IP protocol field + * @meta_data: packet meta-data + * + */ +struct ipahal_reg_hash_tuple { + /* src_id: pipe in flt, tbl index in rt */ + bool src_id; + bool src_ip_addr; + bool dst_ip_addr; + bool src_port; + bool dst_port; + bool protocol; + bool meta_data; +}; + +/* + * struct ipahal_reg_fltrt_hash_tuple - IPA hash tuple register + * @flt: Hash tuple info for filtering + * @rt: Hash tuple info for routing + * @undefinedX: Undefined/Unused bit fields set of the register + */ +struct ipahal_reg_fltrt_hash_tuple { + struct ipahal_reg_hash_tuple flt; + struct ipahal_reg_hash_tuple rt; + u32 undefined1; + u32 undefined2; +}; + +/* + * enum ipahal_reg_dbg_cnt_type - Debug Counter Type + * DBG_CNT_TYPE_IPV4_FLTR - Count IPv4 filtering rules + * DBG_CNT_TYPE_IPV4_ROUT - Count IPv4 routing rules + * DBG_CNT_TYPE_GENERAL - General counter + * DBG_CNT_TYPE_IPV6_FLTR - Count IPv6 filtering rules + * DBG_CNT_TYPE_IPV4_ROUT - Count IPv6 routing rules + */ +enum ipahal_reg_dbg_cnt_type { + DBG_CNT_TYPE_IPV4_FLTR, + DBG_CNT_TYPE_IPV4_ROUT, + DBG_CNT_TYPE_GENERAL, + DBG_CNT_TYPE_IPV6_FLTR, + DBG_CNT_TYPE_IPV6_ROUT, +}; + +/* + * struct ipahal_reg_debug_cnt_ctrl - IPA_DEBUG_CNT_CTRL_n register + * @en - Enable debug counter + * @type - Type of debugging couting + * @product - False->Count Bytes . True->Count #packets + * @src_pipe - Specific Pipe to match. If FF, no need to match + * specific pipe + * @rule_idx_pipe_rule - Global Rule or Pipe Rule. If pipe, then indicated by + * src_pipe. Starting at IPA V3_5, + * no support on Global Rule. This field will be ignored. + * @rule_idx - Rule index. Irrelevant for type General + */ +struct ipahal_reg_debug_cnt_ctrl { + bool en; + enum ipahal_reg_dbg_cnt_type type; + bool product; + u8 src_pipe; + bool rule_idx_pipe_rule; + u16 rule_idx; +}; + +/* + * struct ipahal_reg_rsrc_grp_cfg - Mix/Max values for two rsrc groups + * @x_min - first group min value + * @x_max - first group max value + * @y_min - second group min value + * @y_max - second group max value + */ +struct ipahal_reg_rsrc_grp_cfg { + u32 x_min; + u32 x_max; + u32 y_min; + u32 y_max; +}; + +/* + * struct ipahal_reg_rx_hps_clients - Min or Max values for RX HPS clients + * @client_minmax - Min or Max values. In case of depth 0 the 4 values + * are used. 
In case of depth 1, only the first 2 values are used + */ +struct ipahal_reg_rx_hps_clients { + u32 client_minmax[4]; +}; + +/* + * struct ipahal_reg_rx_hps_weights - weight values for RX HPS clients + * @hps_queue_weight_0 - 4 bit Weight for RX_HPS_CMDQ #0 (3:0) + * @hps_queue_weight_1 - 4 bit Weight for RX_HPS_CMDQ #1 (7:4) + * @hps_queue_weight_2 - 4 bit Weight for RX_HPS_CMDQ #2 (11:8) + * @hps_queue_weight_3 - 4 bit Weight for RX_HPS_CMDQ #3 (15:12) + */ +struct ipahal_reg_rx_hps_weights { + u32 hps_queue_weight_0; + u32 hps_queue_weight_1; + u32 hps_queue_weight_2; + u32 hps_queue_weight_3; +}; + +/* + * struct ipahal_reg_valmask - holding values and masking for registers + * HAL application may require only value and mask of it for some + * register fields. + * @val - The value + * @mask - Tha mask of the value + */ +struct ipahal_reg_valmask { + u32 val; + u32 mask; +}; + +/* + * struct ipahal_reg_fltrt_hash_flush - Flt/Rt flush configuration + * @v6_rt - Flush IPv6 Routing cache + * @v6_flt - Flush IPv6 Filtering cache + * @v4_rt - Flush IPv4 Routing cache + * @v4_flt - Flush IPv4 Filtering cache + */ +struct ipahal_reg_fltrt_hash_flush { + bool v6_rt; + bool v6_flt; + bool v4_rt; + bool v4_flt; +}; + +/* + * struct ipahal_reg_single_ndp_mode - IPA SINGLE_NDP_MODE register + * @single_ndp_en: When set to '1', IPA builds MBIM frames with up to 1 + * NDP-header. + * @unused: undefined bits of the register + */ +struct ipahal_reg_single_ndp_mode { + bool single_ndp_en; + u32 undefined; +}; + +/* + * struct ipahal_reg_qcncm - IPA QCNCM register + * @mode_en: When QCNCM_MODE_EN=1, IPA will use QCNCM signature. + * @mode_val: Used only when QCNCM_MODE_EN=1 and sets SW Signature in + * the NDP header. + * @unused: undefined bits of the register + */ +struct ipahal_reg_qcncm { + bool mode_en; + u32 mode_val; + u32 undefined; +}; + +/* + * struct ipahal_reg_qsb_max_writes - IPA QSB Max Writes register + * @qmb_0_max_writes: Max number of outstanding writes for GEN_QMB_0 + * @qmb_1_max_writes: Max number of outstanding writes for GEN_QMB_1 + */ +struct ipahal_reg_qsb_max_writes { + u32 qmb_0_max_writes; + u32 qmb_1_max_writes; +}; + +/* + * struct ipahal_reg_qsb_max_reads - IPA QSB Max Reads register + * @qmb_0_max_reads: Max number of outstanding reads for GEN_QMB_0 + * @qmb_1_max_reads: Max number of outstanding reads for GEN_QMB_1 + * @qmb_0_max_read_beats: Max number of outstanding read beats for GEN_QMB_0 + * @qmb_1_max_read_beats: Max number of outstanding read beats for GEN_QMB_1 + */ +struct ipahal_reg_qsb_max_reads { + u32 qmb_0_max_reads; + u32 qmb_1_max_reads; + u32 qmb_0_max_read_beats; + u32 qmb_1_max_read_beats; +}; + +/* + * struct ipahal_reg_tx_cfg - IPA TX_CFG register + * @tx0_prefetch_disable: Disable prefetch on TX0 + * @tx1_prefetch_disable: Disable prefetch on TX1 + * @tx0_prefetch_almost_empty_size: Prefetch almost empty size on TX0 + * @tx1_prefetch_almost_empty_size: Prefetch almost empty size on TX1 + * @dmaw_scnd_outsd_pred_threshold: + * @dmaw_max_beats_256_dis: + * @dmaw_scnd_outsd_pred_en: + * @pa_mask_en: + */ +struct ipahal_reg_tx_cfg { + bool tx0_prefetch_disable; + bool tx1_prefetch_disable; + u32 tx0_prefetch_almost_empty_size; + u32 tx1_prefetch_almost_empty_size; + u32 dmaw_scnd_outsd_pred_threshold; + u32 dmaw_max_beats_256_dis; + u32 dmaw_scnd_outsd_pred_en; + u32 pa_mask_en; + +}; + +/* + * struct ipahal_reg_idle_indication_cfg - IPA IDLE_INDICATION_CFG register + * @const_non_idle_enable: enable the asserting of the IDLE value and DCD + * 
@enter_idle_debounce_thresh: configure the debounce threshold + */ +struct ipahal_reg_idle_indication_cfg { + u16 enter_idle_debounce_thresh; + bool const_non_idle_enable; +}; + +/* + * struct ipa_ep_cfg_ctrl_scnd - PA_ENDP_INIT_CTRL_SCND_n register + * @endp_delay: delay endpoint + */ +struct ipahal_ep_cfg_ctrl_scnd { + bool endp_delay; +}; + +/* + * ipahal_reg_name_str() - returns string that represent the register + * @reg_name: [in] register name + */ +const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name); + +/* + * ipahal_read_reg_n() - Get the raw value of n parameterized reg + */ +u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n); + +/* + * ipahal_read_reg_mn() - Get mn parameterized reg value + */ +u32 ipahal_read_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n); + +/* + * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value + */ +void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val); + +/* + * ipahal_write_reg_n() - Write to n parameterized reg a raw value + */ +static inline void ipahal_write_reg_n(enum ipahal_reg_name reg, + u32 n, u32 val) +{ + ipahal_write_reg_mn(reg, 0, n, val); +} + +/* + * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg + */ +u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields); + +/* + * ipahal_write_reg_n_fields() - Write to n parameterized reg a prased value + */ +void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n, + const void *fields); + +/* + * ipahal_read_reg() - Get the raw value of a reg + */ +static inline u32 ipahal_read_reg(enum ipahal_reg_name reg) +{ + return ipahal_read_reg_n(reg, 0); +} + +/* + * ipahal_write_reg() - Write to reg a raw value + */ +static inline void ipahal_write_reg(enum ipahal_reg_name reg, + u32 val) +{ + ipahal_write_reg_mn(reg, 0, 0, val); +} + +/* + * ipahal_read_reg_fields() - Get the parsed value of a reg + */ +static inline u32 ipahal_read_reg_fields(enum ipahal_reg_name reg, void *fields) +{ + return ipahal_read_reg_n_fields(reg, 0, fields); +} + +/* + * ipahal_write_reg_fields() - Write to reg a parsed value + */ +static inline void ipahal_write_reg_fields(enum ipahal_reg_name reg, + const void *fields) +{ + ipahal_write_reg_n_fields(reg, 0, fields); +} + +/* + * Get the offset of a m/n parameterized register + */ +u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n); + +/* + * Get the offset of a n parameterized register + */ +static inline u32 ipahal_get_reg_n_ofst(enum ipahal_reg_name reg, u32 n) +{ + return ipahal_get_reg_mn_ofst(reg, 0, n); +} + +/* + * Get the offset of a register + */ +static inline u32 ipahal_get_reg_ofst(enum ipahal_reg_name reg) +{ + return ipahal_get_reg_mn_ofst(reg, 0, 0); +} + +/* + * Get the register base address + */ +u32 ipahal_get_reg_base(void); + +/* + * Specific functions + * These functions supply specific register values for specific operations + * that cannot be reached by generic functions. + * E.g. To disable aggregation, need to write to specific bits of the AGGR + * register. The other bits should be untouched. This oeprate is very specific + * and cannot be generically defined. For such operations we define these + * specific functions. 
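+ *
+ * Assumed usage (editor's sketch, not taken from this patch; pipe_num and
+ * clnt_hdl are placeholder variables): a caller typically applies the
+ * returned val/mask pair with a read-modify-write through the generic
+ * accessors declared above, e.g.
+ *
+ *	struct ipahal_reg_valmask valmask;
+ *	u32 regval;
+ *
+ *	ipahal_get_status_ep_valmask(pipe_num, &valmask);
+ *	regval = ipahal_read_reg_n(IPA_ENDP_STATUS_n, clnt_hdl);
+ *	regval &= ~valmask.mask;
+ *	regval |= valmask.val;
+ *	ipahal_write_reg_n(IPA_ENDP_STATUS_n, clnt_hdl, regval);
+ *
+ * which updates only the STATUS_ENDP field and leaves the other bits of the
+ * register untouched.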
+ */ +u32 ipahal_aggr_get_max_byte_limit(void); +u32 ipahal_aggr_get_max_pkt_limit(void); +void ipahal_get_aggr_force_close_valmask(int ep_idx, + struct ipahal_reg_valmask *valmask); +void ipahal_get_fltrt_hash_flush_valmask( + struct ipahal_reg_fltrt_hash_flush *flush, + struct ipahal_reg_valmask *valmask); +void ipahal_get_status_ep_valmask(int pipe_num, + struct ipahal_reg_valmask *valmask); + +#endif /* _IPAHAL_REG_H_ */ + diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h new file mode 100644 index 000000000000..664d25454f8e --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h @@ -0,0 +1,363 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPAHAL_REG_I_H_ +#define _IPAHAL_REG_I_H_ + +int ipahal_reg_init(enum ipa_hw_type ipa_hw_type); + +#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask)) +#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \ + (reg |= ((val) << (shift)) & (mask)) +#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \ + (((reg) & (mask)) >> (shift)) + + +/* IPA_ROUTE register */ +#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0 +#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1 +#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1 +#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e +#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6 +#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0X40 +#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7 +#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80 +#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000 +#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11 +#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK 0x1000000 +#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18 + +/* IPA_ENDP_INIT_HDR_n register */ +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80 +#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000 +#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000 +#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14 +#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000 +#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000 +#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b +#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000 +#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2 0x1c + +/* IPA_ENDP_INIT_HDR_EXT_n register */ +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2 +#define 
IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0 0x3c00 + +/* IPA_ENDP_INIT_AGGR_N register */ +#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK 0x1000000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT 0x18 +#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x400000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16 +#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x200000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15 +#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf +#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00 +#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa +#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0 +#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5 +#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c +#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2 +#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3 +#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0 + +/* IPA_AGGR_FORCE_CLOSE register */ +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3fffffff +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0 +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5 0xfffff +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5 0 +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_0 0x7fffff +#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_0 0 + +/* IPA_ENDP_INIT_ROUTE_n register */ +#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f +#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0 + +/* IPA_ENDP_INIT_MODE_n register */ +#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_BMSK 0x40000000 +#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_SHFT 0x1e +#define IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000 +#define IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d +#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_BMSK 0x10000000 +#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_SHFT 0x1c +#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000 +#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc +#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0 +#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4 +#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7 +#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0 + +/* IPA_ENDP_INIT_NAT_n register */ +#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3 +#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0 + +/* IPA_ENDP_INIT_CONN_TRACK_n register */ +#define IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_BMSK 0x1 +#define IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_SHFT 0x0 + +/* IPA_ENDP_INIT_CTRL_n register */ +#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1 +#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0 +#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2 +#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1 + +/* IPA_ENDP_INIT_CTRL_SCND_n register */ +#define IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_BMSK 0x2 +#define 
IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_SHFT 0x1 + +/* IPA_ENDP_INIT_HOL_BLOCK_EN_n register */ +#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX 19 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX_V_4_0 22 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0 + +/* IPA_ENDP_INIT_HOL_BLOCK_TIMER_n register */ +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK 0xffffffff +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT 0x0 + +/* IPA_ENDP_INIT_DEAGGR_n register */ +#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000 +#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x80 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7 +#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F +#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0 + +/* IPA_IPA_ENDP_INIT_SEQ_n register */ +#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000 +#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc +#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00 +#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8 +#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0 +#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4 +#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf +#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0 + +/* IPA_DEBUG_CNT_REG_m register */ +#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff +#define IPA_DEBUG_CNT_REG_N_MAX 15 +#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff +#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0 + +/* IPA_ENDP_INIT_CFG_n register */ +#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK 0x100 +#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT 0x8 +#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78 +#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3 +#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6 +#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1 +#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1 +#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0 + +/* IPA_ENDP_INIT_HDR_METADATA_MASK_n register */ +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0 + +/* IPA_IPA_ENDP_INIT_HDR_METADATA_n register */ +#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff +#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0 + +/* IPA_ENDP_INIT_RSRC_GRP_n register */ +#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7 +#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0 +#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5 0x3 +#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5 0 + +/* IPA_SHARED_MEM_SIZE register */ +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK 0xffff0000 +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT 0x10 +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK 0xffff +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT 0x0 + +/* IPA_DEBUG_CNT_CTRL_n register */ +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK 0x10000000 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT 0x1c +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK 0x0ff00000 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5 0x1ff00000 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT 0x14 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000 
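+/*
+ * Editor's note (illustrative sketch, not part of the original patch;
+ * "regval" and "src_pipe" are placeholder variables): the BMSK/SHFT pairs in
+ * this header are consumed through IPA_SETFIELD_IN_REG() and
+ * IPA_GETFIELD_FROM_REG() defined at the top of this file. For example, a
+ * construct callback could pack a debug-counter enable and source pipe as
+ *
+ *	u32 regval = 0;
+ *
+ *	IPA_SETFIELD_IN_REG(regval, 1,
+ *		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT,
+ *		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK);
+ *	IPA_SETFIELD_IN_REG(regval, src_pipe,
+ *		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT,
+ *		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK);
+ *
+ * and a parse callback recovers the fields with IPA_GETFIELD_FROM_REG().
+ */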
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT 0xc +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK 0x100 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT 0x8 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK 0x70 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT 0x4 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK 0x1 +#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT 0x0 + +/* IPA_FILT_ROUT_HASH_FLUSH register */ +#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT 12 +#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT 8 +#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT 4 +#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT 0 + +/* IPA_SINGLE_NDP_MODE register */ +#define IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK 0xfffffffe +#define IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT 0x1 +#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK 0x1 +#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT 0 + +/* IPA_QCNCM register */ +#define IPA_QCNCM_MODE_UNDEFINED2_BMSK 0xf0000000 +#define IPA_QCNCM_MODE_UNDEFINED2_SHFT 0x1c +#define IPA_QCNCM_MODE_VAL_BMSK 0xffffff0 +#define IPA_QCNCM_MODE_VAL_SHFT 0x4 +#define IPA_QCNCM_UNDEFINED1_BMSK 0xe +#define IPA_QCNCM_UNDEFINED1_SHFT 0x1 +#define IPA_QCNCM_MODE_EN_BMSK 0x1 +#define IPA_QCNCM_MODE_EN_SHFT 0 + +/* IPA_ENDP_STATUS_n register */ +#define IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK 0x200 +#define IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT 0x9 +#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100 +#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8 +#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e +#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1 +#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1 +#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0 + +/* IPA_ENDP_FILTER_ROUTER_HSH_CFG_n register */ +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT 1 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK 0x2 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT 2 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK 0x4 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT 3 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK 0x8 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT 4 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK 0x10 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT 5 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK 0x20 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT 6 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK 0x40 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT 7 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK 0xff80 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT 16 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK 0x10000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT 17 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK 0x20000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT 18 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK 0x40000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT 19 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK 0x80000 +#define 
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT 20 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK 0x100000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT 21 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK 0x200000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT 22 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK 0x400000 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT 23 +#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK 0xff800000 + +/* IPA_RSRC_GRP_XY_RSRC_TYPE_n register */ +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK 0xFF000000 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT 24 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK 0xFF0000 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT 16 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK 0xFF00 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5 0x3F000000 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5 24 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5 0x3F0000 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5 16 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5 0x3F00 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5 8 +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5 0x3F +#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5 0 + +/* IPA_IPA_IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */ +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n))) +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(n) \ + (0xF << (8 * (n))) +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n)) + +/* IPA_QSB_MAX_WRITES register */ +#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK (0xf) +#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT (0) +#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK (0xf0) +#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT (4) + +/* IPA_QSB_MAX_READS register */ +#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK (0xf) +#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT (0) +#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK (0xf0) +#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT (4) + +/* IPA_QSB_MAX_READS_BEATS register */ +#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_BMSK_V4_0 (0xff0000) +#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_SHFT_V4_0 (0x10) +#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_BMSK_V4_0 (0xff000000) +#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_SHFT_V4_0 (0x18) + +/* IPA_TX_CFG register */ +#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5 (0x1) +#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5 (0) +#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5 (0x2) +#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5 (1) +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5 (0x1C) +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5 (2) + +/* IPA_TX_CFG register v4.0 */ +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0 (0x1e000) +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0 (0xd) +#define IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0 (0x1000) +#define IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0 (0xc) +#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0 (0x800) +#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0 (0xb) +#define 
IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0 (0x400) +#define IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0 (0xa) +#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0 (0x3c0) +#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0 (0x6) +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0 (0x3c) +#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0 (0x2) + +/* IPA_IDLE_INDICATION_CFG regiser */ +#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5 (0xffff) +#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5 (0) +#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5 (0x10000) +#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5 (16) + +/* IPA_HPS_FTCH_QUEUE_WEIGHT register */ +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK (0xf) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT (0x0) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK (0xf0) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT (0x4) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK (0xf00) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT (0x8) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK (0xf000) +#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT (0xc) + +#endif /* _IPAHAL_REG_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c new file mode 100644 index 000000000000..b0bfbc574f1d --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -0,0 +1,3313 @@ +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * WWAN Transport Network Driver. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_qmi_service.h" +#include +#include +#include +#include +#include + +#include "ipa_trace.h" + +#define WWAN_METADATA_SHFT 24 +#define WWAN_METADATA_MASK 0xFF000000 +#define WWAN_DATA_LEN 2000 +#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */ +#define HEADROOM_FOR_QMAP 8 /* for mux header */ +#define TAILROOM 0 /* for padding by mux layer */ +#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */ +#define UL_FILTER_RULE_HANDLE_START 69 +#define DEFAULT_OUTSTANDING_HIGH 128 +#define DEFAULT_OUTSTANDING_HIGH_CTL (DEFAULT_OUTSTANDING_HIGH+32) +#define DEFAULT_OUTSTANDING_LOW 64 + +#define IPA_WWAN_DEV_NAME "rmnet_ipa%d" +#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0" + +#define IPA_WWAN_RX_SOFTIRQ_THRESH 16 + +#define INVALID_MUX_ID 0xFF +#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64 +#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64 +#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */ +#define NAPI_WEIGHT 60 + +#define IPA_NETDEV() \ + ((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? 
\ + rmnet_ipa3_ctx->wwan_priv->net : NULL) + +#define IPA_WWAN_CONS_DESC_FIFO_SZ 256 + +static int ipa3_wwan_add_ul_flt_rule_to_ipa(void); +static int ipa3_wwan_del_ul_flt_rule_to_ipa(void); +static void ipa3_wwan_msg_free_cb(void*, u32, u32); +static void ipa3_rmnet_rx_cb(void *priv); +static int ipa3_rmnet_poll(struct napi_struct *napi, int budget); + +static void ipa3_wake_tx_queue(struct work_struct *work); +static DECLARE_WORK(ipa3_tx_wakequeue_work, ipa3_wake_tx_queue); + +static void tethering_stats_poll_queue(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work, + tethering_stats_poll_queue); + +enum ipa3_wwan_device_status { + WWAN_DEVICE_INACTIVE = 0, + WWAN_DEVICE_ACTIVE = 1 +}; + +struct ipa3_rmnet_plat_drv_res { + bool ipa_rmnet_ssr; + bool ipa_loaduC; + bool ipa_advertise_sg_support; + bool ipa_napi_enable; + u32 wan_rx_desc_size; +}; + +/** + * struct ipa3_wwan_private - WWAN private data + * @net: network interface struct implemented by this driver + * @stats: iface statistics + * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed + * @outstanding_high: number of outstanding packets allowed + * @outstanding_low: number of outstanding packets which shall cause + * @ch_id: channel id + * @lock: spinlock for mutual exclusion + * @device_status: holds device status + * + * WWAN private - holds all relevant info about WWAN driver + */ +struct ipa3_wwan_private { + struct net_device *net; + struct net_device_stats stats; + atomic_t outstanding_pkts; + int outstanding_high_ctl; + int outstanding_high; + int outstanding_low; + uint32_t ch_id; + spinlock_t lock; + struct completion resource_granted_completion; + enum ipa3_wwan_device_status device_status; + struct napi_struct napi; +}; + +struct rmnet_ipa3_context { + struct ipa3_wwan_private *wwan_priv; + struct ipa_sys_connect_params apps_to_ipa_ep_cfg; + struct ipa_sys_connect_params ipa_to_apps_ep_cfg; + u32 qmap_hdr_hdl; + u32 dflt_v4_wan_rt_hdl; + u32 dflt_v6_wan_rt_hdl; + struct ipa3_rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL]; + int num_q6_rules; + int old_num_q6_rules; + int rmnet_index; + bool egress_set; + bool a7_ul_flt_set; + struct workqueue_struct *rm_q6_wq; + atomic_t is_initialized; + atomic_t is_ssr; + void *subsys_notify_handle; + u32 apps_to_ipa3_hdl; + u32 ipa3_to_apps_hdl; + struct mutex pipe_handle_guard; + struct mutex add_mux_channel_lock; +}; + +static struct rmnet_ipa3_context *rmnet_ipa3_ctx; +static struct ipa3_rmnet_plat_drv_res ipa3_rmnet_res; + +/** + * ipa3_setup_a7_qmap_hdr() - Setup default a7 qmap hdr + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +static int ipa3_setup_a7_qmap_hdr(void) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + u32 pyld_sz; + int ret; + + /* install the basic exception header */ + pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add); + hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */ + + if (ipa3_add_hdr(hdr)) { + IPAWANERR("fail to add IPA_A7_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + IPAWANERR("fail to add IPA_A7_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + rmnet_ipa3_ctx->qmap_hdr_hdl = hdr_entry->hdr_hdl; + + ret = 0; +bail: + 
kfree(hdr); + return ret; +} + +static void ipa3_del_a7_qmap_hdr(void) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *hdl_entry; + u32 pyld_sz; + int ret; + + pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 * + sizeof(struct ipa_hdr_del); + del_hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!del_hdr) { + IPAWANERR("fail to alloc exception hdr_del\n"); + return; + } + + del_hdr->commit = 1; + del_hdr->num_hdls = 1; + hdl_entry = &del_hdr->hdl[0]; + hdl_entry->hdl = rmnet_ipa3_ctx->qmap_hdr_hdl; + + ret = ipa3_del_hdr(del_hdr); + if (ret || hdl_entry->status) + IPAWANERR("ipa3_del_hdr failed\n"); + else + IPAWANDBG("hdrs deletion done\n"); + + rmnet_ipa3_ctx->qmap_hdr_hdl = 0; + kfree(del_hdr); +} + +static void ipa3_del_qmap_hdr(uint32_t hdr_hdl) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *hdl_entry; + u32 pyld_sz; + int ret; + + if (hdr_hdl == 0) { + IPAWANERR("Invalid hdr_hdl provided\n"); + return; + } + + pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 * + sizeof(struct ipa_hdr_del); + del_hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!del_hdr) { + IPAWANERR("fail to alloc exception hdr_del\n"); + return; + } + + del_hdr->commit = 1; + del_hdr->num_hdls = 1; + hdl_entry = &del_hdr->hdl[0]; + hdl_entry->hdl = hdr_hdl; + + ret = ipa3_del_hdr(del_hdr); + if (ret || hdl_entry->status) + IPAWANERR("ipa3_del_hdr failed\n"); + else + IPAWANDBG("header deletion done\n"); + + rmnet_ipa3_ctx->qmap_hdr_hdl = 0; + kfree(del_hdr); +} + +static void ipa3_del_mux_qmap_hdrs(void) +{ + int index; + + for (index = 0; index < rmnet_ipa3_ctx->rmnet_index; index++) { + ipa3_del_qmap_hdr(rmnet_ipa3_ctx->mux_channel[index].hdr_hdl); + rmnet_ipa3_ctx->mux_channel[index].hdr_hdl = 0; + } +} + +static int ipa3_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + char hdr_name[IPA_RESOURCE_NAME_MAX]; + u32 pyld_sz; + int ret; + + pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add); + hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + mux_id); + strlcpy(hdr_entry->name, hdr_name, + IPA_RESOURCE_NAME_MAX); + + hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */ + hdr_entry->hdr[1] = (uint8_t) mux_id; + IPAWANDBG("header (%s) with mux-id: (%d)\n", + hdr_name, + hdr_entry->hdr[1]); + if (ipa3_add_hdr(hdr)) { + IPAWANERR("fail to add IPA_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + IPAWANERR("fail to add IPA_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + ret = 0; + *hdr_hdl = hdr_entry->hdr_hdl; +bail: + kfree(hdr); + return ret; +} + +/** + * ipa3_setup_dflt_wan_rt_tables() - Setup default wan routing tables + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +static int ipa3_setup_dflt_wan_rt_tables(void) +{ + struct ipa_ioc_add_rt_rule *rt_rule; + struct ipa_rt_rule_add *rt_rule_entry; + + rt_rule = + kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_add), GFP_KERNEL); + if (!rt_rule) + return -ENOMEM; + + /* setup a default v4 route to point to Apps */ + rt_rule->num_rules = 1; + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME, + IPA_RESOURCE_NAME_MAX); + + rt_rule_entry = &rt_rule->rules[0]; + rt_rule_entry->at_rear = 1; + rt_rule_entry->rule.dst = 
IPA_CLIENT_APPS_WAN_CONS; + rt_rule_entry->rule.hdr_hdl = rmnet_ipa3_ctx->qmap_hdr_hdl; + + if (ipa3_add_rt_rule(rt_rule)) { + IPAWANERR("fail to add dflt_wan v4 rule\n"); + kfree(rt_rule); + return -EPERM; + } + + IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl; + + /* setup a default v6 route to point to A5 */ + rt_rule->ip = IPA_IP_v6; + if (ipa3_add_rt_rule(rt_rule)) { + IPAWANERR("fail to add dflt_wan v6 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl; + + kfree(rt_rule); + return 0; +} + +static void ipa3_del_dflt_wan_rt_tables(void) +{ + struct ipa_ioc_del_rt_rule *rt_rule; + struct ipa_rt_rule_del *rt_rule_entry; + int len; + + len = sizeof(struct ipa_ioc_del_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_del); + rt_rule = kzalloc(len, GFP_KERNEL); + if (!rt_rule) + return; + + memset(rt_rule, 0, len); + rt_rule->commit = 1; + rt_rule->num_hdls = 1; + rt_rule->ip = IPA_IP_v4; + + rt_rule_entry = &rt_rule->hdl[0]; + rt_rule_entry->status = -1; + rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl; + + IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n", + rt_rule_entry->hdl, IPA_IP_v4); + if (ipa3_del_rt_rule(rt_rule) || + (rt_rule_entry->status)) { + IPAWANERR("Routing rule deletion failed\n"); + } + + rt_rule->ip = IPA_IP_v6; + rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl; + IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n", + rt_rule_entry->hdl, IPA_IP_v6); + if (ipa3_del_rt_rule(rt_rule) || + (rt_rule_entry->status)) { + IPAWANERR("Routing rule deletion failed\n"); + } + + kfree(rt_rule); +} + +static void ipa3_copy_qmi_flt_rule_ex( + struct ipa_ioc_ext_intf_prop *q6_ul_flt_rule_ptr, + struct ipa_filter_spec_ex_type_v01 *flt_spec_ptr) +{ + int j; + struct ipa_ipfltr_range_eq_16 *q6_ul_filter_nat_ptr; + struct ipa_ipfltr_range_eq_16_type_v01 *filter_spec_nat_ptr; + + q6_ul_flt_rule_ptr->ip = flt_spec_ptr->ip_type; + q6_ul_flt_rule_ptr->action = flt_spec_ptr->filter_action; + if (flt_spec_ptr->is_routing_table_index_valid == true) + q6_ul_flt_rule_ptr->rt_tbl_idx = + flt_spec_ptr->route_table_index; + if (flt_spec_ptr->is_mux_id_valid == true) + q6_ul_flt_rule_ptr->mux_id = + flt_spec_ptr->mux_id; + q6_ul_flt_rule_ptr->rule_id = + flt_spec_ptr->rule_id; + q6_ul_flt_rule_ptr->is_rule_hashable = + flt_spec_ptr->is_rule_hashable; + q6_ul_flt_rule_ptr->eq_attrib.rule_eq_bitmap = + flt_spec_ptr->filter_rule.rule_eq_bitmap; + q6_ul_flt_rule_ptr->eq_attrib.tos_eq_present = + flt_spec_ptr->filter_rule.tos_eq_present; + q6_ul_flt_rule_ptr->eq_attrib.tos_eq = + flt_spec_ptr->filter_rule.tos_eq; + q6_ul_flt_rule_ptr->eq_attrib.protocol_eq_present = + flt_spec_ptr->filter_rule.protocol_eq_present; + q6_ul_flt_rule_ptr->eq_attrib.protocol_eq = + flt_spec_ptr->filter_rule.protocol_eq; + q6_ul_flt_rule_ptr->eq_attrib.num_ihl_offset_range_16 = + flt_spec_ptr->filter_rule.num_ihl_offset_range_16; + + for (j = 0; + j < q6_ul_flt_rule_ptr->eq_attrib.num_ihl_offset_range_16; + j++) { + q6_ul_filter_nat_ptr = + &q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_range_16[j]; + filter_spec_nat_ptr = + &flt_spec_ptr->filter_rule.ihl_offset_range_16[j]; + q6_ul_filter_nat_ptr->offset = + filter_spec_nat_ptr->offset; + q6_ul_filter_nat_ptr->range_low = + filter_spec_nat_ptr->range_low; + q6_ul_filter_nat_ptr->range_high = + filter_spec_nat_ptr->range_high; + } + 
q6_ul_flt_rule_ptr->eq_attrib.num_offset_meq_32 = + flt_spec_ptr->filter_rule.num_offset_meq_32; + for (j = 0; + j < q6_ul_flt_rule_ptr->eq_attrib.num_offset_meq_32; + j++) { + q6_ul_flt_rule_ptr->eq_attrib.offset_meq_32[j].offset = + flt_spec_ptr->filter_rule.offset_meq_32[j].offset; + q6_ul_flt_rule_ptr->eq_attrib.offset_meq_32[j].mask = + flt_spec_ptr->filter_rule.offset_meq_32[j].mask; + q6_ul_flt_rule_ptr->eq_attrib.offset_meq_32[j].value = + flt_spec_ptr->filter_rule.offset_meq_32[j].value; + } + + q6_ul_flt_rule_ptr->eq_attrib.tc_eq_present = + flt_spec_ptr->filter_rule.tc_eq_present; + q6_ul_flt_rule_ptr->eq_attrib.tc_eq = + flt_spec_ptr->filter_rule.tc_eq; + q6_ul_flt_rule_ptr->eq_attrib.fl_eq_present = + flt_spec_ptr->filter_rule.flow_eq_present; + q6_ul_flt_rule_ptr->eq_attrib.fl_eq = + flt_spec_ptr->filter_rule.flow_eq; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_16_present = + flt_spec_ptr->filter_rule.ihl_offset_eq_16_present; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_16.offset = + flt_spec_ptr->filter_rule.ihl_offset_eq_16.offset; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_16.value = + flt_spec_ptr->filter_rule.ihl_offset_eq_16.value; + + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_32_present = + flt_spec_ptr->filter_rule.ihl_offset_eq_32_present; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_32.offset = + flt_spec_ptr->filter_rule.ihl_offset_eq_32.offset; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_eq_32.value = + flt_spec_ptr->filter_rule.ihl_offset_eq_32.value; + + q6_ul_flt_rule_ptr->eq_attrib.num_ihl_offset_meq_32 = + flt_spec_ptr->filter_rule.num_ihl_offset_meq_32; + for (j = 0; + j < q6_ul_flt_rule_ptr->eq_attrib.num_ihl_offset_meq_32; + j++) { + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_meq_32[j].offset = + flt_spec_ptr->filter_rule.ihl_offset_meq_32[j].offset; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_meq_32[j].mask = + flt_spec_ptr->filter_rule.ihl_offset_meq_32[j].mask; + q6_ul_flt_rule_ptr->eq_attrib.ihl_offset_meq_32[j].value = + flt_spec_ptr->filter_rule.ihl_offset_meq_32[j].value; + } + q6_ul_flt_rule_ptr->eq_attrib.num_offset_meq_128 = + flt_spec_ptr->filter_rule.num_offset_meq_128; + for (j = 0; + j < q6_ul_flt_rule_ptr->eq_attrib.num_offset_meq_128; + j++) { + q6_ul_flt_rule_ptr->eq_attrib.offset_meq_128[j].offset = + flt_spec_ptr->filter_rule.offset_meq_128[j].offset; + memcpy(q6_ul_flt_rule_ptr->eq_attrib.offset_meq_128[j].mask, + flt_spec_ptr->filter_rule.offset_meq_128[j].mask, 16); + memcpy(q6_ul_flt_rule_ptr->eq_attrib.offset_meq_128[j].value, + flt_spec_ptr->filter_rule.offset_meq_128[j].value, 16); + } + + q6_ul_flt_rule_ptr->eq_attrib.metadata_meq32_present = + flt_spec_ptr->filter_rule.metadata_meq32_present; + q6_ul_flt_rule_ptr->eq_attrib.metadata_meq32.offset = + flt_spec_ptr->filter_rule.metadata_meq32.offset; + q6_ul_flt_rule_ptr->eq_attrib.metadata_meq32.mask = + flt_spec_ptr->filter_rule.metadata_meq32.mask; + q6_ul_flt_rule_ptr->eq_attrib.metadata_meq32.value = + flt_spec_ptr->filter_rule.metadata_meq32.value; + q6_ul_flt_rule_ptr->eq_attrib.ipv4_frag_eq_present = + flt_spec_ptr->filter_rule.ipv4_frag_eq_present; +} + + +int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01 + *rule_req) +{ + int i; + + if (rule_req->filter_spec_ex_list_valid == true) { + rmnet_ipa3_ctx->num_q6_rules = + rule_req->filter_spec_ex_list_len; + IPAWANDBG("Received (%d) install_flt_req\n", + rmnet_ipa3_ctx->num_q6_rules); + } else { + rmnet_ipa3_ctx->num_q6_rules = 0; + IPAWANERR("got no UL rules from modem\n"); + return -EINVAL; + 
} + + /* copy UL filter rules from Modem*/ + for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) { + /* check if rules overside the cache*/ + if (i == MAX_NUM_Q6_RULE) { + IPAWANERR("Reaching (%d) max cache ", + MAX_NUM_Q6_RULE); + IPAWANERR(" however total (%d)\n", + rmnet_ipa3_ctx->num_q6_rules); + goto failure; + } + ipa3_copy_qmi_flt_rule_ex(&ipa3_qmi_ctx->q6_ul_filter_rule[i], + &rule_req->filter_spec_ex_list[i]); + } + + if (rule_req->xlat_filter_indices_list_valid) { + if (rule_req->xlat_filter_indices_list_len > + rmnet_ipa3_ctx->num_q6_rules) { + IPAWANERR("Number of xlat indices is not valid: %d\n", + rule_req->xlat_filter_indices_list_len); + goto failure; + } + IPAWANDBG("Receive %d XLAT indices: ", + rule_req->xlat_filter_indices_list_len); + for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) + IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]); + IPAWANDBG("\n"); + + for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) { + if (rule_req->xlat_filter_indices_list[i] + >= rmnet_ipa3_ctx->num_q6_rules) { + IPAWANERR("Xlat rule idx is wrong: %d\n", + rule_req->xlat_filter_indices_list[i]); + goto failure; + } else { + ipa3_qmi_ctx->q6_ul_filter_rule + [rule_req->xlat_filter_indices_list[i]] + .is_xlat_rule = 1; + IPAWANDBG("Rule %d is xlat rule\n", + rule_req->xlat_filter_indices_list[i]); + } + } + } + goto success; + +failure: + rmnet_ipa3_ctx->num_q6_rules = 0; + memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0, + sizeof(ipa3_qmi_ctx->q6_ul_filter_rule)); + return -EINVAL; + +success: + return 0; +} + +static int ipa3_wwan_add_ul_flt_rule_to_ipa(void) +{ + u32 pyld_sz; + int i, retval = 0; + struct ipa_ioc_add_flt_rule *param; + struct ipa_flt_rule_add flt_rule_entry; + struct ipa_fltr_installed_notif_req_msg_v01 *req; + + pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) + + sizeof(struct ipa_flt_rule_add); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) + return -ENOMEM; + + req = (struct ipa_fltr_installed_notif_req_msg_v01 *) + kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01), + GFP_KERNEL); + if (!req) { + kfree(param); + return -ENOMEM; + } + + param->commit = 1; + param->ep = IPA_CLIENT_APPS_WAN_PROD; + param->global = false; + param->num_rules = (uint8_t)1; + + for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) { + param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip; + memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add)); + flt_rule_entry.at_rear = true; + flt_rule_entry.rule.action = + ipa3_qmi_ctx->q6_ul_filter_rule[i].action; + flt_rule_entry.rule.rt_tbl_idx + = ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx; + flt_rule_entry.rule.retain_hdr = true; + flt_rule_entry.rule.hashable = + ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable; + flt_rule_entry.rule.rule_id = + ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id; + + /* debug rt-hdl*/ + IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n", + i, flt_rule_entry.rule.rt_tbl_idx); + flt_rule_entry.rule.eq_attrib_type = true; + memcpy(&(flt_rule_entry.rule.eq_attrib), + &ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib, + sizeof(struct ipa_ipfltri_rule_eq)); + memcpy(&(param->rules[0]), &flt_rule_entry, + sizeof(struct ipa_flt_rule_add)); + if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) { + retval = -EFAULT; + IPAWANERR("add A7 UL filter rule(%d) failed\n", i); + } else { + /* store the rule handler */ + ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i] = + param->rules[0].flt_rule_hdl; + } + } + + /* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/ + req->source_pipe_index = + 
ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD); + if (req->source_pipe_index == IPA_EP_NOT_ALLOCATED) { + IPAWANERR("ep mapping failed\n"); + retval = -EFAULT; + } + + req->install_status = QMI_RESULT_SUCCESS_V01; + req->rule_id_valid = 1; + req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules; + for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) { + req->rule_id[i] = + ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id; + } + if (ipa3_qmi_filter_notify_send(req)) { + IPAWANDBG("add filter rule index on A7-RX failed\n"); + retval = -EFAULT; + } + rmnet_ipa3_ctx->old_num_q6_rules = rmnet_ipa3_ctx->num_q6_rules; + IPAWANDBG("add (%d) filter rule index on A7-RX\n", + rmnet_ipa3_ctx->old_num_q6_rules); + kfree(param); + kfree(req); + return retval; +} + +static int ipa3_wwan_del_ul_flt_rule_to_ipa(void) +{ + u32 pyld_sz; + int i, retval = 0; + struct ipa_ioc_del_flt_rule *param; + struct ipa_flt_rule_del flt_rule_entry; + + pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) + + sizeof(struct ipa_flt_rule_del); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) + return -ENOMEM; + + + param->commit = 1; + param->num_hdls = (uint8_t) 1; + + for (i = 0; i < rmnet_ipa3_ctx->old_num_q6_rules; i++) { + param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip; + memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del)); + flt_rule_entry.hdl = ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i]; + /* debug rt-hdl*/ + IPAWANDBG("delete-IPA rule index(%d)\n", i); + memcpy(&(param->hdl[0]), &flt_rule_entry, + sizeof(struct ipa_flt_rule_del)); + if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) { + IPAWANERR("del A7 UL filter rule(%d) failed\n", i); + kfree(param); + return -EFAULT; + } + } + + /* set UL filter-rule add-indication */ + rmnet_ipa3_ctx->a7_ul_flt_set = false; + rmnet_ipa3_ctx->old_num_q6_rules = 0; + + kfree(param); + return retval; +} + +static int ipa3_find_mux_channel_index(uint32_t mux_id) +{ + int i; + + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) { + if (mux_id == rmnet_ipa3_ctx->mux_channel[i].mux_id) + return i; + } + return MAX_NUM_OF_MUX_CHANNEL; +} + +static int find_vchannel_name_index(const char *vchannel_name) +{ + int i; + + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) { + if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name, + vchannel_name) == 0) + return i; + } + return MAX_NUM_OF_MUX_CHANNEL; +} + +static enum ipa_upstream_type find_upstream_type(const char *upstreamIface) +{ + int i; + + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) { + if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name, + upstreamIface) == 0) + return IPA_UPSTEAM_MODEM; + } + + if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0) + return IPA_UPSTEAM_WLAN; + else + return MAX_NUM_OF_MUX_CHANNEL; +} + +static int ipa3_wwan_register_to_ipa(int index) +{ + struct ipa_tx_intf tx_properties = {0}; + struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} }; + struct ipa_ioc_tx_intf_prop *tx_ipv4_property; + struct ipa_ioc_tx_intf_prop *tx_ipv6_property; + struct ipa_rx_intf rx_properties = {0}; + struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} }; + struct ipa_ioc_rx_intf_prop *rx_ipv4_property; + struct ipa_ioc_rx_intf_prop *rx_ipv6_property; + struct ipa_ext_intf ext_properties = {0}; + struct ipa_ioc_ext_intf_prop *ext_ioc_properties; + u32 pyld_sz; + int ret = 0, i; + + IPAWANDBG("index(%d) device[%s]:\n", index, + rmnet_ipa3_ctx->mux_channel[index].vchannel_name); + if (!rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set) { + ret = ipa3_add_qmap_hdr( + rmnet_ipa3_ctx->mux_channel[index].mux_id, 
+ &rmnet_ipa3_ctx->mux_channel[index].hdr_hdl); + if (ret) { + IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index); + return ret; + } + rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set = true; + } + tx_properties.prop = tx_ioc_properties; + tx_ipv4_property = &tx_properties.prop[0]; + tx_ipv4_property->ip = IPA_IP_v4; + tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS; + snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + rmnet_ipa3_ctx->mux_channel[index].mux_id); + tx_ipv6_property = &tx_properties.prop[1]; + tx_ipv6_property->ip = IPA_IP_v6; + tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS; + /* no need use A2_MUX_HDR_NAME_V6_PREF, same header */ + snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + rmnet_ipa3_ctx->mux_channel[index].mux_id); + tx_properties.num_props = 2; + + rx_properties.prop = rx_ioc_properties; + rx_ipv4_property = &rx_properties.prop[0]; + rx_ipv4_property->ip = IPA_IP_v4; + rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_ipv4_property->attrib.meta_data = + rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT; + rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK; + rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_WAN_PROD; + rx_ipv6_property = &rx_properties.prop[1]; + rx_ipv6_property->ip = IPA_IP_v6; + rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_ipv6_property->attrib.meta_data = + rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT; + rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK; + rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_WAN_PROD; + rx_properties.num_props = 2; + + pyld_sz = rmnet_ipa3_ctx->num_q6_rules * + sizeof(struct ipa_ioc_ext_intf_prop); + ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL); + if (!ext_ioc_properties) + return -ENOMEM; + + + ext_properties.prop = ext_ioc_properties; + ext_properties.excp_pipe_valid = true; + ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS; + ext_properties.num_props = rmnet_ipa3_ctx->num_q6_rules; + for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) { + memcpy(&(ext_properties.prop[i]), + &(ipa3_qmi_ctx->q6_ul_filter_rule[i]), + sizeof(struct ipa_ioc_ext_intf_prop)); + ext_properties.prop[i].mux_id = + rmnet_ipa3_ctx->mux_channel[index].mux_id; + IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i, + ext_properties.prop[i].ip, + ext_properties.prop[i].rt_tbl_idx); + IPAWANDBG("action: %d mux:%d\n", + ext_properties.prop[i].action, + ext_properties.prop[i].mux_id); + } + ret = ipa3_register_intf_ext( + rmnet_ipa3_ctx->mux_channel[index].vchannel_name, + &tx_properties, + &rx_properties, + &ext_properties); + if (ret) { + IPAWANERR("[%s]:ipa3_register_intf failed %d\n", + rmnet_ipa3_ctx->mux_channel[index].vchannel_name, ret); + goto fail; + } + rmnet_ipa3_ctx->mux_channel[index].ul_flt_reg = true; +fail: + kfree(ext_ioc_properties); + return ret; +} + +static void ipa3_cleanup_deregister_intf(void) +{ + int i; + int ret; + int8_t *v_name; + + for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) { + v_name = rmnet_ipa3_ctx->mux_channel[i].vchannel_name; + + if (rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg) { + ret = ipa3_deregister_intf(v_name); + if (ret < 0) { + IPAWANERR("de-register device %s(%d) failed\n", + v_name, + i); + return; + } + IPAWANDBG("de-register device %s(%d) success\n", + v_name, + i); + } + rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = false; + } +} + +int ipa3_wwan_update_mux_channel_prop(void) +{ + int ret = 0, i; + /* install UL 
filter rules */ + if (rmnet_ipa3_ctx->egress_set) { + if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) { + IPAWANDBG("setup UL filter rules\n"); + if (rmnet_ipa3_ctx->a7_ul_flt_set) { + IPAWANDBG("del previous UL filter rules\n"); + /* delete rule hdlers */ + ret = ipa3_wwan_del_ul_flt_rule_to_ipa(); + if (ret) { + IPAWANERR("failed to del old rules\n"); + return -EINVAL; + } + IPAWANDBG("deleted old UL rules\n"); + } + ret = ipa3_wwan_add_ul_flt_rule_to_ipa(); + } + if (ret) + IPAWANERR("failed to install UL rules\n"); + else + rmnet_ipa3_ctx->a7_ul_flt_set = true; + } + /* update Tx/Rx/Ext property */ + IPAWANDBG("update Tx/Rx/Ext property in IPA\n"); + if (rmnet_ipa3_ctx->rmnet_index == 0) { + IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n"); + return ret; + } + + ipa3_cleanup_deregister_intf(); + + for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) { + ret = ipa3_wwan_register_to_ipa(i); + if (ret < 0) { + IPAWANERR("failed to re-regist %s, mux %d, index %d\n", + rmnet_ipa3_ctx->mux_channel[i].vchannel_name, + rmnet_ipa3_ctx->mux_channel[i].mux_id, + i); + return -ENODEV; + } + IPAWANERR("dev(%s) has registered to IPA\n", + rmnet_ipa3_ctx->mux_channel[i].vchannel_name); + rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = true; + } + return ret; +} + +#ifdef INIT_COMPLETION +#define reinit_completion(x) INIT_COMPLETION(*(x)) +#endif /* INIT_COMPLETION */ + +static int __ipa_wwan_open(struct net_device *dev) +{ + struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); + + IPAWANDBG("[%s] __wwan_open()\n", dev->name); + if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE) + reinit_completion(&wwan_ptr->resource_granted_completion); + wwan_ptr->device_status = WWAN_DEVICE_ACTIVE; + + if (ipa3_rmnet_res.ipa_napi_enable) + napi_enable(&(wwan_ptr->napi)); + return 0; +} + +/** + * wwan_open() - Opens the wwan network interface. Opens logical + * channel on A2 MUX driver and starts the network stack queue + * + * @dev: network device + * + * Return codes: + * 0: success + * -ENODEV: Error while opening logical channel on A2 MUX driver + */ +static int ipa3_wwan_open(struct net_device *dev) +{ + int rc = 0; + + IPAWANDBG("[%s] wwan_open()\n", dev->name); + rc = __ipa_wwan_open(dev); + if (rc == 0) + netif_start_queue(dev); + return rc; +} + +static int __ipa_wwan_close(struct net_device *dev) +{ + struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); + int rc = 0; + + if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) { + wwan_ptr->device_status = WWAN_DEVICE_INACTIVE; + /* do not close wwan port once up, this causes + * remote side to hang if tried to open again + */ + reinit_completion(&wwan_ptr->resource_granted_completion); + rc = ipa3_deregister_intf(dev->name); + if (rc) { + IPAWANERR("[%s]: ipa3_deregister_intf failed %d\n", + dev->name, rc); + return rc; + } + return rc; + } else { + return -EBADF; + } +} + +/** + * ipa3_wwan_stop() - Stops the wwan network interface. 
Closes + * logical channel on A2 MUX driver and stops the network stack + * queue + * + * @dev: network device + * + * Return codes: + * 0: success + * -ENODEV: Error while closing logical channel on A2 MUX driver + */ +static int ipa3_wwan_stop(struct net_device *dev) +{ + IPAWANDBG("[%s]\n", dev->name); + __ipa_wwan_close(dev); + netif_stop_queue(dev); + return 0; +} + +static int ipa3_wwan_change_mtu(struct net_device *dev, int new_mtu) +{ + if (0 > new_mtu || WWAN_DATA_LEN < new_mtu) + return -EINVAL; + IPAWANDBG("[%s] MTU change: old=%d new=%d\n", + dev->name, dev->mtu, new_mtu); + dev->mtu = new_mtu; + return 0; +} + +/** + * ipa3_wwan_xmit() - Transmits an skb. + * + * @skb: skb to be transmitted + * @dev: network device + * + * Return codes: + * 0: success + * NETDEV_TX_BUSY: Error while transmitting the skb. Try again + * later + * -EFAULT: Error while transmitting the skb + */ +static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int ret = 0; + bool qmap_check; + struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); + struct ipa_tx_meta meta; + + if (skb->protocol != htons(ETH_P_MAP)) { + IPAWANDBG_LOW + ("SW filtering out non-QMAP packet received from %s", + current->comm); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + qmap_check = RMNET_MAP_GET_CD_BIT(skb); + if (netif_queue_stopped(dev)) { + if (qmap_check && + atomic_read(&wwan_ptr->outstanding_pkts) < + wwan_ptr->outstanding_high_ctl) { + pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name); + goto send; + } else { + pr_err("[%s]fatal: %s stopped\n", dev->name, __func__); + return NETDEV_TX_BUSY; + } + } + + /* checking High WM hit */ + if (atomic_read(&wwan_ptr->outstanding_pkts) >= + wwan_ptr->outstanding_high) { + if (!qmap_check) { + IPAWANDBG_LOW("pending(%d)/(%d)- stop(%d)\n", + atomic_read(&wwan_ptr->outstanding_pkts), + wwan_ptr->outstanding_high, + netif_queue_stopped(dev)); + IPAWANDBG_LOW("qmap_chk(%d)\n", qmap_check); + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + } + +send: + /* IPA_RM checking start */ + ret = ipa_rm_inactivity_timer_request_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret == -EINPROGRESS) { + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + if (ret) { + pr_err("[%s] fatal: ipa rm timer request resource failed %d\n", + dev->name, ret); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return -EFAULT; + } + /* IPA_RM checking end */ + + if (RMNET_MAP_GET_CD_BIT(skb)) { + memset(&meta, 0, sizeof(meta)); + meta.pkt_init_dst_ep_valid = true; + meta.pkt_init_dst_ep_remote = true; + meta.pkt_init_dst_ep = + ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_CONS); + ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, &meta); + } else { + ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, NULL); + } + + if (ret) { + ret = NETDEV_TX_BUSY; + goto out; + } + + atomic_inc(&wwan_ptr->outstanding_pkts); + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + ret = NETDEV_TX_OK; +out: + if (atomic_read(&wwan_ptr->outstanding_pkts) == 0) + ipa_rm_inactivity_timer_release_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); + return ret; +} + +static void ipa3_wwan_tx_timeout(struct net_device *dev) +{ + IPAWANERR("[%s], data stall in UL\n", dev->name); +} + +/** + * apps_ipa_tx_complete_notify() - Tx completion notify + * + * @priv: driver context + * @evt: event type + * @data: data provided with event + * + * Check that the packet is the one we sent and release it + * This function will be called in deferred context in IPA wq. 
+ */ +static void apps_ipa_tx_complete_notify(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + struct net_device *dev = (struct net_device *)priv; + struct ipa3_wwan_private *wwan_ptr; + + if (dev != IPA_NETDEV()) { + IPAWANDBG("Received pre-SSR packet completion\n"); + dev_kfree_skb_any(skb); + return; + } + + if (evt != IPA_WRITE_DONE) { + IPAWANERR("unsupported evt on Tx callback, Drop the packet\n"); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return; + } + + wwan_ptr = netdev_priv(dev); + atomic_dec(&wwan_ptr->outstanding_pkts); + __netif_tx_lock_bh(netdev_get_tx_queue(dev, 0)); + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr) && + netif_queue_stopped(wwan_ptr->net) && + atomic_read(&wwan_ptr->outstanding_pkts) < + (wwan_ptr->outstanding_low)) { + IPAWANDBG_LOW("Outstanding low (%d) - waking up queue\n", + wwan_ptr->outstanding_low); + netif_wake_queue(wwan_ptr->net); + } + + if (atomic_read(&wwan_ptr->outstanding_pkts) == 0) + ipa_rm_inactivity_timer_release_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); + __netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0)); + dev_kfree_skb_any(skb); +} + +/** + * apps_ipa_packet_receive_notify() - Rx notify + * + * @priv: driver context + * @evt: event type + * @data: data provided with event + * + * IPA will pass a packet to the Linux network stack with skb->data + */ +static void apps_ipa_packet_receive_notify(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct net_device *dev = (struct net_device *)priv; + + if (evt == IPA_RECEIVE) { + struct sk_buff *skb = (struct sk_buff *)data; + int result; + unsigned int packet_len = skb->len; + + IPAWANDBG_LOW("Rx packet was received"); + skb->dev = IPA_NETDEV(); + skb->protocol = htons(ETH_P_MAP); + + if (ipa3_rmnet_res.ipa_napi_enable) { + trace_rmnet_ipa_netif_rcv_skb3(dev->stats.rx_packets); + result = netif_receive_skb(skb); + } else { + if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH + == 0) { + trace_rmnet_ipa_netifni3(dev->stats.rx_packets); + result = netif_rx_ni(skb); + } else { + trace_rmnet_ipa_netifrx3(dev->stats.rx_packets); + result = netif_rx(skb); + } + } + + if (result) { + pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n", + __func__, __LINE__); + dev->stats.rx_dropped++; + } + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_len; + } else if (evt == IPA_CLIENT_START_POLL) + ipa3_rmnet_rx_cb(priv); + else if (evt == IPA_CLIENT_COMP_NAPI) { + if (ipa3_rmnet_res.ipa_napi_enable) + napi_complete(&(rmnet_ipa3_ctx->wwan_priv->napi)); + } else + IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt); +} + +static int handle3_ingress_format(struct net_device *dev, + struct rmnet_ioctl_extended_s *in) +{ + int ret = 0; + struct ipa_sys_connect_params *ipa_wan_ep_cfg; + + IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n"); + ipa_wan_ep_cfg = &rmnet_ipa3_ctx->ipa_to_apps_ep_cfg; + if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM) + ipa_wan_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en = + IPA_ENABLE_CS_OFFLOAD_DL; + + if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) { + IPAWANDBG("get AGG size %d count %d\n", + in->u.ingress_format.agg_size, + in->u.ingress_format.agg_count); + + ret = ipa_disable_apps_wan_cons_deaggr( + in->u.ingress_format.agg_size, + in->u.ingress_format.agg_count); + + if (!ret) { + ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit = + in->u.ingress_format.agg_size; + ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit = + in->u.ingress_format.agg_count; 
+ } + } + + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 1; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2; + + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = true; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian = 0; + ipa_wan_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000; + + ipa_wan_ep_cfg->client = IPA_CLIENT_APPS_WAN_CONS; + ipa_wan_ep_cfg->notify = apps_ipa_packet_receive_notify; + ipa_wan_ep_cfg->priv = dev; + + ipa_wan_ep_cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable; + ipa_wan_ep_cfg->desc_fifo_sz = + ipa3_rmnet_res.wan_rx_desc_size * IPA_FIFO_ELEMENT_SIZE; + + mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard); + + if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) { + IPAWANDBG("In SSR sequence/recovery\n"); + mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + return -EFAULT; + } + ret = ipa3_setup_sys_pipe(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg, + &rmnet_ipa3_ctx->ipa3_to_apps_hdl); + + mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + + if (ret) + IPAWANERR("failed to configure ingress\n"); + + return ret; +} + +/** + * handle3_egress_format() - Egress data format configuration + * + * Setup IPA egress system pipe and Configure: + * header handling, checksum, de-aggregation and fifo size + * + * @dev: network device + * @e: egress configuration + */ +static int handle3_egress_format(struct net_device *dev, + struct rmnet_ioctl_extended_s *e) +{ + int rc; + struct ipa_sys_connect_params *ipa_wan_ep_cfg; + + IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n"); + ipa_wan_ep_cfg = &rmnet_ipa3_ctx->apps_to_ipa_ep_cfg; + if ((e->u.data) & RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) { + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 8; + ipa_wan_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en = + IPA_ENABLE_CS_OFFLOAD_UL; + ipa_wan_ep_cfg->ipa_ep_cfg.cfg.cs_metadata_hdr_offset = 1; + } else { + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4; + } + + if ((e->u.data) & RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION) { + IPAWANERR("WAN UL Aggregation not supported\n"); + WARN_ON(1); + return -EINVAL; + ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_DEAGGR; + ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr = IPA_QCMAP; + + ipa_wan_ep_cfg->ipa_ep_cfg.deaggr.packet_offset_valid = false; + + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2; + + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = + true; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = + IPA_HDR_PAD; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = + 2; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = + true; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = + 0; + ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian = + false; + } else { + IPAWANDBG("WAN UL Aggregation disabled\n"); + ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR; + } + + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1; + /* modem want offset at 0! 
*/ + ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 0; + + ipa_wan_ep_cfg->ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_WAN_PROD; + ipa_wan_ep_cfg->ipa_ep_cfg.mode.mode = IPA_BASIC; + + ipa_wan_ep_cfg->client = IPA_CLIENT_APPS_WAN_PROD; + ipa_wan_ep_cfg->notify = apps_ipa_tx_complete_notify; + ipa_wan_ep_cfg->desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ; + ipa_wan_ep_cfg->priv = dev; + + mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard); + if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) { + IPAWANDBG("In SSR sequence/recovery\n"); + mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + return -EFAULT; + } + rc = ipa3_setup_sys_pipe( + ipa_wan_ep_cfg, &rmnet_ipa3_ctx->apps_to_ipa3_hdl); + if (rc) { + IPAWANERR("failed to config egress endpoint\n"); + mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + return rc; + } + mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + + if (rmnet_ipa3_ctx->num_q6_rules != 0) { + /* already got Q6 UL filter rules*/ + if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) + rc = ipa3_wwan_add_ul_flt_rule_to_ipa(); + if (rc) + IPAWANERR("install UL rules failed\n"); + else + rmnet_ipa3_ctx->a7_ul_flt_set = true; + } else { + /* wait Q6 UL filter rules*/ + IPAWANDBG("no UL-rules\n"); + } + rmnet_ipa3_ctx->egress_set = true; + + return rc; +} + +/** + * ipa3_wwan_ioctl() - I/O control for wwan network driver. + * + * @dev: network device + * @ifr: ignored + * @cmd: cmd to be excecuded. can be one of the following: + * IPA_WWAN_IOCTL_OPEN - Open the network interface + * IPA_WWAN_IOCTL_CLOSE - Close the network interface + * + * Return codes: + * 0: success + * NETDEV_TX_BUSY: Error while transmitting the skb. Try again + * later + * -EFAULT: Error while transmitting the skb + */ +static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + int rc = 0; + int mru = 1000, epid = 1, mux_index, len; + struct ipa_msg_meta msg_meta; + struct ipa_wan_msg *wan_msg = NULL; + struct rmnet_ioctl_extended_s ext_ioctl_data; + struct rmnet_ioctl_data_s ioctl_data; + struct ipa3_rmnet_mux_val *mux_channel; + int rmnet_index; + uint32_t mux_id; + int8_t *v_name; + struct mutex *mux_mutex_ptr; + + IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd); + switch (cmd) { + /* Set Ethernet protocol */ + case RMNET_IOCTL_SET_LLP_ETHERNET: + break; + /* Set RAWIP protocol */ + case RMNET_IOCTL_SET_LLP_IP: + break; + /* Get link protocol */ + case RMNET_IOCTL_GET_LLP: + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Set QoS header enabled */ + case RMNET_IOCTL_SET_QOS_ENABLE: + return -EINVAL; + /* Set QoS header disabled */ + case RMNET_IOCTL_SET_QOS_DISABLE: + break; + /* Get QoS header state */ + case RMNET_IOCTL_GET_QOS: + ioctl_data.u.operation_mode = RMNET_MODE_NONE; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Get operation mode */ + case RMNET_IOCTL_GET_OPMODE: + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Open transport port */ + case RMNET_IOCTL_OPEN: + break; + /* Close transport port */ + case RMNET_IOCTL_CLOSE: + break; + /* Flow enable */ + case RMNET_IOCTL_FLOW_ENABLE: + IPAWANERR("RMNET_IOCTL_FLOW_ENABLE not supported\n"); + rc = -EFAULT; + break; + /* Flow disable */ + case RMNET_IOCTL_FLOW_DISABLE: + IPAWANERR("RMNET_IOCTL_FLOW_DISABLE 
not supported\n"); + rc = -EFAULT; + break; + /* Set flow handle */ + case RMNET_IOCTL_FLOW_SET_HNDL: + break; + + /* Extended IOCTLs */ + case RMNET_IOCTL_EXTENDED: + IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n"); + if (copy_from_user(&ext_ioctl_data, + (u8 *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("failed to copy extended ioctl data\n"); + rc = -EFAULT; + break; + } + switch (ext_ioctl_data.extended_ioctl) { + /* Get features */ + case RMNET_IOCTL_GET_SUPPORTED_FEATURES: + IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n"); + ext_ioctl_data.u.data = + (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL | + RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT | + RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT); + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* Set MRU */ + case RMNET_IOCTL_SET_MRU: + mru = ext_ioctl_data.u.data; + IPAWANDBG("get MRU size %d\n", + ext_ioctl_data.u.data); + break; + /* Get MRU */ + case RMNET_IOCTL_GET_MRU: + ext_ioctl_data.u.data = mru; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* GET SG support */ + case RMNET_IOCTL_GET_SG_SUPPORT: + ext_ioctl_data.u.data = + ipa3_rmnet_res.ipa_advertise_sg_support; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* Get endpoint ID */ + case RMNET_IOCTL_GET_EPID: + IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n"); + ext_ioctl_data.u.data = epid; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + if (copy_from_user(&ext_ioctl_data, + (u8 *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("copy extended ioctl data failed\n"); + rc = -EFAULT; + break; + } + IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n", + ext_ioctl_data.u.data); + break; + /* Endpoint pair */ + case RMNET_IOCTL_GET_EP_PAIR: + IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n"); + ext_ioctl_data.u.ipa_ep_pair.consumer_pipe_num = + ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD); + ext_ioctl_data.u.ipa_ep_pair.producer_pipe_num = + ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + if (copy_from_user(&ext_ioctl_data, + (u8 *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("copy extended ioctl data failed\n"); + rc = -EFAULT; + break; + } + IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n", + ext_ioctl_data.u.ipa_ep_pair.consumer_pipe_num, + ext_ioctl_data.u.ipa_ep_pair.producer_pipe_num); + break; + /* Get driver name */ + case RMNET_IOCTL_GET_DRIVER_NAME: + memcpy(&ext_ioctl_data.u.if_name, + IPA_NETDEV()->name, + sizeof(IFNAMSIZ)); + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &ext_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* Add MUX ID */ + case RMNET_IOCTL_ADD_MUX_CHANNEL: + mux_id = ext_ioctl_data.u.rmnet_mux_val.mux_id; + mux_index = ipa3_find_mux_channel_index( + ext_ioctl_data.u.rmnet_mux_val.mux_id); + if (mux_index < MAX_NUM_OF_MUX_CHANNEL) { + IPAWANDBG("already setup mux(%d)\n", mux_id); + return rc; + } + mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock); + if (rmnet_ipa3_ctx->rmnet_index + >= MAX_NUM_OF_MUX_CHANNEL) { + IPAWANERR("Exceed mux_channel limit(%d)\n", 
+ rmnet_ipa3_ctx->rmnet_index); + mutex_unlock( + &rmnet_ipa3_ctx->add_mux_channel_lock); + return -EFAULT; + } + IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n", + ext_ioctl_data.u.rmnet_mux_val.mux_id, + ext_ioctl_data.u.rmnet_mux_val.vchannel_name); + /* cache the mux name and id */ + mux_channel = rmnet_ipa3_ctx->mux_channel; + rmnet_index = rmnet_ipa3_ctx->rmnet_index; + + mux_channel[rmnet_index].mux_id = + ext_ioctl_data.u.rmnet_mux_val.mux_id; + memcpy(mux_channel[rmnet_index].vchannel_name, + ext_ioctl_data.u.rmnet_mux_val.vchannel_name, + sizeof(mux_channel[rmnet_index] + .vchannel_name)); + mux_channel[rmnet_index].vchannel_name[ + IFNAMSIZ - 1] = '\0'; + + IPAWANDBG("cache device[%s:%d] in IPA_wan[%d]\n", + mux_channel[rmnet_index].vchannel_name, + mux_channel[rmnet_index].mux_id, + rmnet_index); + /* check if UL filter rules have already arrived */ + v_name = + ext_ioctl_data.u.rmnet_mux_val.vchannel_name; + if (rmnet_ipa3_ctx->num_q6_rules != 0) { + mux_mutex_ptr = + &rmnet_ipa3_ctx->add_mux_channel_lock; + IPAWANERR("dev(%s) registering to IPA\n", + v_name); + rc = ipa3_wwan_register_to_ipa( + rmnet_ipa3_ctx->rmnet_index); + if (rc < 0) { + IPAWANERR("device %s reg IPA failed\n", + v_name); + mutex_unlock(mux_mutex_ptr); + return -ENODEV; + } + mux_channel[rmnet_index].mux_channel_set = + true; + mux_channel[rmnet_index].ul_flt_reg = + true; + } else { + IPAWANDBG("dev(%s) hasn't registered to IPA\n", + v_name); + mux_channel[rmnet_index].mux_channel_set = + true; + mux_channel[rmnet_index].ul_flt_reg = + false; + } + rmnet_ipa3_ctx->rmnet_index++; + mutex_unlock(&rmnet_ipa3_ctx->add_mux_channel_lock); + break; + case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT: + rc = handle3_egress_format(dev, &ext_ioctl_data); + break; + case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */ + rc = handle3_ingress_format(dev, &ext_ioctl_data); + break; + case RMNET_IOCTL_SET_XLAT_DEV_INFO: + wan_msg = kzalloc(sizeof(struct ipa_wan_msg), + GFP_KERNEL); + if (!wan_msg) + return -ENOMEM; + + len = sizeof(wan_msg->upstream_ifname) > + sizeof(ext_ioctl_data.u.if_name) ? 
+ sizeof(ext_ioctl_data.u.if_name) : + sizeof(wan_msg->upstream_ifname); + strlcpy(wan_msg->upstream_ifname, + ext_ioctl_data.u.if_name, len); + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = WAN_XLAT_CONNECT; + msg_meta.msg_len = sizeof(struct ipa_wan_msg); + rc = ipa3_send_msg(&msg_meta, wan_msg, + ipa3_wwan_msg_free_cb); + if (rc) { + IPAWANERR("Failed to send XLAT_CONNECT msg\n"); + kfree(wan_msg); + } + break; + /* Get agg count */ + case RMNET_IOCTL_GET_AGGREGATION_COUNT: + break; + /* Set agg count */ + case RMNET_IOCTL_SET_AGGREGATION_COUNT: + break; + /* Get agg size */ + case RMNET_IOCTL_GET_AGGREGATION_SIZE: + break; + /* Set agg size */ + case RMNET_IOCTL_SET_AGGREGATION_SIZE: + break; + /* Do flow control */ + case RMNET_IOCTL_FLOW_CONTROL: + break; + /* For legacy use */ + case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL: + break; + /* Get HW/SW map */ + case RMNET_IOCTL_GET_HWSW_MAP: + break; + /* Set RX Headroom */ + case RMNET_IOCTL_SET_RX_HEADROOM: + break; + default: + IPAWANERR("[%s] unsupported extended cmd[%d]", + dev->name, + ext_ioctl_data.extended_ioctl); + rc = -EINVAL; + } + break; + default: + IPAWANERR("[%s] unsupported cmd[%d]", + dev->name, cmd); + rc = -EINVAL; + } + return rc; +} + +static const struct net_device_ops ipa3_wwan_ops_ip = { + .ndo_open = ipa3_wwan_open, + .ndo_stop = ipa3_wwan_stop, + .ndo_start_xmit = ipa3_wwan_xmit, + .ndo_tx_timeout = ipa3_wwan_tx_timeout, + .ndo_do_ioctl = ipa3_wwan_ioctl, + .ndo_change_mtu = ipa3_wwan_change_mtu, + .ndo_set_mac_address = 0, + .ndo_validate_addr = 0, +}; + +/** + * wwan_setup() - Setups the wwan network driver. + * + * @dev: network device + * + * Return codes: + * None + */ + +static void ipa3_wwan_setup(struct net_device *dev) +{ + dev->netdev_ops = &ipa3_wwan_ops_ip; + ether_setup(dev); + /* set this after calling ether_setup */ + dev->header_ops = 0; /* No header */ + dev->type = ARPHRD_RAWIP; + dev->hard_header_len = 0; + dev->mtu = WWAN_DATA_LEN; + dev->addr_len = 0; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + dev->needed_headroom = HEADROOM_FOR_QMAP; + dev->needed_tailroom = TAILROOM; + dev->watchdog_timeo = 1000; +} + +/* IPA_RM related functions start*/ +static void ipa3_q6_prod_rm_request_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_request, + ipa3_q6_prod_rm_request_resource); +static void ipa3_q6_prod_rm_release_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_release, + ipa3_q6_prod_rm_release_resource); + +static void ipa3_q6_prod_rm_request_resource(struct work_struct *work) +{ + int ret = 0; + + ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0 && ret != -EINPROGRESS) { + IPAWANERR("ipa_rm_request_resource failed %d\n", ret); + return; + } +} + +static int ipa3_q6_rm_request_resource(void) +{ + queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq, + &ipa3_q6_con_rm_request, 0); + return 0; +} + +static void ipa3_q6_prod_rm_release_resource(struct work_struct *work) +{ + int ret = 0; + + ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0 && ret != -EINPROGRESS) { + IPAWANERR("ipa_rm_release_resource failed %d\n", ret); + return; + } +} + + +static int ipa3_q6_rm_release_resource(void) +{ + queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq, + &ipa3_q6_con_rm_release, 0); + return 0; +} + + +static void ipa3_q6_rm_notify_cb(void *user_data, + enum ipa_rm_event event, + unsigned long data) +{ + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + IPAWANDBG_LOW("Q6_PROD GRANTED 
CB\n"); + break; + case IPA_RM_RESOURCE_RELEASED: + IPAWANDBG_LOW("Q6_PROD RELEASED CB\n"); + break; + default: + return; + } +} +static int ipa3_q6_initialize_rm(void) +{ + struct ipa_rm_create_params create_params; + struct ipa_rm_perf_profile profile; + int result; + + /* Initialize IPA_RM workqueue */ + rmnet_ipa3_ctx->rm_q6_wq = create_singlethread_workqueue("clnt_req"); + if (!rmnet_ipa3_ctx->rm_q6_wq) + return -ENOMEM; + + memset(&create_params, 0, sizeof(create_params)); + create_params.name = IPA_RM_RESOURCE_Q6_PROD; + create_params.reg_params.notify_cb = &ipa3_q6_rm_notify_cb; + result = ipa_rm_create_resource(&create_params); + if (result) + goto create_rsrc_err1; + memset(&create_params, 0, sizeof(create_params)); + create_params.name = IPA_RM_RESOURCE_Q6_CONS; + create_params.release_resource = &ipa3_q6_rm_release_resource; + create_params.request_resource = &ipa3_q6_rm_request_resource; + result = ipa_rm_create_resource(&create_params); + if (result) + goto create_rsrc_err2; + /* add dependency*/ + result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result) + goto add_dpnd_err; + /* setup Performance profile */ + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = 100; + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, + &profile); + if (result) + goto set_perf_err; + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS, + &profile); + if (result) + goto set_perf_err; + return result; + +set_perf_err: + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); +add_dpnd_err: + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + if (result < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_CONS, result); +create_rsrc_err2: + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + if (result < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, result); +create_rsrc_err1: + destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq); + return result; +} + +void ipa3_q6_deinitialize_rm(void) +{ + int ret; + + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (ret < 0) + IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS, + ret); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + if (ret < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_CONS, ret); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, ret); + + if (rmnet_ipa3_ctx->rm_q6_wq) + destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq); +} + +static void ipa3_wake_tx_queue(struct work_struct *work) +{ + if (IPA_NETDEV()) { + __netif_tx_lock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0)); + netif_wake_queue(IPA_NETDEV()); + __netif_tx_unlock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0)); + } +} + +/** + * ipa3_rm_resource_granted() - Called upon + * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped. + * + * @dev: network device + * + * Return codes: + * None + */ +static void ipa3_rm_resource_granted(void *dev) +{ + IPAWANDBG_LOW("Resource Granted - starting queue\n"); + schedule_work(&ipa3_tx_wakequeue_work); +} + +/** + * ipa3_rm_notify() - Callback function for RM events. Handles + * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events. + * IPA_RM_RESOURCE_GRANTED is handled in the context of shared + * workqueue. 
+ * + * @dev: network device + * @event: IPA RM event + * @data: Additional data provided by IPA RM + * + * Return codes: + * None + */ +static void ipa3_rm_notify(void *dev, enum ipa_rm_event event, + unsigned long data) +{ + struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); + + pr_debug("%s: event %d\n", __func__, event); + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) { + complete_all(&wwan_ptr->resource_granted_completion); + break; + } + ipa3_rm_resource_granted(dev); + break; + case IPA_RM_RESOURCE_RELEASED: + break; + default: + pr_err("%s: unknown event %d\n", __func__, event); + break; + } +} + +/* IPA_RM related functions end*/ + +static int ipa3_ssr_notifier_cb(struct notifier_block *this, + unsigned long code, + void *data); + +static struct notifier_block ipa3_ssr_notifier = { + .notifier_call = ipa3_ssr_notifier_cb, +}; + +static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev, + struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res) +{ + int result; + + ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ; + ipa_rmnet_drv_res->ipa_rmnet_ssr = + of_property_read_bool(pdev->dev.of_node, + "qcom,rmnet-ipa-ssr"); + pr_info("IPA SSR support = %s\n", + ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False"); + ipa_rmnet_drv_res->ipa_loaduC = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-loaduC"); + pr_info("IPA ipa-loaduC = %s\n", + ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False"); + + ipa_rmnet_drv_res->ipa_advertise_sg_support = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-advertise-sg-support"); + pr_info("IPA SG support = %s\n", + ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False"); + + ipa_rmnet_drv_res->ipa_napi_enable = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-napi-enable"); + pr_info("IPA Napi Enable = %s\n", + ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False"); + + /* Get IPA WAN RX desc fifo size */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,wan-rx-desc-size", + &ipa_rmnet_drv_res->wan_rx_desc_size); + if (result) + pr_info("using default for wan-rx-desc-size = %u\n", + ipa_rmnet_drv_res->wan_rx_desc_size); + else + IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n", + ipa_rmnet_drv_res->wan_rx_desc_size); + + return 0; +} + +struct ipa3_rmnet_context ipa3_rmnet_ctx; +static int ipa3_wwan_probe(struct platform_device *pdev); +struct platform_device *m_pdev; + +static void ipa3_delayed_probe(struct work_struct *work) +{ + (void)ipa3_wwan_probe(m_pdev); +} + +static DECLARE_WORK(ipa3_scheduled_probe, ipa3_delayed_probe); + +static void ipa3_ready_cb(void *user_data) +{ + struct platform_device *pdev = (struct platform_device *)(user_data); + + m_pdev = pdev; + + IPAWANDBG("IPA ready callback has been triggered\n"); + + schedule_work(&ipa3_scheduled_probe); +} + +/** + * ipa3_wwan_probe() - Initialized the module and registers as a + * network interface to the network stack + * + * Note: In case IPA driver hasn't initialized already, the probe function + * will return immediately after registering a callback to be invoked when + * IPA driver initialization is complete. 
+ * + * Return codes: + * 0: success + * -ENOMEM: No memory available + * -EFAULT: Internal error + */ +static int ipa3_wwan_probe(struct platform_device *pdev) +{ + int ret, i; + struct net_device *dev; + struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */ + struct ipa_rm_perf_profile profile; /* IPA_RM */ + + pr_info("rmnet_ipa3 started initialization\n"); + + if (!ipa3_is_ready()) { + IPAWANDBG("IPA driver not ready, registering callback\n"); + ret = ipa_register_ipa_ready_cb(ipa3_ready_cb, (void *)pdev); + + /* + * If we received -EEXIST, IPA has initialized. So we need + * to continue the probing process. + */ + if (ret != -EEXIST) { + if (ret) + IPAWANERR("IPA CB reg failed - %d\n", ret); + return ret; + } + } + + ret = get_ipa_rmnet_dts_configuration(pdev, &ipa3_rmnet_res); + ipa3_rmnet_ctx.ipa_rmnet_ssr = ipa3_rmnet_res.ipa_rmnet_ssr; + + ret = ipa3_init_q6_smem(); + if (ret) { + IPAWANERR("ipa3_init_q6_smem failed\n"); + return ret; + } + + /* initialize tx/rx endpoint setup */ + memset(&rmnet_ipa3_ctx->apps_to_ipa_ep_cfg, 0, + sizeof(struct ipa_sys_connect_params)); + memset(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg, 0, + sizeof(struct ipa_sys_connect_params)); + + /* initialize ex property setup */ + rmnet_ipa3_ctx->num_q6_rules = 0; + rmnet_ipa3_ctx->old_num_q6_rules = 0; + rmnet_ipa3_ctx->rmnet_index = 0; + rmnet_ipa3_ctx->egress_set = false; + rmnet_ipa3_ctx->a7_ul_flt_set = false; + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) + memset(&rmnet_ipa3_ctx->mux_channel[i], 0, + sizeof(struct ipa3_rmnet_mux_val)); + + /* start A7 QMI service/client */ + if (ipa3_rmnet_res.ipa_loaduC) + /* Android platform loads uC */ + ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01); + else + /* LE platform not loads uC */ + ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01); + + /* construct default WAN RT tbl for IPACM */ + ret = ipa3_setup_a7_qmap_hdr(); + if (ret) + goto setup_a7_qmap_hdr_err; + ret = ipa3_setup_dflt_wan_rt_tables(); + if (ret) + goto setup_dflt_wan_rt_tables_err; + + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) { + /* Start transport-driver fd ioctl for ipacm for first init */ + ret = ipa3_wan_ioctl_init(); + if (ret) + goto wan_ioctl_init_err; + } else { + /* Enable sending QMI messages after SSR */ + ipa3_wan_ioctl_enable_qmi_messages(); + } + + /* initialize wan-driver netdev */ + dev = alloc_netdev(sizeof(struct ipa3_wwan_private), + IPA_WWAN_DEV_NAME, + NET_NAME_UNKNOWN, + ipa3_wwan_setup); + if (!dev) { + IPAWANERR("no memory for netdev\n"); + ret = -ENOMEM; + goto alloc_netdev_err; + } + rmnet_ipa3_ctx->wwan_priv = netdev_priv(dev); + memset(rmnet_ipa3_ctx->wwan_priv, 0, + sizeof(*(rmnet_ipa3_ctx->wwan_priv))); + IPAWANDBG("wwan_ptr (private) = %pK", rmnet_ipa3_ctx->wwan_priv); + rmnet_ipa3_ctx->wwan_priv->net = dev; + rmnet_ipa3_ctx->wwan_priv->outstanding_high = DEFAULT_OUTSTANDING_HIGH; + rmnet_ipa3_ctx->wwan_priv->outstanding_low = DEFAULT_OUTSTANDING_LOW; + atomic_set(&rmnet_ipa3_ctx->wwan_priv->outstanding_pkts, 0); + spin_lock_init(&rmnet_ipa3_ctx->wwan_priv->lock); + init_completion( + &rmnet_ipa3_ctx->wwan_priv->resource_granted_completion); + + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) { + /* IPA_RM configuration starts */ + ret = ipa3_q6_initialize_rm(); + if (ret) { + IPAWANERR("ipa3_q6_initialize_rm failed, ret: %d\n", + ret); + goto q6_init_err; + } + } + + memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params)); + ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD; + ipa_rm_params.reg_params.user_data = dev; + 
ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify; + ret = ipa_rm_create_resource(&ipa_rm_params); + if (ret) { + IPAWANERR("unable to create resourse %d in IPA RM\n", + IPA_RM_RESOURCE_WWAN_0_PROD); + goto create_rsrc_err; + } + ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_INACTIVITY_TIMER); + if (ret) { + IPAWANERR("ipa rm timer init failed %d on resourse %d\n", + ret, IPA_RM_RESOURCE_WWAN_0_PROD); + goto timer_init_err; + } + /* add dependency */ + ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (ret) + goto add_dpnd_err; + /* setup Performance profile */ + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD, + &profile); + if (ret) + goto set_perf_err; + /* IPA_RM configuration ends */ + + /* Enable SG support in netdevice. */ + if (ipa3_rmnet_res.ipa_advertise_sg_support) + dev->hw_features |= NETIF_F_SG; + + if (ipa3_rmnet_res.ipa_napi_enable) + netif_napi_add(dev, &(rmnet_ipa3_ctx->wwan_priv->napi), + ipa3_rmnet_poll, NAPI_WEIGHT); + ret = register_netdev(dev); + if (ret) { + IPAWANERR("unable to register ipa_netdev %d rc=%d\n", + 0, ret); + goto set_perf_err; + } + + IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n", dev->name); + if (ret) { + IPAWANERR("default configuration failed rc=%d\n", + ret); + goto config_err; + } + atomic_set(&rmnet_ipa3_ctx->is_initialized, 1); + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) { + /* offline charging mode */ + ipa3_proxy_clk_unvote(); + } + atomic_set(&rmnet_ipa3_ctx->is_ssr, 0); + + IPAWANERR("rmnet_ipa completed initialization\n"); + return 0; +config_err: + if (ipa3_rmnet_res.ipa_napi_enable) + netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi)); + unregister_netdev(dev); +set_perf_err: + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (ret) + IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS, + ret); +add_dpnd_err: + ret = ipa_rm_inactivity_timer_destroy( + IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */ + if (ret) + IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); +timer_init_err: + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); +create_rsrc_err: + + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) + ipa3_q6_deinitialize_rm(); + +q6_init_err: + free_netdev(dev); + rmnet_ipa3_ctx->wwan_priv = NULL; +alloc_netdev_err: + ipa3_wan_ioctl_deinit(); +wan_ioctl_init_err: + ipa3_del_dflt_wan_rt_tables(); +setup_dflt_wan_rt_tables_err: + ipa3_del_a7_qmap_hdr(); +setup_a7_qmap_hdr_err: + ipa3_qmi_service_exit(); + atomic_set(&rmnet_ipa3_ctx->is_ssr, 0); + return ret; +} + +static int ipa3_wwan_remove(struct platform_device *pdev) +{ + int ret; + + IPAWANINFO("rmnet_ipa started deinitialization\n"); + mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard); + ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl); + if (ret < 0) + IPAWANERR("Failed to teardown IPA->APPS pipe\n"); + else + rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1; + ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->apps_to_ipa3_hdl); + if (ret < 0) + IPAWANERR("Failed to teardown APPS->IPA pipe\n"); + else + rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1; + if (ipa3_rmnet_res.ipa_napi_enable) + netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi)); + 
mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); + unregister_netdev(IPA_NETDEV()); + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (ret < 0) + IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS, + ret); + ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret < 0) + IPAWANERR( + "Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); + cancel_work_sync(&ipa3_tx_wakequeue_work); + cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work); + if (IPA_NETDEV()) + free_netdev(IPA_NETDEV()); + rmnet_ipa3_ctx->wwan_priv = NULL; + /* No need to remove wwan_ioctl during SSR */ + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) + ipa3_wan_ioctl_deinit(); + ipa3_del_dflt_wan_rt_tables(); + ipa3_del_a7_qmap_hdr(); + ipa3_del_mux_qmap_hdrs(); + if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) + ipa3_wwan_del_ul_flt_rule_to_ipa(); + ipa3_cleanup_deregister_intf(); + atomic_set(&rmnet_ipa3_ctx->is_initialized, 0); + IPAWANINFO("rmnet_ipa completed deinitialization\n"); + return 0; +} + +/** + * rmnet_ipa_ap_suspend() - suspend callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP suspend + * operation is invoked, usually by pressing a suspend button. + * + * Returns -EAGAIN to runtime_pm framework in case there are pending packets + * in the Tx queue. This will postpone the suspend operation until all the + * pending packets will be transmitted. + * + * In case there are no packets to send, releases the WWAN0_PROD entity. + * As an outcome, the number of IPA active clients should be decremented + * until IPA clocks can be gated. + */ +static int rmnet_ipa_ap_suspend(struct device *dev) +{ + struct net_device *netdev = IPA_NETDEV(); + struct ipa3_wwan_private *wwan_ptr; + int ret; + + IPAWANDBG("Enter...\n"); + + if (netdev == NULL) { + IPAWANERR("netdev is NULL.\n"); + ret = 0; + goto bail; + } + + netif_tx_lock_bh(netdev); + wwan_ptr = netdev_priv(netdev); + if (wwan_ptr == NULL) { + IPAWANERR("wwan_ptr is NULL.\n"); + ret = 0; + goto unlock_and_bail; + } + + /* Do not allow A7 to suspend in case there are outstanding packets */ + if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) { + IPAWANDBG("Outstanding packets, postponing AP suspend.\n"); + ret = -EAGAIN; + goto unlock_and_bail; + } + + /* Make sure that there is no Tx operation ongoing */ + netif_stop_queue(netdev); + ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + ret = 0; + +unlock_and_bail: + netif_tx_unlock_bh(netdev); +bail: + IPAWANDBG("Exit with %d\n", ret); + return ret; +} + +/** + * rmnet_ipa_ap_resume() - resume callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP resume + * operation is invoked. + * + * Enables the network interface queue and returns success to the + * runtime_pm framework. 
+ */ +static int rmnet_ipa_ap_resume(struct device *dev) +{ + struct net_device *netdev = IPA_NETDEV(); + + IPAWANDBG("Enter...\n"); + if (netdev) + netif_wake_queue(netdev); + IPAWANDBG("Exit\n"); + + return 0; +} + +static void ipa_stop_polling_stats(void) +{ + cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work); + ipa3_rmnet_ctx.polling_interval = 0; +} + +static const struct of_device_id rmnet_ipa_dt_match[] = { + {.compatible = "qcom,rmnet-ipa3"}, + {}, +}; +MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match); + +static const struct dev_pm_ops rmnet_ipa_pm_ops = { + .suspend_noirq = rmnet_ipa_ap_suspend, + .resume_noirq = rmnet_ipa_ap_resume, +}; + +static struct platform_driver rmnet_ipa_driver = { + .driver = { + .name = "rmnet_ipa3", + .owner = THIS_MODULE, + .pm = &rmnet_ipa_pm_ops, + .of_match_table = rmnet_ipa_dt_match, + }, + .probe = ipa3_wwan_probe, + .remove = ipa3_wwan_remove, +}; + +/** + * rmnet_ipa_send_ssr_notification(bool ssr_done) - send SSR notification + * + * This function sends the SSR notification before modem shutdown and + * after_powerup from SSR framework, to user-space module + */ +static void rmnet_ipa_send_ssr_notification(bool ssr_done) +{ + struct ipa_msg_meta msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + if (ssr_done) + msg_meta.msg_type = IPA_SSR_AFTER_POWERUP; + else + msg_meta.msg_type = IPA_SSR_BEFORE_SHUTDOWN; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + +static int ipa3_ssr_notifier_cb(struct notifier_block *this, + unsigned long code, + void *data) +{ + if (!ipa3_rmnet_ctx.ipa_rmnet_ssr) + return NOTIFY_DONE; + + switch (code) { + case SUBSYS_BEFORE_SHUTDOWN: + IPAWANINFO("IPA received MPSS BEFORE_SHUTDOWN\n"); + /* send SSR before-shutdown notification to IPACM */ + rmnet_ipa_send_ssr_notification(false); + atomic_set(&rmnet_ipa3_ctx->is_ssr, 1); + ipa3_q6_pre_shutdown_cleanup(); + if (IPA_NETDEV()) + netif_stop_queue(IPA_NETDEV()); + ipa3_qmi_stop_workqueues(); + ipa3_wan_ioctl_stop_qmi_messages(); + ipa_stop_polling_stats(); + if (atomic_read(&rmnet_ipa3_ctx->is_initialized)) + platform_driver_unregister(&rmnet_ipa_driver); + IPAWANINFO("IPA BEFORE_SHUTDOWN handling is complete\n"); + break; + case SUBSYS_AFTER_SHUTDOWN: + IPAWANINFO("IPA Received MPSS AFTER_SHUTDOWN\n"); + if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) + ipa3_q6_post_shutdown_cleanup(); + IPAWANINFO("IPA AFTER_SHUTDOWN handling is complete\n"); + break; + case SUBSYS_BEFORE_POWERUP: + IPAWANINFO("IPA received MPSS BEFORE_POWERUP\n"); + if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) + /* clean up cached QMI msg/handlers */ + ipa3_qmi_service_exit(); + /*hold a proxy vote for the modem*/ + ipa3_proxy_clk_vote(); + ipa3_reset_freeze_vote(); + IPAWANINFO("IPA BEFORE_POWERUP handling is complete\n"); + break; + case SUBSYS_AFTER_POWERUP: + IPAWANINFO("IPA received MPSS AFTER_POWERUP\n"); + if (!atomic_read(&rmnet_ipa3_ctx->is_initialized) && + atomic_read(&rmnet_ipa3_ctx->is_ssr)) + platform_driver_register(&rmnet_ipa_driver); + + IPAWANINFO("IPA AFTER_POWERUP handling is complete\n"); + break; + default: + IPAWANDBG("Unsupported subsys notification, IPA received: %lu", + code); + break; + } + + IPAWANDBG_LOW("Exit\n"); + return NOTIFY_DONE; +} + +/** + * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa_send_msg + * @buff: pointer to buffer containing the message + * @len: message len + * @type: message type + * + * This function is invoked when ipa_send_msg is complete 
(Provided as a + * free function pointer along with the message). + */ +static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAWANERR("Null buffer\n"); + return; + } + + if (type != IPA_TETHERING_STATS_UPDATE_STATS && + type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) { + IPAWANERR("Wrong type given. buff %pK type %d\n", + buff, type); + } + kfree(buff); +} + +/** + * rmnet_ipa_get_stats_and_update() - Gets pipe stats from Modem + * + * This function queries the IPA Modem driver for the pipe stats + * via QMI, and updates the user space IPA entity. + */ +static void rmnet_ipa_get_stats_and_update(void) +{ + struct ipa_get_data_stats_req_msg_v01 req; + struct ipa_get_data_stats_resp_msg_v01 *resp; + struct ipa_msg_meta msg_meta; + int rc; + + resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) + return; + + memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01)); + + req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01; + + rc = ipa3_qmi_get_data_stats(&req, resp); + if (rc) { + IPAWANERR("ipa3_qmi_get_data_stats failed: %d\n", rc); + kfree(resp); + return; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01); + rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + kfree(resp); + return; + } +} + +/** + * tethering_stats_poll_queue() - Stats polling function + * @work - Work entry + * + * This function is scheduled periodically (per the interval) in + * order to poll the IPA Modem driver for the pipe stats. + */ +static void tethering_stats_poll_queue(struct work_struct *work) +{ + rmnet_ipa_get_stats_and_update(); + + /* Schedule again only if there's an active polling interval */ + if (ipa3_rmnet_ctx.polling_interval != 0) + schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, + msecs_to_jiffies(ipa3_rmnet_ctx.polling_interval*1000)); +} + +/** + * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem + * + * This function retrieves the data usage (used quota) from the IPA Modem driver + * via QMI, and updates IPA user space entity. 
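+ *
+ * Buffer ownership: on success the response buffer is handed to
+ * ipa_send_msg() together with rmnet_ipa_free_msg() as the free callback,
+ * so it is released only after user space consumes the message; it is
+ * freed locally only on the QMI/send error paths.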
+ */ +static void rmnet_ipa_get_network_stats_and_update(void) +{ + struct ipa_get_apn_data_stats_req_msg_v01 req; + struct ipa_get_apn_data_stats_resp_msg_v01 *resp; + struct ipa_msg_meta msg_meta; + int rc; + + resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) + return; + + memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01)); + + req.mux_id_list_valid = true; + req.mux_id_list_len = 1; + req.mux_id_list[0] = ipa3_rmnet_ctx.metered_mux_id; + + rc = ipa3_qmi_get_network_stats(&req, resp); + if (rc) { + IPAWANERR("ipa3_qmi_get_network_stats failed: %d\n", rc); + kfree(resp); + return; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01); + rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + kfree(resp); + return; + } +} + +/** + * rmnet_ipa_send_quota_reach_ind() - send quota_reach notification from + * IPA Modem + * This function sends the quota_reach indication from the IPA Modem driver + * via QMI, to user-space module + */ +static void rmnet_ipa_send_quota_reach_ind(void) +{ + struct ipa_msg_meta msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_QUOTA_REACH; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + +/** + * rmnet_ipa3_poll_tethering_stats() - Tethering stats polling IOCTL handler + * @data - IOCTL data + * + * This function handles WAN_IOC_POLL_TETHERING_STATS. + * In case polling interval received is 0, polling will stop + * (If there's a polling in progress, it will allow it to finish), and then will + * fetch network stats, and update the IPA user space. + * + * Return codes: + * 0: Success + */ +int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data) +{ + ipa3_rmnet_ctx.polling_interval = data->polling_interval_secs; + + cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work); + + if (ipa3_rmnet_ctx.polling_interval == 0) { + ipa3_qmi_stop_data_qouta(); + rmnet_ipa_get_network_stats_and_update(); + rmnet_ipa_get_stats_and_update(); + return 0; + } + + schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0); + return 0; +} + +/** + * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface. + * It translates the given interface name to the Modem MUX ID and + * sends the request of the quota to the IPA Modem driver via QMI. 
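+ *
+ * Illustrative user-space call that ends up here for a modem upstream
+ * (a minimal sketch; the "/dev/wwan_ioctl" node name and the interface
+ * name "rmnet_data0" are assumptions for the example):
+ *
+ *	struct wan_ioctl_set_data_quota q = { .set_quota = 1 };
+ *	int fd = open("/dev/wwan_ioctl", O_RDWR);
+ *
+ *	strncpy(q.interface_name, "rmnet_data0", IFNAMSIZ - 1);
+ *	q.quota_mbytes = 100;
+ *	if (fd >= 0 && ioctl(fd, WAN_IOC_SET_DATA_QUOTA, &q) < 0)
+ *		/* quota could not be set */;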
+ * + * Return codes: + * 0: Success + * -EFAULT: Invalid interface name provided + * other: See ipa_qmi_set_data_quota + */ +static int rmnet_ipa3_set_data_quota_modem( + struct wan_ioctl_set_data_quota *data) +{ + u32 mux_id; + int index; + struct ipa_set_data_usage_quota_req_msg_v01 req; + + /* stop quota */ + if (!data->set_quota) + ipa3_qmi_stop_data_qouta(); + + /* prevent string buffer overflows */ + data->interface_name[IFNAMSIZ-1] = '\0'; + + index = find_vchannel_name_index(data->interface_name); + IPAWANERR("iface name %s, quota %lu\n", + data->interface_name, + (unsigned long int) data->quota_mbytes); + + if (index == MAX_NUM_OF_MUX_CHANNEL) { + IPAWANERR("%s is an invalid iface name\n", + data->interface_name); + return -EFAULT; + } + + mux_id = rmnet_ipa3_ctx->mux_channel[index].mux_id; + ipa3_rmnet_ctx.metered_mux_id = mux_id; + + memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01)); + req.apn_quota_list_valid = true; + req.apn_quota_list_len = 1; + req.apn_quota_list[0].mux_id = mux_id; + req.apn_quota_list[0].num_Mbytes = data->quota_mbytes; + + return ipa3_qmi_set_data_quota(&req); +} + +static int rmnet_ipa3_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data) +{ + struct ipa_set_wifi_quota wifi_quota; + int rc = 0; + + memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota)); + wifi_quota.set_quota = data->set_quota; + wifi_quota.quota_bytes = data->quota_mbytes; + IPAWANERR("iface name %s, quota %lu\n", + data->interface_name, + (unsigned long int) data->quota_mbytes); + + rc = ipa3_set_wlan_quota(&wifi_quota); + /* check if wlan-fw takes this quota-set */ + if (!wifi_quota.set_valid) + rc = -EFAULT; + return rc; +} + +/** + * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_DATA_QUOTA. + * It translates the given interface name to the Modem MUX ID and + * sends the request of the quota to the IPA Modem driver via QMI. + * + * Return codes: + * 0: Success + * -EFAULT: Invalid interface name provided + * other: See ipa_qmi_set_data_quota + */ +int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data) +{ + enum ipa_upstream_type upstream_type; + int rc = 0; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->interface_name); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR("Wrong interface_name name %s\n", + data->interface_name); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + rc = rmnet_ipa3_set_data_quota_wifi(data); + if (rc) { + IPAWANERR("set quota on wifi failed\n"); + return rc; + } + } else { + rc = rmnet_ipa3_set_data_quota_modem(data); + if (rc) { + IPAWANERR("set quota on modem failed\n"); + return rc; + } + } + return rc; +} +/* rmnet_ipa_set_tether_client_pipe() - + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_DATA_QUOTA. + * It translates the given interface name to the Modem MUX ID and + * sends the request of the quota to the IPA Modem driver via QMI. 
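+ *
+ * In practice this function services WAN_IOC_SET_TETHER_CLIENT_PIPE: for
+ * every pipe in the UL source and DL destination lists it records (or
+ * resets) the tethered client type via ipa3_set_client(), which is later
+ * consulted when per-client tethering statistics are queried.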
+ * + * Return codes: + * 0: Success + * -EFAULT: Invalid interface name provided + * other: See ipa_qmi_set_data_quota + */ +int rmnet_ipa3_set_tether_client_pipe( + struct wan_ioctl_set_tether_client_pipe *data) +{ + int number, i; + + /* error checking if ul_src_pipe_len valid or not*/ + if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 || + data->ul_src_pipe_len < 0) { + IPAWANERR("UL src pipes %d exceeding max %d\n", + data->ul_src_pipe_len, + QMI_IPA_MAX_PIPES_V01); + return -EFAULT; + } + /* error checking if dl_dst_pipe_len valid or not*/ + if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 || + data->dl_dst_pipe_len < 0) { + IPAWANERR("DL dst pipes %d exceeding max %d\n", + data->dl_dst_pipe_len, + QMI_IPA_MAX_PIPES_V01); + return -EFAULT; + } + + IPAWANDBG("client %d, UL %d, DL %d, reset %d\n", + data->ipa_client, + data->ul_src_pipe_len, + data->dl_dst_pipe_len, + data->reset_client); + number = data->ul_src_pipe_len; + for (i = 0; i < number; i++) { + IPAWANDBG("UL index-%d pipe %d\n", i, + data->ul_src_pipe_list[i]); + if (data->reset_client) + ipa3_set_client(data->ul_src_pipe_list[i], + 0, false); + else + ipa3_set_client(data->ul_src_pipe_list[i], + data->ipa_client, true); + } + number = data->dl_dst_pipe_len; + for (i = 0; i < number; i++) { + IPAWANDBG("DL index-%d pipe %d\n", i, + data->dl_dst_pipe_list[i]); + if (data->reset_client) + ipa3_set_client(data->dl_dst_pipe_list[i], + 0, false); + else + ipa3_set_client(data->dl_dst_pipe_list[i], + data->ipa_client, false); + } + return 0; +} + +static int rmnet_ipa3_query_tethering_stats_wifi( + struct wan_ioctl_query_tether_stats *data, bool reset) +{ + struct ipa_get_wdi_sap_stats *sap_stats; + int rc; + + sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats), + GFP_KERNEL); + if (!sap_stats) + return -ENOMEM; + + memset(sap_stats, 0, sizeof(struct ipa_get_wdi_sap_stats)); + + sap_stats->reset_stats = reset; + IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats); + + rc = ipa3_get_wlan_stats(sap_stats); + if (rc) { + IPAWANERR("can't get ipa3_get_wlan_stats\n"); + kfree(sap_stats); + return rc; + } else if (reset) { + kfree(sap_stats); + return 0; + } + + if (sap_stats->stats_valid) { + data->ipv4_tx_packets = sap_stats->ipv4_tx_packets; + data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes; + data->ipv4_rx_packets = sap_stats->ipv4_rx_packets; + data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes; + data->ipv6_tx_packets = sap_stats->ipv6_tx_packets; + data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes; + data->ipv6_rx_packets = sap_stats->ipv6_rx_packets; + data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes; + } + + IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n", + (unsigned long int) data->ipv4_rx_packets, + (unsigned long int) data->ipv6_rx_packets, + (unsigned long int) data->ipv4_rx_bytes, + (unsigned long int) data->ipv6_rx_bytes); + IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n", + (unsigned long int) data->ipv4_tx_packets, + (unsigned long int) data->ipv6_tx_packets, + (unsigned long int) data->ipv4_tx_bytes, + (unsigned long int) data->ipv6_tx_bytes); + + kfree(sap_stats); + return rc; +} + +static int rmnet_ipa3_query_tethering_stats_modem( + struct wan_ioctl_query_tether_stats *data, bool reset) +{ + struct ipa_get_data_stats_req_msg_v01 *req; + struct ipa_get_data_stats_resp_msg_v01 *resp; + int pipe_len, rc; + struct ipa_pipe_stats_info_type_v01 *stat_ptr; + + req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01), + GFP_KERNEL); + if (!req) + return -ENOMEM; + + resp = 
kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + kfree(req); + return -ENOMEM; + } + memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01)); + + req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01; + if (reset) { + req->reset_stats_valid = true; + req->reset_stats = true; + IPAWANERR("reset the pipe stats\n"); + } else { + /* print tethered-client enum */ + IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client); + } + + rc = ipa3_qmi_get_data_stats(req, resp); + if (rc) { + IPAWANERR("can't get ipa_qmi_get_data_stats\n"); + kfree(req); + kfree(resp); + return rc; + } else if (data == NULL) { + kfree(req); + kfree(resp); + return 0; + } + + if (resp->dl_dst_pipe_stats_list_valid) { + for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len; + pipe_len++) { + stat_ptr = + &resp->dl_dst_pipe_stats_list[pipe_len]; + + IPAWANDBG_LOW("Check entry(%d) dl_dst_pipe(%d)\n", + pipe_len, + stat_ptr->pipe_index); + IPAWANDBG_LOW("dl_p_v4(%lu)v6(%lu)\n", + (unsigned long int) stat_ptr->num_ipv4_packets, + (unsigned long int) stat_ptr->num_ipv6_packets + ); + IPAWANDBG_LOW("dl_b_v4(%lu)v6(%lu)\n", + (unsigned long int) stat_ptr->num_ipv4_bytes, + (unsigned long int) stat_ptr->num_ipv6_bytes); + if (ipa_get_client_uplink( + stat_ptr->pipe_index) == false) { + if (data->ipa_client == ipa_get_client( + stat_ptr->pipe_index)) { + /* update the DL stats */ + data->ipv4_rx_packets += + stat_ptr->num_ipv4_packets; + data->ipv6_rx_packets += + stat_ptr->num_ipv6_packets; + data->ipv4_rx_bytes += + stat_ptr->num_ipv4_bytes; + data->ipv6_rx_bytes += + stat_ptr->num_ipv6_bytes; + } + } + } + } + IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n", + (unsigned long int) data->ipv4_rx_packets, + (unsigned long int) data->ipv6_rx_packets, + (unsigned long int) data->ipv4_rx_bytes, + (unsigned long int) data->ipv6_rx_bytes); + + if (resp->ul_src_pipe_stats_list_valid) { + for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len; + pipe_len++) { + stat_ptr = + &resp->ul_src_pipe_stats_list[pipe_len]; + IPAWANDBG_LOW("Check entry(%d) ul_dst_pipe(%d)\n", + pipe_len, + stat_ptr->pipe_index); + IPAWANDBG_LOW("ul_p_v4(%lu)v6(%lu)\n", + (unsigned long int) stat_ptr->num_ipv4_packets, + (unsigned long int) stat_ptr->num_ipv6_packets + ); + IPAWANDBG_LOW("ul_b_v4(%lu)v6(%lu)\n", + (unsigned long int)stat_ptr->num_ipv4_bytes, + (unsigned long int) stat_ptr->num_ipv6_bytes); + if (ipa_get_client_uplink( + stat_ptr->pipe_index) == true) { + if (data->ipa_client == + stat_ptr->pipe_index) { + /* update the DL stats */ + data->ipv4_tx_packets += + stat_ptr->num_ipv4_packets; + data->ipv6_tx_packets += + stat_ptr->num_ipv6_packets; + data->ipv4_tx_bytes += + stat_ptr->num_ipv4_bytes; + data->ipv6_tx_bytes += + stat_ptr->num_ipv6_bytes; + } + } + } + } + IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n", + (unsigned long int) data->ipv4_tx_packets, + (unsigned long int) data->ipv6_tx_packets, + (unsigned long int) data->ipv4_tx_bytes, + (unsigned long int) data->ipv6_tx_bytes); + kfree(req); + kfree(resp); + return 0; +} + +int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, + bool reset) +{ + enum ipa_upstream_type upstream_type; + int rc = 0; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR(" Wrong upstreamIface name %s\n", + data->upstreamIface); + } else if 
(upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG_LOW(" query wifi-backhaul stats\n"); + rc = rmnet_ipa3_query_tethering_stats_wifi( + data, false); + if (rc) { + IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + } else { + IPAWANDBG_LOW(" query modem-backhaul stats\n"); + rc = rmnet_ipa3_query_tethering_stats_modem( + data, false); + if (rc) { + IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + } + return rc; +} + +int rmnet_ipa3_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data) +{ + struct wan_ioctl_query_tether_stats tether_stats; + enum ipa_upstream_type upstream_type; + int rc = 0; + + memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR(" Wrong upstreamIface name %s\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG_LOW(" query wifi-backhaul stats\n"); + rc = rmnet_ipa3_query_tethering_stats_wifi( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } else { + IPAWANDBG_LOW(" query modem-backhaul stats\n"); + tether_stats.ipa_client = data->ipa_client; + rc = rmnet_ipa3_query_tethering_stats_modem( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } + return rc; +} + +int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data) +{ + enum ipa_upstream_type upstream_type; + struct wan_ioctl_query_tether_stats tether_stats; + int rc = 0; + + memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR(" Wrong upstreamIface name %s\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANERR(" reset wifi-backhaul stats\n"); + rc = rmnet_ipa3_query_tethering_stats_wifi( + NULL, true); + if (rc) { + IPAWANERR("reset WLAN stats failed\n"); + return rc; + } + } else { + IPAWANERR(" reset modem-backhaul stats\n"); + rc = rmnet_ipa3_query_tethering_stats_modem( + &tether_stats, true); + if (rc) { + IPAWANERR("reset MODEM stats failed\n"); + return rc; + } + } + return rc; +} + +/** + * ipa3_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota + * @mux_id - The MUX ID on which the quota has been reached + * + * This function broadcasts a Netlink event using the kobject of the + * rmnet_ipa interface in order to alert the user space that the quota + * on the specific interface which matches the mux_id has been reached. 
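+ *
+ * The uevent carries three environment strings: ALERT_NAME=quotaReachedAlert,
+ * UPSTREAM=<iface> (consumed by the L-release CNE) and INTERFACE=<iface>
+ * (consumed by the M-release CNE). In addition, an IPA_QUOTA_REACH message
+ * is sent to user space via ipa_send_msg() so that IPACM is notified as well.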
+ * + */ +void ipa3_broadcast_quota_reach_ind(u32 mux_id, + enum ipa_upstream_type upstream_type) +{ + char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE]; + char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE]; + char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE]; + char *envp[IPA_UEVENT_NUM_EVNP] = { + alert_msg, iface_name_l, iface_name_m, NULL}; + int res; + int index; + + /* check upstream_type*/ + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR(" Wrong upstreamIface type %d\n", upstream_type); + return; + } else if (upstream_type == IPA_UPSTEAM_MODEM) { + index = ipa3_find_mux_channel_index(mux_id); + if (index == MAX_NUM_OF_MUX_CHANNEL) { + IPAWANERR("%u is an mux ID\n", mux_id); + return; + } + } + res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE, + "ALERT_NAME=%s", "quotaReachedAlert"); + if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + /* posting msg for L-release for CNE */ + if (upstream_type == IPA_UPSTEAM_MODEM) { + res = snprintf(iface_name_l, + IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "UPSTREAM=%s", + rmnet_ipa3_ctx->mux_channel[index].vchannel_name); + } else { + res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME); + } + if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + /* posting msg for M-release for CNE */ + if (upstream_type == IPA_UPSTEAM_MODEM) { + res = snprintf(iface_name_m, + IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "INTERFACE=%s", + rmnet_ipa3_ctx->mux_channel[index].vchannel_name); + } else { + res = snprintf(iface_name_m, + IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "INTERFACE=%s", + IPA_UPSTEAM_WLAN_IFACE_NAME); + } + if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + + IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n", + alert_msg, iface_name_l, iface_name_m); + kobject_uevent_env(&(IPA_NETDEV()->dev.kobj), + KOBJ_CHANGE, envp); + + rmnet_ipa_send_quota_reach_ind(); +} + +/** + * ipa3_q6_handshake_complete() - Perform operations once Q6 is up + * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR. + * + * This function is invoked once the handshake between the IPA AP driver + * and IPA Q6 driver is complete. At this point, it is possible to perform + * operations which can't be performed until IPA Q6 driver is up. + * + */ +void ipa3_q6_handshake_complete(bool ssr_bootup) +{ + /* It is required to recover the network stats after SSR recovery */ + if (ssr_bootup) { + /* + * In case the uC is required to be loaded by the Modem, + * the proxy vote will be removed only when uC loading is + * complete and indication is received by the AP. After SSR, + * uC is already loaded. Therefore, proxy vote can be removed + * once Modem init is complete. 
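+		 *
+		 * Once the vote is dropped, an IPA_SSR_AFTER_POWERUP message
+		 * is sent to IPACM and the tethering statistics are fetched
+		 * again, so that counters lost across the restart are
+		 * recovered.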
+ */ + ipa3_proxy_clk_unvote(); + + /* send SSR power-up notification to IPACM */ + rmnet_ipa_send_ssr_notification(true); + + /* + * It is required to recover the network stats after + * SSR recovery + */ + rmnet_ipa_get_network_stats_and_update(); + } +} + +static int __init ipa3_wwan_init(void) +{ + rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL); + if (!rmnet_ipa3_ctx) + return -ENOMEM; + + + atomic_set(&rmnet_ipa3_ctx->is_initialized, 0); + atomic_set(&rmnet_ipa3_ctx->is_ssr, 0); + + mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard); + mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock); + rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1; + rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1; + + ipa3_qmi_init(); + + /* Register for Modem SSR */ + rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier( + SUBSYS_MODEM, + &ipa3_ssr_notifier); + if (!IS_ERR(rmnet_ipa3_ctx->subsys_notify_handle)) + return platform_driver_register(&rmnet_ipa_driver); + else + return (int)PTR_ERR(rmnet_ipa3_ctx->subsys_notify_handle); +} + +static void __exit ipa3_wwan_cleanup(void) +{ + int ret; + + ipa3_qmi_cleanup(); + mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard); + mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock); + ret = subsys_notif_unregister_notifier( + rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier); + if (ret) + IPAWANERR( + "Error subsys_notif_unregister_notifier system %s, ret=%d\n", + SUBSYS_MODEM, ret); + platform_driver_unregister(&rmnet_ipa_driver); + kfree(rmnet_ipa3_ctx); + rmnet_ipa3_ctx = NULL; +} + +static void ipa3_wwan_msg_free_cb(void *buff, u32 len, u32 type) +{ + kfree(buff); +} + +static void ipa3_rmnet_rx_cb(void *priv) +{ + IPAWANDBG_LOW("\n"); + napi_schedule(&(rmnet_ipa3_ctx->wwan_priv->napi)); +} + +static int ipa3_rmnet_poll(struct napi_struct *napi, int budget) +{ + int rcvd_pkts = 0; + + rcvd_pkts = ipa_rx_poll(rmnet_ipa3_ctx->ipa3_to_apps_hdl, + NAPI_WEIGHT); + IPAWANDBG_LOW("rcvd packets: %d\n", rcvd_pkts); + return rcvd_pkts; +} + +late_initcall(ipa3_wwan_init); +module_exit(ipa3_wwan_cleanup); +MODULE_DESCRIPTION("WWAN Network Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c new file mode 100644 index 000000000000..c7a61868ebe0 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c @@ -0,0 +1,446 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_qmi_service.h" + +#define DRIVER_NAME "wwan_ioctl" + +#ifdef CONFIG_COMPAT +#define WAN_IOC_ADD_FLT_RULE32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_FLT_RULE, \ + compat_uptr_t) +#define WAN_IOC_ADD_FLT_RULE_INDEX32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_FLT_INDEX, \ + compat_uptr_t) +#define WAN_IOC_POLL_TETHERING_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_POLL_TETHERING_STATS, \ + compat_uptr_t) +#define WAN_IOC_SET_DATA_QUOTA32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_DATA_QUOTA, \ + compat_uptr_t) +#define WAN_IOC_SET_TETHER_CLIENT_PIPE32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \ + compat_uptr_t) +#define WAN_IOC_QUERY_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS, \ + compat_uptr_t) +#define WAN_IOC_RESET_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_RESET_TETHER_STATS, \ + compat_uptr_t) +#define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_DL_FILTER_STATS, \ + compat_uptr_t) +#define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS_ALL, \ + compat_uptr_t) +#endif + +static unsigned int dev_num = 1; +static struct cdev ipa3_wan_ioctl_cdev; +static unsigned int ipa3_process_ioctl = 1; +static struct class *class; +static dev_t device; + +static long ipa3_wan_ioctl(struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + int retval = 0; + u32 pyld_sz; + u8 *param = NULL; + + IPAWANDBG("device %s got ioctl events :>>>\n", + DRIVER_NAME); + + if (!ipa3_process_ioctl) { + IPAWANDBG("modem is in SSR, ignoring ioctl\n"); + return -EAGAIN; + } + + switch (cmd) { + case WAN_IOC_ADD_FLT_RULE: + IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct ipa_install_fltr_rule_req_msg_v01); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (ipa3_qmi_filter_request_send( + (struct ipa_install_fltr_rule_req_msg_v01 *)param)) { + IPAWANDBG("IPACM->Q6 add filter rule failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_ADD_FLT_RULE_EX: + IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_EX :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (ipa3_qmi_filter_request_ex_send( + (struct ipa_install_fltr_rule_req_ex_msg_v01 *)param)) { + IPAWANDBG("IPACM->Q6 add filter rule failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_ADD_FLT_RULE_INDEX: + IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct ipa_fltr_installed_notif_req_msg_v01); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (ipa3_qmi_filter_notify_send( + (struct ipa_fltr_installed_notif_req_msg_v01 *)param)) { + IPAWANDBG("IPACM->Q6 rule index fail\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case 
WAN_IOC_VOTE_FOR_BW_MBPS: + IPAWANDBG("device %s got WAN_IOC_VOTE_FOR_BW_MBPS :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(uint32_t); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (ipa3_vote_for_bus_bw((uint32_t *)param)) { + IPAWANERR("Failed to vote for bus BW\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_POLL_TETHERING_STATS: + IPAWANDBG_LOW("got WAN_IOCTL_POLL_TETHERING_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_poll_tethering_stats); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (rmnet_ipa3_poll_tethering_stats( + (struct wan_ioctl_poll_tethering_stats *)param)) { + IPAWANERR("WAN_IOCTL_POLL_TETHERING_STATS failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_SET_DATA_QUOTA: + IPAWANDBG_LOW("got WAN_IOCTL_SET_DATA_QUOTA :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_set_data_quota); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (rmnet_ipa3_set_data_quota( + (struct wan_ioctl_set_data_quota *)param)) { + IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_SET_TETHER_CLIENT_PIPE: + IPAWANDBG_LOW("got WAN_IOC_SET_TETHER_CLIENT_PIPE :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_set_tether_client_pipe); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (rmnet_ipa3_set_tether_client_pipe( + (struct wan_ioctl_set_tether_client_pipe *)param)) { + IPAWANERR("WAN_IOC_SET_TETHER_CLIENT_PIPE failed\n"); + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_QUERY_TETHER_STATS: + IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_tether_stats); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa3_query_tethering_stats( + (struct wan_ioctl_query_tether_stats *)param, false)) { + IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_QUERY_TETHER_STATS_ALL: + IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS_ALL :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_tether_stats_all); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa3_query_tethering_stats_all( + (struct wan_ioctl_query_tether_stats_all *)param)) { + IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_RESET_TETHER_STATS: + IPAWANDBG_LOW("device %s got 
WAN_IOC_RESET_TETHER_STATS :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct wan_ioctl_reset_tether_stats); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa3_reset_tethering_stats( + (struct wan_ioctl_reset_tether_stats *)param)) { + IPAWANERR("WAN_IOC_RESET_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + break; + + default: + retval = -ENOTTY; + } + kfree(param); + return retval; +} + +#ifdef CONFIG_COMPAT +long ipa3_compat_wan_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case WAN_IOC_ADD_FLT_RULE32: + cmd = WAN_IOC_ADD_FLT_RULE; + break; + case WAN_IOC_ADD_FLT_RULE_INDEX32: + cmd = WAN_IOC_ADD_FLT_RULE_INDEX; + break; + case WAN_IOC_POLL_TETHERING_STATS32: + cmd = WAN_IOC_POLL_TETHERING_STATS; + break; + case WAN_IOC_SET_DATA_QUOTA32: + cmd = WAN_IOC_SET_DATA_QUOTA; + break; + case WAN_IOC_SET_TETHER_CLIENT_PIPE32: + cmd = WAN_IOC_SET_TETHER_CLIENT_PIPE; + break; + case WAN_IOC_QUERY_TETHER_STATS32: + cmd = WAN_IOC_QUERY_TETHER_STATS; + break; + case WAN_IOC_RESET_TETHER_STATS32: + cmd = WAN_IOC_RESET_TETHER_STATS; + break; + case WAN_IOC_QUERY_DL_FILTER_STATS32: + cmd = WAN_IOC_QUERY_DL_FILTER_STATS; + break; + default: + return -ENOIOCTLCMD; + } + return ipa3_wan_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); +} +#endif + +static int ipa3_wan_ioctl_open(struct inode *inode, struct file *filp) +{ + IPAWANDBG("\n IPA A7 ipa3_wan_ioctl open OK :>>>> "); + return 0; +} + +const struct file_operations rmnet_ipa3_fops = { + .owner = THIS_MODULE, + .open = ipa3_wan_ioctl_open, + .read = NULL, + .unlocked_ioctl = ipa3_wan_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ipa3_compat_wan_ioctl, +#endif +}; + +int ipa3_wan_ioctl_init(void) +{ + unsigned int wan_ioctl_major = 0; + int ret; + struct device *dev; + + device = MKDEV(wan_ioctl_major, 0); + + ret = alloc_chrdev_region(&device, 0, dev_num, DRIVER_NAME); + if (ret) { + IPAWANERR(":device_alloc err.\n"); + goto dev_alloc_err; + } + wan_ioctl_major = MAJOR(device); + + class = class_create(THIS_MODULE, DRIVER_NAME); + if (IS_ERR(class)) { + IPAWANERR(":class_create err.\n"); + goto class_err; + } + + dev = device_create(class, NULL, device, + NULL, DRIVER_NAME); + if (IS_ERR(dev)) { + IPAWANERR(":device_create err.\n"); + goto device_err; + } + + cdev_init(&ipa3_wan_ioctl_cdev, &rmnet_ipa3_fops); + ret = cdev_add(&ipa3_wan_ioctl_cdev, device, dev_num); + if (ret) { + IPAWANERR(":cdev_add err.\n"); + goto cdev_add_err; + } + + ipa3_process_ioctl = 1; + + IPAWANDBG("IPA %s major(%d) initial ok :>>>>\n", + DRIVER_NAME, wan_ioctl_major); + return 0; + +cdev_add_err: + device_destroy(class, device); +device_err: + class_destroy(class); +class_err: + unregister_chrdev_region(device, dev_num); +dev_alloc_err: + return -ENODEV; +} + +void ipa3_wan_ioctl_stop_qmi_messages(void) +{ + ipa3_process_ioctl = 0; +} + +void ipa3_wan_ioctl_enable_qmi_messages(void) +{ + ipa3_process_ioctl = 1; +} + +void ipa3_wan_ioctl_deinit(void) +{ + cdev_del(&ipa3_wan_ioctl_cdev); + device_destroy(class, device); + class_destroy(class); + unregister_chrdev_region(device, dev_num); +} diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c new file mode 100644 index 000000000000..b50fa4ccda7e --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c @@ -0,0 +1,250 @@ +/* Copyright (c) 2013-2017, The Linux 
Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" + +#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge" + +#define TETH_DBG(fmt, args...) \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args) +#define TETH_DBG_FUNC_ENTRY() \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__) +#define TETH_DBG_FUNC_EXIT() \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__) +#define TETH_ERR(fmt, args...) \ + pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) + +/** + * struct ipa3_teth_bridge_ctx - Tethering bridge driver context information + * @class: kernel class pointer + * @dev_num: kernel device number + * @dev: kernel device struct pointer + * @cdev: kernel character device struct + */ +struct ipa3_teth_bridge_ctx { + struct class *class; + dev_t dev_num; + struct device *dev; + struct cdev cdev; +}; +static struct ipa3_teth_bridge_ctx *ipa3_teth_ctx; + +/** + * teth_bridge_ipa_cb() - Callback to handle IPA data path events + * @priv - private data + * @evt - event type + * @data - event specific data (usually skb) + * + * This callback is called by IPA driver for exception packets from USB. + * All exception packets are handled by Q6 and should not reach this function. + * Packets will arrive to AP exception pipe only in case where packets are + * sent from USB before Q6 has setup the call. + */ +static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + + TETH_DBG_FUNC_ENTRY(); + if (evt != IPA_RECEIVE) { + TETH_ERR("unexpected event %d\n", evt); + WARN_ON(1); + return; + } + + TETH_ERR("Unexpected exception packet from USB, dropping packet\n"); + dev_kfree_skb_any(skb); + TETH_DBG_FUNC_EXIT(); +} + +/** + * ipa3_teth_bridge_init() - Initialize the Tethering bridge driver + * @params - in/out params for USB initialization API (please look at struct + * definition for more info) + * + * USB driver gets a pointer to a callback function (usb_notify_cb) and an + * associated data. + * + * Builds IPA resource manager dependency graph. 
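+ *
+ * Note: this function only hands back the exception callback and marks the
+ * endpoint configuration as already handled (skip_ep_cfg); the resource
+ * manager dependencies themselves are added later in
+ * ipa3_teth_bridge_connect() and removed in ipa3_teth_bridge_disconnect().
+ * A typical call flow from the peripheral driver (illustrative only) is:
+ *
+ *	ipa3_teth_bridge_init(&init_params);
+ *	...cable connected, pipes set up...
+ *	ipa3_teth_bridge_connect(&connect_params);
+ *	...cable disconnected...
+ *	ipa3_teth_bridge_disconnect(client);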
+ * + * Return codes: 0: success, + * -EINVAL - Bad parameter + * Other negative value - Failure + */ +int ipa3_teth_bridge_init(struct teth_bridge_init_params *params) +{ + TETH_DBG_FUNC_ENTRY(); + + if (!params) { + TETH_ERR("Bad parameter\n"); + TETH_DBG_FUNC_EXIT(); + return -EINVAL; + } + + params->usb_notify_cb = teth_bridge_ipa_cb; + params->private_data = NULL; + params->skip_ep_cfg = true; + + TETH_DBG_FUNC_EXIT(); + return 0; +} + +/** + * ipa3_teth_bridge_disconnect() - Disconnect tethering bridge module + */ +int ipa3_teth_bridge_disconnect(enum ipa_client_type client) +{ + TETH_DBG_FUNC_ENTRY(); + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_CONS); + TETH_DBG_FUNC_EXIT(); + + return 0; +} + +/** + * ipa3_teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call + * @connect_params: Connection info + * + * Return codes: 0: success + * -EINVAL: invalid parameters + * -EPERM: Operation not permitted as the bridge is already + * connected + */ +int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params) +{ + int res = 0; + + TETH_DBG_FUNC_ENTRY(); + + /* Build the dependency graph, first add_dependency call is sync + * in order to make sure the IPA clocks are up before we continue + * and notify the USB driver it may continue. + */ + res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res < 0) { + TETH_ERR("ipa_rm_add_dependency() failed.\n"); + goto bail; + } + + /* this add_dependency call can't be sync since it will block until USB + * status is connected (which can happen only after the tethering + * bridge is connected), the clocks are already up so the call doesn't + * need to block. 
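+	 *
+	 * An -EINPROGRESS return from this asynchronous call is therefore
+	 * treated as success below: the Q6_PROD->USB_CONS dependency
+	 * completes on its own once the USB consumer is granted.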
+ */ + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_CONS); + if (res < 0 && res != -EINPROGRESS) { + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + TETH_ERR("ipa_rm_add_dependency() failed.\n"); + goto bail; + } + + res = 0; + +bail: + TETH_DBG_FUNC_EXIT(); + return res; +} + +static long ipa3_teth_bridge_ioctl(struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + IPAERR("No ioctls are supported!\n"); + return -ENOIOCTLCMD; +} + +static const struct file_operations ipa3_teth_bridge_drv_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = ipa3_teth_bridge_ioctl, +}; + +/** + * ipa3_teth_bridge_driver_init() - Initialize tethering bridge driver + * + */ +int ipa3_teth_bridge_driver_init(void) +{ + int res; + + TETH_DBG("Tethering bridge driver init\n"); + ipa3_teth_ctx = kzalloc(sizeof(*ipa3_teth_ctx), GFP_KERNEL); + if (!ipa3_teth_ctx) + return -ENOMEM; + + ipa3_teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME); + + res = alloc_chrdev_region(&ipa3_teth_ctx->dev_num, 0, 1, + TETH_BRIDGE_DRV_NAME); + if (res) { + TETH_ERR("alloc_chrdev_region err.\n"); + res = -ENODEV; + goto fail_alloc_chrdev_region; + } + + ipa3_teth_ctx->dev = device_create(ipa3_teth_ctx->class, + NULL, + ipa3_teth_ctx->dev_num, + ipa3_teth_ctx, + TETH_BRIDGE_DRV_NAME); + if (IS_ERR(ipa3_teth_ctx->dev)) { + TETH_ERR(":device_create err.\n"); + res = -ENODEV; + goto fail_device_create; + } + + cdev_init(&ipa3_teth_ctx->cdev, &ipa3_teth_bridge_drv_fops); + ipa3_teth_ctx->cdev.owner = THIS_MODULE; + ipa3_teth_ctx->cdev.ops = &ipa3_teth_bridge_drv_fops; + + res = cdev_add(&ipa3_teth_ctx->cdev, ipa3_teth_ctx->dev_num, 1); + if (res) { + TETH_ERR(":cdev_add err=%d\n", -res); + res = -ENODEV; + goto fail_cdev_add; + } + TETH_DBG("Tethering bridge driver init OK\n"); + + return 0; +fail_cdev_add: + device_destroy(ipa3_teth_ctx->class, ipa3_teth_ctx->dev_num); +fail_device_create: + unregister_chrdev_region(ipa3_teth_ctx->dev_num, 1); +fail_alloc_chrdev_region: + kfree(ipa3_teth_ctx); + ipa3_teth_ctx = NULL; + + return res; +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Tethering bridge driver"); diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile new file mode 100644 index 000000000000..af46bf210427 --- /dev/null +++ b/drivers/platform/msm/ipa/test/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o +ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o diff --git a/drivers/platform/msm/ipa/test/ipa_test_dma.c b/drivers/platform/msm/ipa/test/ipa_test_dma.c new file mode 100644 index 000000000000..7a377766b79c --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_test_dma.c @@ -0,0 +1,1050 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include "../ipa_v3/ipa_i.h" +#include "ipa_ut_framework.h" + +#define IPA_TEST_DMA_WQ_NAME_BUFF_SZ 64 +#define IPA_TEST_DMA_MT_TEST_NUM_WQ 400 +#define IPA_TEST_DMA_MEMCPY_BUFF_SIZE 16384 +#define IPA_TEST_DMA_MAX_PKT_SIZE 0xFF00 +#define IPA_DMA_TEST_LOOP_NUM 1000 +#define IPA_DMA_TEST_INT_LOOP_NUM 50 +#define IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM 128 +#define IPA_DMA_RUN_TEST_UNIT_IN_LOOP(test_unit, iters, rc, args...) \ + do { \ + int __i; \ + for (__i = 0; __i < iters; __i++) { \ + IPA_UT_LOG(#test_unit " START iter %d\n", __i); \ + rc = test_unit(args); \ + if (!rc) \ + continue; \ + IPA_UT_LOG(#test_unit " failed %d\n", rc); \ + break; \ + } \ + } while (0) + +/** + * struct ipa_test_dma_async_user_data - user_data structure for async memcpy + * @src_mem: source memory buffer + * @dest_mem: destination memory buffer + * @call_serial_number: Id of the caller + * @copy_done: Completion object + */ +struct ipa_test_dma_async_user_data { + struct ipa_mem_buffer src_mem; + struct ipa_mem_buffer dest_mem; + int call_serial_number; + struct completion copy_done; +}; + +/** + * ipa_test_dma_setup() - Suite setup function + */ +static int ipa_test_dma_setup(void **ppriv) +{ + int rc; + + IPA_UT_DBG("Start Setup\n"); + + if (!ipa3_ctx) { + IPA_UT_ERR("No IPA ctx\n"); + return -EINVAL; + } + + rc = ipa_dma_init(); + if (rc) + IPA_UT_ERR("Fail to init ipa_dma - return code %d\n", rc); + else + IPA_UT_DBG("ipa_dma_init() Completed successfully!\n"); + + *ppriv = NULL; + + return rc; +} + +/** + * ipa_test_dma_teardown() - Suite teardown function + */ +static int ipa_test_dma_teardown(void *priv) +{ + IPA_UT_DBG("Start Teardown\n"); + ipa_dma_destroy(); + return 0; +} + +static int ipa_test_dma_alloc_buffs(struct ipa_mem_buffer *src, + struct ipa_mem_buffer *dest, + int size) +{ + int i; + static int val = 1; + int rc; + + val++; + src->size = size; + src->base = dma_alloc_coherent(ipa3_ctx->pdev, src->size, + &src->phys_base, GFP_KERNEL); + if (!src->base) { + IPA_UT_LOG("fail to alloc dma mem %d bytes\n", size); + IPA_UT_TEST_FAIL_REPORT("fail to alloc dma mem"); + return -ENOMEM; + } + + dest->size = size; + dest->base = dma_alloc_coherent(ipa3_ctx->pdev, dest->size, + &dest->phys_base, GFP_KERNEL); + if (!dest->base) { + IPA_UT_LOG("fail to alloc dma mem %d bytes\n", size); + IPA_UT_TEST_FAIL_REPORT("fail to alloc dma mem"); + rc = -ENOMEM; + goto fail_alloc_dest; + } + + memset(dest->base, 0, dest->size); + for (i = 0; i < src->size; i++) + memset(src->base + i, (val + i) & 0xFF, 1); + rc = memcmp(dest->base, src->base, dest->size); + if (rc == 0) { + IPA_UT_LOG("dest & src buffers are equal\n"); + IPA_UT_TEST_FAIL_REPORT("dest & src buffers are equal"); + rc = -EFAULT; + goto fail_buf_cmp; + } + + return 0; + +fail_buf_cmp: + dma_free_coherent(ipa3_ctx->pdev, dest->size, dest->base, + dest->phys_base); +fail_alloc_dest: + dma_free_coherent(ipa3_ctx->pdev, src->size, src->base, + src->phys_base); + return rc; +} + +static void ipa_test_dma_destroy_buffs(struct ipa_mem_buffer *src, + struct ipa_mem_buffer *dest) +{ + dma_free_coherent(ipa3_ctx->pdev, src->size, src->base, + src->phys_base); + dma_free_coherent(ipa3_ctx->pdev, dest->size, dest->base, + dest->phys_base); +} + +/** + * ipa_test_dma_memcpy_sync() - memcpy in sync mode + * + * @size: buffer size + * @expect_fail: test expects the memcpy to fail + * + * To be run during tests + * 1. Alloc src and dst buffers + * 2. sync memcpy src to dst via dma + * 3. 
compare src and dts if memcpy succeeded as expected + */ +static int ipa_test_dma_memcpy_sync(int size, bool expect_fail) +{ + int rc = 0; + int i; + struct ipa_mem_buffer src_mem; + struct ipa_mem_buffer dest_mem; + u8 *src; + u8 *dest; + + rc = ipa_test_dma_alloc_buffs(&src_mem, &dest_mem, size); + if (rc) { + IPA_UT_LOG("fail to alloc buffers\n"); + IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers"); + return rc; + } + + rc = ipa_dma_sync_memcpy(dest_mem.phys_base, src_mem.phys_base, size); + if (!expect_fail && rc) { + IPA_UT_LOG("fail to sync memcpy - rc = %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + goto free_buffs; + } + if (expect_fail && !rc) { + IPA_UT_LOG("sync memcpy succeeded while expected to fail\n"); + IPA_UT_TEST_FAIL_REPORT( + "sync memcpy succeeded while expected to fail"); + rc = -EFAULT; + goto free_buffs; + } + + if (!rc) { + /* if memcpy succeeded, compare the buffers */ + rc = memcmp(dest_mem.base, src_mem.base, size); + if (rc) { + IPA_UT_LOG("BAD memcpy - buffs are not equals\n"); + IPA_UT_TEST_FAIL_REPORT( + "BAD memcpy - buffs are not equals"); + src = src_mem.base; + dest = dest_mem.base; + for (i = 0; i < size; i++) { + if (*(src + i) != *(dest + i)) { + IPA_UT_LOG("byte: %d 0x%x != 0x%x\n", + i, *(src + i), *(dest + i)); + } + } + } + } else { + /* if memcpy failed as expected, update the rc */ + rc = 0; + } + +free_buffs: + ipa_test_dma_destroy_buffs(&src_mem, &dest_mem); + return rc; +} + +static void ipa_test_dma_async_memcpy_cb(void *comp_obj) +{ + struct completion *xfer_done; + + if (!comp_obj) { + IPA_UT_ERR("Invalid Input\n"); + return; + } + xfer_done = (struct completion *)comp_obj; + complete(xfer_done); +} + +static void ipa_test_dma_async_memcpy_cb_user_data(void *user_param) +{ + int rc; + int i; + u8 *src; + u8 *dest; + struct ipa_test_dma_async_user_data *udata = + (struct ipa_test_dma_async_user_data *)user_param; + + if (!udata) { + IPA_UT_ERR("Invalid user param\n"); + return; + } + + rc = memcmp(udata->dest_mem.base, udata->src_mem.base, + udata->src_mem.size); + if (rc) { + IPA_UT_LOG("BAD memcpy - buffs are not equal sn=%d\n", + udata->call_serial_number); + IPA_UT_TEST_FAIL_REPORT( + "BAD memcpy - buffs are not equal"); + src = udata->src_mem.base; + dest = udata->dest_mem.base; + for (i = 0; i < udata->src_mem.size; i++) { + if (*(src + i) != *(dest + i)) { + IPA_UT_ERR("byte: %d 0x%x != 0x%x\n", i, + *(src + i), *(dest + i)); + } + } + return; + } + + IPA_UT_LOG("Notify on async memcopy sn=%d\n", + udata->call_serial_number); + complete(&(udata->copy_done)); +} + +/** + * ipa_test_dma_memcpy_async() - memcpy in async mode + * + * @size: buffer size + * @expect_fail: test expected the memcpy to fail + * + * To be run during tests + * 1. Alloc src and dst buffers + * 2. async memcpy src to dst via dma and wait for completion + * 3. 
compare src and dts if memcpy succeeded as expected + */ +static int ipa_test_dma_memcpy_async(int size, bool expect_fail) +{ + int rc = 0; + int i; + struct ipa_mem_buffer src_mem; + struct ipa_mem_buffer dest_mem; + u8 *src; + u8 *dest; + struct completion xfer_done; + + rc = ipa_test_dma_alloc_buffs(&src_mem, &dest_mem, size); + if (rc) { + IPA_UT_LOG("fail to alloc buffers\n"); + IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers"); + return rc; + } + + init_completion(&xfer_done); + rc = ipa_dma_async_memcpy(dest_mem.phys_base, src_mem.phys_base, size, + ipa_test_dma_async_memcpy_cb, &xfer_done); + if (!expect_fail && rc) { + IPA_UT_LOG("fail to initiate async memcpy - rc=%d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("async memcpy initiate failed"); + goto free_buffs; + } + if (expect_fail && !rc) { + IPA_UT_LOG("async memcpy succeeded while expected to fail\n"); + IPA_UT_TEST_FAIL_REPORT( + "async memcpy succeeded while expected to fail"); + rc = -EFAULT; + goto free_buffs; + } + + if (!rc) { + /* if memcpy succeeded, compare the buffers */ + wait_for_completion(&xfer_done); + rc = memcmp(dest_mem.base, src_mem.base, size); + if (rc) { + IPA_UT_LOG("BAD memcpy - buffs are not equals\n"); + IPA_UT_TEST_FAIL_REPORT( + "BAD memcpy - buffs are not equals"); + src = src_mem.base; + dest = dest_mem.base; + for (i = 0; i < size; i++) { + if (*(src + i) != *(dest + i)) { + IPA_UT_LOG("byte: %d 0x%x != 0x%x\n", + i, *(src + i), *(dest + i)); + } + } + } + } else { + /* if memcpy failed as expected, update the rc */ + rc = 0; + } + +free_buffs: + ipa_test_dma_destroy_buffs(&src_mem, &dest_mem); + return rc; +} + +/** + * ipa_test_dma_sync_async_memcpy() - memcpy in sync and then async mode + * + * @size: buffer size + * + * To be run during tests + * 1. several sync memcopy in row + * 2. several async memcopy - + * back-to-back (next async try initiated after prev is completed) + */ +static int ipa_test_dma_sync_async_memcpy(int size) +{ + int rc; + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_sync, + IPA_DMA_TEST_INT_LOOP_NUM, rc, size, false); + if (rc) { + IPA_UT_LOG("sync memcopy fail rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcopy fail"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_async, + IPA_DMA_TEST_INT_LOOP_NUM, rc, size, false); + if (rc) { + IPA_UT_LOG("async memcopy fail rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("async memcopy fail"); + return rc; + } + + return 0; +} + +/** + * TEST: test control API - enable/disable dma + * 1. enable dma + * 2. disable dma + */ +static int ipa_test_dma_control_api(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: memcpy before dma enable + * + * 1. sync memcpy - should fail + * 2. 
async memcpy - should fail + */ +static int ipa_test_dma_memcpy_before_enable(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, true); + if (rc) { + IPA_UT_LOG("sync memcpy succeeded unexpectedly rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy succeeded unexpectedly"); + return rc; + } + + rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, true); + if (rc) { + IPA_UT_LOG("async memcpy succeeded unexpectedly rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy succeeded unexpectedly"); + return rc; + } + + return 0; +} + +/** + * TEST: Sync memory copy + * + * 1. dma enable + * 2. sync memcpy + * 3. dma disable + */ +static int ipa_test_dma_sync_memcpy(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Small sync memory copy + * + * 1. dma enable + * 2. small sync memcpy + * 3. small sync memcpy + * 4. dma disable + */ +static int ipa_test_dma_sync_memcpy_small(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_sync(4, false); + if (rc) { + IPA_UT_LOG("sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_test_dma_memcpy_sync(7, false); + if (rc) { + IPA_UT_LOG("sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Async memory copy + * + * 1. dma enable + * 2. async memcpy + * 3. dma disable + */ +static int ipa_test_dma_async_memcpy(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("async memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Small async memory copy + * + * 1. dma enable + * 2. async memcpy + * 3. async memcpy + * 4. 
dma disable + */ +static int ipa_test_dma_async_memcpy_small(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_async(4, false); + if (rc) { + IPA_UT_LOG("async memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_test_dma_memcpy_async(7, false); + if (rc) { + IPA_UT_LOG("async memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Iteration of sync memory copy + * + * 1. dma enable + * 2. sync memcpy in loop - in row + * 3. dma disable + */ +static int ipa_test_dma_sync_memcpy_in_loop(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_sync, + IPA_DMA_TEST_LOOP_NUM, rc, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("Iterations of sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("Iterations of sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Iteration of async memory copy + * + * 1. dma enable + * 2. async memcpy in loop - back-to-back + * next async copy is initiated once previous one completed + * 3. dma disable + */ +static int ipa_test_dma_async_memcpy_in_loop(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_async, + IPA_DMA_TEST_LOOP_NUM, rc, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false); + if (rc) { + IPA_UT_LOG("Iterations of async memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("Iterations of async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/** + * TEST: Iteration of interleaved sync and async memory copy + * + * 1. dma enable + * 2. sync and async memcpy in loop - interleaved + * 3. 
dma disable + */ +static int ipa_test_dma_interleaved_sync_async_memcpy_in_loop(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_sync_async_memcpy, + IPA_DMA_TEST_INT_LOOP_NUM, rc, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE); + if (rc) { + IPA_UT_LOG( + "Iterations of interleaved sync async memcpy failed rc=%d\n" + , rc); + IPA_UT_TEST_FAIL_REPORT( + "Iterations of interleaved sync async memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +static atomic_t ipa_test_dma_mt_test_pass; + +struct one_memcpy_work { + struct work_struct work_s; + int size; +}; + +static void ipa_test_dma_wrapper_test_one_sync(struct work_struct *work) +{ + int rc; + struct one_memcpy_work *data = + container_of(work, struct one_memcpy_work, work_s); + + rc = ipa_test_dma_memcpy_sync(data->size, false); + if (rc) { + IPA_UT_LOG("fail sync memcpy from thread rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail sync memcpy from thread"); + return; + } + atomic_inc(&ipa_test_dma_mt_test_pass); +} + +static void ipa_test_dma_wrapper_test_one_async(struct work_struct *work) +{ + int rc; + struct one_memcpy_work *data = + container_of(work, struct one_memcpy_work, work_s); + + rc = ipa_test_dma_memcpy_async(data->size, false); + if (rc) { + IPA_UT_LOG("fail async memcpy from thread rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail async memcpy from thread"); + return; + } + atomic_inc(&ipa_test_dma_mt_test_pass); +} + +/** + * TEST: Multiple threads running sync and sync mem copy + * + * 1. dma enable + * 2. In-loop + * 2.1 create wq for sync memcpy + * 2.2 create wq for async memcpy + * 2.3 queue sync memcpy work + * 2.4 queue async memcoy work + * 3. In-loop + * 3.1 flush and destroy wq sync + * 3.2 flush and destroy wq async + * 3. 
dma disable + */ +static int ipa_test_dma_mt_sync_async(void *priv) +{ + int rc; + int i; + static struct workqueue_struct *wq_sync[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + static struct workqueue_struct *wq_async[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + static struct one_memcpy_work async[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + static struct one_memcpy_work sync[IPA_TEST_DMA_MT_TEST_NUM_WQ]; + char buff[IPA_TEST_DMA_WQ_NAME_BUFF_SZ]; + + memset(wq_sync, 0, sizeof(wq_sync)); + memset(wq_sync, 0, sizeof(wq_async)); + memset(async, 0, sizeof(async)); + memset(sync, 0, sizeof(sync)); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + atomic_set(&ipa_test_dma_mt_test_pass, 0); + for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) { + snprintf(buff, sizeof(buff), "ipa_test_dmaSwq%d", i); + wq_sync[i] = create_singlethread_workqueue(buff); + if (!wq_sync[i]) { + IPA_UT_ERR("failed to create sync wq#%d\n", i); + rc = -EFAULT; + goto fail_create_wq; + } + snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipa_test_dmaAwq%d", i); + wq_async[i] = create_singlethread_workqueue(buff); + if (!wq_async[i]) { + IPA_UT_ERR("failed to create async wq#%d\n", i); + rc = -EFAULT; + goto fail_create_wq; + } + + if (i % 2) { + sync[i].size = IPA_TEST_DMA_MEMCPY_BUFF_SIZE; + async[i].size = IPA_TEST_DMA_MEMCPY_BUFF_SIZE; + } else { + sync[i].size = 4; + async[i].size = 4; + } + INIT_WORK(&sync[i].work_s, ipa_test_dma_wrapper_test_one_sync); + queue_work(wq_sync[i], &sync[i].work_s); + INIT_WORK(&async[i].work_s, + ipa_test_dma_wrapper_test_one_async); + queue_work(wq_async[i], &async[i].work_s); + } + + for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) { + flush_workqueue(wq_sync[i]); + destroy_workqueue(wq_sync[i]); + flush_workqueue(wq_async[i]); + destroy_workqueue(wq_async[i]); + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + if ((2 * IPA_TEST_DMA_MT_TEST_NUM_WQ) != + atomic_read(&ipa_test_dma_mt_test_pass)) { + IPA_UT_LOG( + "Multi-threaded sync/async memcopy failed passed=%d\n" + , atomic_read(&ipa_test_dma_mt_test_pass)); + IPA_UT_TEST_FAIL_REPORT( + "Multi-threaded sync/async memcopy failed"); + return -EFAULT; + } + + return 0; + +fail_create_wq: + (void)ipa_dma_disable(); + for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) { + if (wq_sync[i]) + destroy_workqueue(wq_sync[i]); + if (wq_async[i]) + destroy_workqueue(wq_async[i]); + } + + return rc; +} + +/** + * TEST: Several parallel async memory copy iterations + * + * 1. create several user_data structures - one per iteration + * 2. allocate buffs. Give slice for each iteration + * 3. iterations of async mem copy + * 4. wait for all to complete + * 5. 
dma disable + */ +static int ipa_test_dma_parallel_async_memcpy_in_loop(void *priv) +{ + int rc; + struct ipa_test_dma_async_user_data *udata; + struct ipa_mem_buffer all_src_mem; + struct ipa_mem_buffer all_dest_mem; + int i; + bool is_fail = false; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + udata = kzalloc(IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM * + sizeof(struct ipa_test_dma_async_user_data), GFP_KERNEL); + if (!udata) { + IPA_UT_ERR("fail allocate user_data array\n"); + (void)ipa_dma_disable(); + return -ENOMEM; + } + + rc = ipa_test_dma_alloc_buffs(&all_src_mem, &all_dest_mem, + IPA_TEST_DMA_MEMCPY_BUFF_SIZE); + if (rc) { + IPA_UT_LOG("fail to alloc buffers\n"); + IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers"); + kfree(udata); + (void)ipa_dma_disable(); + return rc; + } + + for (i = 0 ; i < IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM ; i++) { + udata[i].src_mem.size = + IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM; + udata[i].src_mem.base = all_src_mem.base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + udata[i].src_mem.phys_base = all_src_mem.phys_base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + + udata[i].dest_mem.size = + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + udata[i].dest_mem.base = all_dest_mem.base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + udata[i].dest_mem.phys_base = all_dest_mem.phys_base + i * + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM); + + udata[i].call_serial_number = i + 1; + init_completion(&(udata[i].copy_done)); + rc = ipa_dma_async_memcpy(udata[i].dest_mem.phys_base, + udata[i].src_mem.phys_base, + (IPA_TEST_DMA_MEMCPY_BUFF_SIZE / + IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM), + ipa_test_dma_async_memcpy_cb_user_data, &udata[i]); + if (rc) { + IPA_UT_LOG("async memcpy initiation fail i=%d rc=%d\n", + i, rc); + is_fail = true; + } + } + + for (i = 0; i < IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM ; i++) + wait_for_completion(&udata[i].copy_done); + + ipa_test_dma_destroy_buffs(&all_src_mem, &all_dest_mem); + kfree(udata); + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + if (is_fail) { + IPA_UT_LOG("async memcopy failed\n"); + IPA_UT_TEST_FAIL_REPORT("async memcopy failed"); + return -EFAULT; + } + + return 0; +} + +/** + * TEST: Sync memory copy + * + * 1. dma enable + * 2. sync memcpy with max packet size + * 3. 
dma disable + */ +static int ipa_test_dma_sync_memcpy_max_pkt_size(void *priv) +{ + int rc; + + IPA_UT_LOG("Test Start\n"); + + rc = ipa_dma_enable(); + if (rc) { + IPA_UT_LOG("DMA enable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail enable dma"); + return rc; + } + + rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MAX_PKT_SIZE, false); + if (rc) { + IPA_UT_LOG("sync memcpy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("sync memcpy failed"); + (void)ipa_dma_disable(); + return rc; + } + + rc = ipa_dma_disable(); + if (rc) { + IPA_UT_LOG("DMA disable failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail disable dma"); + return rc; + } + + return 0; +} + +/* Suite definition block */ +IPA_UT_DEFINE_SUITE_START(dma, "DMA for GSI", + ipa_test_dma_setup, ipa_test_dma_teardown) +{ + IPA_UT_ADD_TEST(control_api, + "Control API", + ipa_test_dma_control_api, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(memcpy_before_enable, + "Call memcpy before dma enable and expect it to fail", + ipa_test_dma_memcpy_before_enable, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(sync_memcpy, + "Sync memory copy", + ipa_test_dma_sync_memcpy, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(sync_memcpy_small, + "Small Sync memory copy", + ipa_test_dma_sync_memcpy_small, + true, IPA_HW_v3_5, IPA_HW_MAX), + IPA_UT_ADD_TEST(async_memcpy, + "Async memory copy", + ipa_test_dma_async_memcpy, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(async_memcpy_small, + "Small async memory copy", + ipa_test_dma_async_memcpy_small, + true, IPA_HW_v3_5, IPA_HW_MAX), + IPA_UT_ADD_TEST(sync_memcpy_in_loop, + "Several sync memory copy iterations", + ipa_test_dma_sync_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(async_memcpy_in_loop, + "Several async memory copy iterations", + ipa_test_dma_async_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(interleaved_sync_async_memcpy_in_loop, + "Several interleaved sync and async memory copy iterations", + ipa_test_dma_interleaved_sync_async_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(multi_threaded_multiple_sync_async_memcpy, + "Several multi-threaded sync and async memory copy iterations", + ipa_test_dma_mt_sync_async, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(parallel_async_memcpy_in_loop, + "Several parallel async memory copy iterations", + ipa_test_dma_parallel_async_memcpy_in_loop, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(sync_memcpy_max_pkt_size, + "Sync memory copy with max packet size", + ipa_test_dma_sync_memcpy_max_pkt_size, + true, IPA_HW_v3_0, IPA_HW_MAX), +} IPA_UT_DEFINE_SUITE_END(dma); diff --git a/drivers/platform/msm/ipa/test/ipa_test_example.c b/drivers/platform/msm/ipa/test/ipa_test_example.c new file mode 100644 index 000000000000..7b35db2eaefd --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_test_example.c @@ -0,0 +1,99 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "ipa_ut_framework.h" + +/** + * Example IPA Unit-test suite + * To be a reference for writing new suites and tests. + * This suite is also used as unit-test for the testing framework itself. + * Structure: + * 1- Define the setup and teardown functions + * Not Mandatory. Null may be used as well + * 2- For each test, define its Run() function + * 3- Use IPA_UT_DEFINE_SUITE_START() to start defining the suite + * 4- use IPA_UT_ADD_TEST() for adding tests within + * the suite definition block + * 5- IPA_UT_DEFINE_SUITE_END() close the suite definition + */ + +static int ipa_test_example_dummy; + +static int ipa_test_example_suite_setup(void **ppriv) +{ + IPA_UT_DBG("Start Setup - set 0x1234F\n"); + + ipa_test_example_dummy = 0x1234F; + *ppriv = (void *)&ipa_test_example_dummy; + + return 0; +} + +static int ipa_test_example_teardown(void *priv) +{ + IPA_UT_DBG("Start Teardown\n"); + IPA_UT_DBG("priv=0x%pK - value=0x%x\n", priv, *((int *)priv)); + + return 0; +} + +static int ipa_test_example_test1(void *priv) +{ + IPA_UT_LOG("priv=0x%pK - value=0x%x\n", priv, *((int *)priv)); + ipa_test_example_dummy++; + + return 0; +} + +static int ipa_test_example_test2(void *priv) +{ + IPA_UT_LOG("priv=0x%pK - value=0x%x\n", priv, *((int *)priv)); + ipa_test_example_dummy++; + + return 0; +} + +static int ipa_test_example_test3(void *priv) +{ + IPA_UT_LOG("priv=0x%pK - value=0x%x\n", priv, *((int *)priv)); + ipa_test_example_dummy++; + + return 0; +} + +static int ipa_test_example_test4(void *priv) +{ + IPA_UT_LOG("priv=0x%pK - value=0x%x\n", priv, *((int *)priv)); + ipa_test_example_dummy++; + + IPA_UT_TEST_FAIL_REPORT("failed on test"); + + return -EFAULT; +} + +/* Suite definition block */ +IPA_UT_DEFINE_SUITE_START(example, "Example suite", + ipa_test_example_suite_setup, ipa_test_example_teardown) +{ + IPA_UT_ADD_TEST(test1, "This is test number 1", + ipa_test_example_test1, false, IPA_HW_v1_0, IPA_HW_MAX), + + IPA_UT_ADD_TEST(test2, "This is test number 2", + ipa_test_example_test2, false, IPA_HW_v1_0, IPA_HW_MAX), + + IPA_UT_ADD_TEST(test3, "This is test number 3", + ipa_test_example_test3, false, IPA_HW_v1_1, IPA_HW_v2_6), + + IPA_UT_ADD_TEST(test4, "This is test number 4", + ipa_test_example_test4, false, IPA_HW_v1_1, IPA_HW_MAX), + +} IPA_UT_DEFINE_SUITE_END(example); diff --git a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c new file mode 100644 index 000000000000..d37920e1fb52 --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c @@ -0,0 +1,330 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "ipa_ut_framework.h" +#include + +struct ipa_test_hw_stats_ctx { + u32 odu_prod_hdl; + u32 odu_cons_hdl; + u32 rt4_usb; + u32 rt6_usb; + u32 rt4_odu_cons; + u32 rt6_odu_cons; + atomic_t odu_pending; +}; + +static struct ipa_test_hw_stats_ctx *ctx; + +static int ipa_test_hw_stats_suite_setup(void **ppriv) +{ + IPA_UT_DBG("Start Setup\n"); + + if (!ctx) + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + + return 0; +} + +static int ipa_test_hw_stats_suite_teardown(void *priv) +{ + IPA_UT_DBG("Start Teardown\n"); + + return 0; +} + +static void odu_prod_notify(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + + switch (evt) { + case IPA_RECEIVE: + dev_kfree_skb_any(skb); + break; + case IPA_WRITE_DONE: + atomic_dec(&ctx->odu_pending); + dev_kfree_skb_any(skb); + break; + default: + IPA_UT_ERR("unexpected evt %d\n", evt); + } +} +static void odu_cons_notify(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + int ret; + + switch (evt) { + case IPA_RECEIVE: + if (atomic_read(&ctx->odu_pending) >= 64) + msleep(20); + atomic_inc(&ctx->odu_pending); + skb_put(skb, 100); + ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, NULL); + while (ret) { + msleep(100); + ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, NULL); + } + break; + case IPA_WRITE_DONE: + dev_kfree_skb_any(skb); + break; + default: + IPA_UT_ERR("unexpected evt %d\n", evt); + } +} + +static int ipa_test_hw_stats_configure(void *priv) +{ + struct ipa_sys_connect_params odu_prod_params; + struct ipa_sys_connect_params odu_emb_cons_params; + int res; + + /* first connect all additional pipe */ + memset(&odu_prod_params, 0, sizeof(odu_prod_params)); + memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params)); + + odu_prod_params.client = IPA_CLIENT_ODU_PROD; + odu_prod_params.desc_fifo_sz = 0x1000; + odu_prod_params.priv = NULL; + odu_prod_params.notify = odu_prod_notify; + res = ipa_setup_sys_pipe(&odu_prod_params, + &ctx->odu_prod_hdl); + if (res) { + IPA_UT_ERR("fail to setup sys pipe ODU_PROD %d\n", res); + return res; + } + + odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS; + odu_emb_cons_params.desc_fifo_sz = 0x1000; + odu_emb_cons_params.priv = NULL; + odu_emb_cons_params.notify = odu_cons_notify; + res = ipa_setup_sys_pipe(&odu_emb_cons_params, + &ctx->odu_cons_hdl); + if (res) { + IPA_UT_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res); + ipa_teardown_sys_pipe(ctx->odu_prod_hdl); + return res; + } + + IPA_UT_INFO("Configured. 
Please connect USB RNDIS now\n"); + + return 0; +} + +static int ipa_test_hw_stats_add_FnR(void *priv) +{ + struct ipa_ioc_add_rt_rule *rt_rule; + struct ipa_ioc_add_flt_rule *flt_rule; + struct ipa_ioc_get_rt_tbl rt_lookup; + int ret; + + rt_rule = kzalloc(sizeof(*rt_rule) + 1 * sizeof(struct ipa_rt_rule_add), + GFP_KERNEL); + if (!rt_rule) { + IPA_UT_DBG("no mem\n"); + return -ENOMEM; + } + + flt_rule = kzalloc(sizeof(*flt_rule) + + 1 * sizeof(struct ipa_flt_rule_add), GFP_KERNEL); + if (!flt_rule) { + IPA_UT_DBG("no mem\n"); + ret = -ENOMEM; + goto free_rt; + } + + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + rt_lookup.ip = rt_rule->ip; + strlcpy(rt_rule->rt_tbl_name, "V4_RT_TO_USB_CONS", + IPA_RESOURCE_NAME_MAX); + strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX); + rt_rule->num_rules = 1; + rt_rule->rules[0].rule.dst = IPA_CLIENT_USB_CONS; + rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + rt_rule->rules[0].rule.attrib.dst_port = 5002; + rt_rule->rules[0].rule.hashable = true; + if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + if (ipa_get_rt_tbl(&rt_lookup)) { + IPA_UT_ERR("failed to query V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + ctx->rt4_usb = rt_lookup.hdl; + + memset(rt_rule, 0, sizeof(*rt_rule)); + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v6; + rt_lookup.ip = rt_rule->ip; + strlcpy(rt_rule->rt_tbl_name, "V6_RT_TO_USB_CONS", + IPA_RESOURCE_NAME_MAX); + strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX); + rt_rule->num_rules = 1; + rt_rule->rules[0].rule.dst = IPA_CLIENT_USB_CONS; + rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + rt_rule->rules[0].rule.attrib.dst_port = 5002; + rt_rule->rules[0].rule.hashable = true; + if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + if (ipa_get_rt_tbl(&rt_lookup)) { + IPA_UT_ERR("failed to query V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + ctx->rt6_usb = rt_lookup.hdl; + + memset(rt_rule, 0, sizeof(*rt_rule)); + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + rt_lookup.ip = rt_rule->ip; + strlcpy(rt_rule->rt_tbl_name, "V4_RT_TO_ODU_CONS", + IPA_RESOURCE_NAME_MAX); + strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX); + rt_rule->num_rules = 1; + rt_rule->rules[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS; + rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + rt_rule->rules[0].rule.attrib.dst_port = 5002; + rt_rule->rules[0].rule.hashable = true; + if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + if (ipa_get_rt_tbl(&rt_lookup)) { + IPA_UT_ERR("failed to query V4 rules\n"); + return -EFAULT; + } + ctx->rt4_odu_cons = rt_lookup.hdl; + + memset(rt_rule, 0, sizeof(*rt_rule)); + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v6; + rt_lookup.ip = rt_rule->ip; + strlcpy(rt_rule->rt_tbl_name, "V6_RT_TO_ODU_CONS", + IPA_RESOURCE_NAME_MAX); + strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX); + rt_rule->num_rules = 1; + rt_rule->rules[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS; + rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + rt_rule->rules[0].rule.attrib.dst_port = 5002; + rt_rule->rules[0].rule.hashable = true; + if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret 
= -EFAULT; + goto free_flt; + } + if (ipa_get_rt_tbl(&rt_lookup)) { + IPA_UT_ERR("failed to query V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + ctx->rt6_odu_cons = rt_lookup.hdl; + + flt_rule->commit = 1; + flt_rule->ip = IPA_IP_v4; + flt_rule->ep = IPA_CLIENT_USB_PROD; + flt_rule->num_rules = 1; + flt_rule->rules[0].at_rear = 1; + flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING; + flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + flt_rule->rules[0].rule.attrib.dst_port = 5002; + flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt4_odu_cons; + flt_rule->rules[0].rule.hashable = 1; + if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + + memset(flt_rule, 0, sizeof(*flt_rule)); + flt_rule->commit = 1; + flt_rule->ip = IPA_IP_v6; + flt_rule->ep = IPA_CLIENT_USB_PROD; + flt_rule->num_rules = 1; + flt_rule->rules[0].at_rear = 1; + flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING; + flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + flt_rule->rules[0].rule.attrib.dst_port = 5002; + flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt6_odu_cons; + flt_rule->rules[0].rule.hashable = 1; + if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V6 rules\n"); + ret = -EFAULT; + goto free_flt; + } + + memset(flt_rule, 0, sizeof(*flt_rule)); + flt_rule->commit = 1; + flt_rule->ip = IPA_IP_v4; + flt_rule->ep = IPA_CLIENT_ODU_PROD; + flt_rule->num_rules = 1; + flt_rule->rules[0].at_rear = 1; + flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING; + flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + flt_rule->rules[0].rule.attrib.dst_port = 5002; + flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt4_usb; + flt_rule->rules[0].rule.hashable = 1; + if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V4 rules\n"); + ret = -EFAULT; + goto free_flt; + } + + memset(flt_rule, 0, sizeof(*flt_rule)); + flt_rule->commit = 1; + flt_rule->ip = IPA_IP_v6; + flt_rule->ep = IPA_CLIENT_ODU_PROD; + flt_rule->num_rules = 1; + flt_rule->rules[0].at_rear = 1; + flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING; + flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT; + flt_rule->rules[0].rule.attrib.dst_port = 5002; + flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt6_usb; + flt_rule->rules[0].rule.hashable = 1; + if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) { + IPA_UT_ERR("failed to install V6 rules\n"); + ret = -EFAULT; + goto free_flt; + } + + IPA_UT_INFO( + "Rules added. Please start data transfer on ports 5001/5002\n"); + ret = 0; +free_flt: + kfree(flt_rule); +free_rt: + kfree(rt_rule); + return ret; + +} + +/* Suite definition block */ +IPA_UT_DEFINE_SUITE_START(hw_stats, "HW stats test", + ipa_test_hw_stats_suite_setup, ipa_test_hw_stats_suite_teardown) +{ + IPA_UT_ADD_TEST(configure, "Configure the setup", + ipa_test_hw_stats_configure, false, IPA_HW_v4_0, IPA_HW_MAX), + + IPA_UT_ADD_TEST(add_rules, "Add FLT and RT rules", + ipa_test_hw_stats_add_FnR, false, IPA_HW_v4_0, IPA_HW_MAX), + +} IPA_UT_DEFINE_SUITE_END(hw_stats); diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c new file mode 100644 index 000000000000..fed39d9b4c1d --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c @@ -0,0 +1,3322 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include "../ipa_v3/ipa_i.h" +#include "../../gsi/gsi.h" +#include "../../gsi/gsi_reg.h" +#include "ipa_ut_framework.h" + +#define IPA_MHI_TEST_NUM_CHANNELS 8 +#define IPA_MHI_TEST_NUM_EVENT_RINGS 8 +#define IPA_MHI_TEST_FIRST_CHANNEL_ID 100 +#define IPA_MHI_TEST_FIRST_EVENT_RING_ID 100 +#define IPA_MHI_TEST_LAST_CHANNEL_ID \ + (IPA_MHI_TEST_FIRST_CHANNEL_ID + IPA_MHI_TEST_NUM_CHANNELS - 1) +#define IPA_MHI_TEST_LAST_EVENT_RING_ID \ + (IPA_MHI_TEST_FIRST_EVENT_RING_ID + IPA_MHI_TEST_NUM_EVENT_RINGS - 1) +#define IPA_MHI_TEST_MAX_DATA_BUF_SIZE 1500 +#define IPA_MHI_TEST_SEQ_TYPE_DMA 0x00000000 + +#define IPA_MHI_TEST_LOOP_NUM 5 +#define IPA_MHI_RUN_TEST_UNIT_IN_LOOP(test_unit, rc, args...) \ + do { \ + int __i; \ + for (__i = 0; __i < IPA_MHI_TEST_LOOP_NUM; __i++) { \ + IPA_UT_LOG(#test_unit " START iter %d\n", __i); \ + rc = test_unit(args); \ + if (!rc) \ + continue; \ + IPA_UT_LOG(#test_unit " failed %d\n", rc); \ + break; \ + } \ + } while (0) + +/** + * check for MSI interrupt for one or both channels: + * OUT channel MSI my be missed as it + * will be overwritten by the IN channel MSI + */ +#define IPA_MHI_TEST_CHECK_MSI_INTR(__both, __timeout) \ + do { \ + int i; \ + for (i = 0; i < 20; i++) { \ + if (*((u32 *)test_mhi_ctx->msi.base) == \ + (0x10000000 | \ + (IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1))) { \ + __timeout = false; \ + break; \ + } \ + if (__both && (*((u32 *)test_mhi_ctx->msi.base) == \ + (0x10000000 | \ + (IPA_MHI_TEST_FIRST_EVENT_RING_ID)))) { \ + /* sleep to be sure IN MSI is generated */ \ + msleep(20); \ + __timeout = false; \ + break; \ + } \ + msleep(20); \ + } \ + } while (0) + +static DECLARE_COMPLETION(mhi_test_ready_comp); +static DECLARE_COMPLETION(mhi_test_wakeup_comp); + +/** + * enum ipa_mhi_ring_elements_type - MHI ring elements types. 
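+ * @IPA_MHI_RING_ELEMENT_NO_OP: no-op element, carries no data buffer
+ * @IPA_MHI_RING_ELEMENT_TRANSFER: element describing a data buffer to transfer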
+ */ +enum ipa_mhi_ring_elements_type { + IPA_MHI_RING_ELEMENT_NO_OP = 1, + IPA_MHI_RING_ELEMENT_TRANSFER = 2 +}; + +/** + * enum ipa_mhi_channel_direction - MHI channel directions + */ +enum ipa_mhi_channel_direction { + IPA_MHI_OUT_CHAHNNEL = 1, + IPA_MHI_IN_CHAHNNEL = 2, +}; + +/** + * struct ipa_mhi_channel_context_array - MHI Channel context array entry + * + * mapping is taken from MHI spec + */ +struct ipa_mhi_channel_context_array { + u32 chstate:8; /*0-7*/ + u32 brsmode:2; /*8-9*/ + u32 pollcfg:6; /*10-15*/ + u32 reserved:16; /*16-31*/ + u32 chtype; /*channel type (inbound/outbound)*/ + u32 erindex; /*event ring index*/ + u64 rbase; /*ring base address in the host addr spc*/ + u64 rlen; /*ring length in bytes*/ + u64 rp; /*read pointer in the host system addr spc*/ + u64 wp; /*write pointer in the host system addr spc*/ +} __packed; + +/** + * struct ipa_mhi_event_context_array - MGI event ring context array entry + * + * mapping is taken from MHI spec + */ +struct ipa_mhi_event_context_array { + u16 intmodc; + u16 intmodt;/* Interrupt moderation timer (in microseconds) */ + u32 ertype; + u32 msivec; /* MSI vector for interrupt (MSI data)*/ + u64 rbase; /* ring base address in host address space*/ + u64 rlen; /* ring length in bytes*/ + u64 rp; /* read pointer in the host system address space*/ + u64 wp; /* write pointer in the host system address space*/ +} __packed; + +/** + * + * struct ipa_mhi_mmio_register_set - MHI configuration registers, + * control registers, status registers, pointers to doorbell arrays, + * pointers to channel and event context arrays. + * + * The structure is defined in mhi spec (register names are taken from there). + * Only values accessed by HWP or test are documented + */ +struct ipa_mhi_mmio_register_set { + u32 mhireglen; + u32 reserved_08_04; + u32 mhiver; + u32 reserved_10_0c; + struct mhicfg { + u8 nch; + u8 reserved_15_8; + u8 ner; + u8 reserved_31_23; + } __packed mhicfg; + + u32 reserved_18_14; + u32 chdboff; + u32 reserved_20_1C; + u32 erdboff; + u32 reserved_28_24; + u32 bhioff; + u32 reserved_30_2C; + u32 debugoff; + u32 reserved_38_34; + + struct mhictrl { + u32 rs : 1; + u32 reset : 1; + u32 reserved_7_2 : 6; + u32 mhistate : 8; + u32 reserved_31_16 : 16; + } __packed mhictrl; + + u64 reserved_40_3c; + u32 reserved_44_40; + + struct mhistatus { + u32 ready : 1; + u32 reserved_3_2 : 1; + u32 syserr : 1; + u32 reserved_7_3 : 5; + u32 mhistate : 8; + u32 reserved_31_16 : 16; + } __packed mhistatus; + + /** + * Register is not accessed by HWP. + * In test register carries the handle for + * the buffer of channel context array + */ + u32 reserved_50_4c; + + u32 mhierror; + + /** + * Register is not accessed by HWP. + * In test register carries the handle for + * the buffer of event ring context array + */ + u32 reserved_58_54; + + /** + * 64-bit pointer to the channel context array in the host memory space + * host sets the pointer to the channel context array during + * initialization. + */ + u64 ccabap; + /** + * 64-bit pointer to the event context array in the host memory space + * host sets the pointer to the event context array during + * initialization + */ + u64 ecabap; + /** + * Register is not accessed by HWP. + * In test register carries the pointer of virtual address + * for the buffer of channel context array + */ + u64 crcbap; + /** + * Register is not accessed by HWP. 
+ * In test register carries the pointer of virtual address + * for the buffer of event ring context array + */ + u64 crdb; + + u64 reserved_80_78; + + struct mhiaddr { + /** + * Base address (64-bit) of the memory region in + * the host address space where the MHI control + * data structures are allocated by the host, + * including channel context array, event context array, + * and rings. + * The device uses this information to set up its internal + * address translation tables. + * value must be aligned to 4 Kbytes. + */ + u64 mhicrtlbase; + /** + * Upper limit address (64-bit) of the memory region in + * the host address space where the MHI control + * data structures are allocated by the host. + * The device uses this information to setup its internal + * address translation tables. + * The most significant 32 bits of MHICTRLBASE and + * MHICTRLLIMIT registers must be equal. + */ + u64 mhictrllimit; + u64 reserved_18_10; + /** + * Base address (64-bit) of the memory region in + * the host address space where the MHI data buffers + * are allocated by the host. + * The device uses this information to setup its + * internal address translation tables. + * value must be aligned to 4 Kbytes. + */ + u64 mhidatabase; + /** + * Upper limit address (64-bit) of the memory region in + * the host address space where the MHI data buffers + * are allocated by the host. + * The device uses this information to setup its + * internal address translation tables. + * The most significant 32 bits of MHIDATABASE and + * MHIDATALIMIT registers must be equal. + */ + u64 mhidatalimit; + u64 reserved_30_28; + } __packed mhiaddr; + +} __packed; + +/** + * struct ipa_mhi_event_ring_element - MHI Event ring element + * + * mapping is taken from MHI spec + */ +struct ipa_mhi_event_ring_element { + /** + * pointer to ring element that generated event in + * the host system address space + */ + u64 ptr; + union { + struct { + u32 len : 24; + u32 code : 8; + } __packed bits; + u32 dword; + } __packed dword_8; + u16 reserved; + u8 type; + u8 chid; +} __packed; + +/** + * struct ipa_mhi_transfer_ring_element - MHI Transfer ring element + * + * mapping is taken from MHI spec + */ +struct ipa_mhi_transfer_ring_element { + u64 ptr; /*pointer to buffer in the host system address space*/ + u16 len; /*transaction length in bytes*/ + u16 reserved0; + union { + struct { + u16 chain : 1; + u16 reserved_7_1 : 7; + u16 ieob : 1; + u16 ieot : 1; + u16 bei : 1; + u16 reserved_15_11 : 5; + } __packed bits; + u16 word; + } __packed word_C; + u8 type; + u8 reserved1; +} __packed; + +/** + * struct ipa_test_mhi_context - MHI test context + */ +struct ipa_test_mhi_context { + void __iomem *gsi_mmio; + struct ipa_mem_buffer msi; + struct ipa_mem_buffer ch_ctx_array; + struct ipa_mem_buffer ev_ctx_array; + struct ipa_mem_buffer mmio_buf; + struct ipa_mem_buffer xfer_ring_bufs[IPA_MHI_TEST_NUM_CHANNELS]; + struct ipa_mem_buffer ev_ring_bufs[IPA_MHI_TEST_NUM_EVENT_RINGS]; + struct ipa_mem_buffer in_buffer; + struct ipa_mem_buffer out_buffer; + u32 prod_hdl; + u32 cons_hdl; + u32 test_prod_hdl; +}; + +static struct ipa_test_mhi_context *test_mhi_ctx; + +static void ipa_mhi_test_cb(void *priv, + enum ipa_mhi_event_type event, unsigned long data) +{ + IPA_UT_DBG("Entry\n"); + + if (event == IPA_MHI_EVENT_DATA_AVAILABLE) + complete_all(&mhi_test_wakeup_comp); + else if (event == IPA_MHI_EVENT_READY) + complete_all(&mhi_test_ready_comp); + else + WARN_ON(1); +} + +static void ipa_test_mhi_free_mmio_space(void) +{ + IPA_UT_DBG("Entry\n"); + + if 
(!test_mhi_ctx) + return; + + dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->mmio_buf.size, + test_mhi_ctx->mmio_buf.base, + test_mhi_ctx->mmio_buf.phys_base); + + dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->ev_ctx_array.size, + test_mhi_ctx->ev_ctx_array.base, + test_mhi_ctx->ev_ctx_array.phys_base); + + dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->ch_ctx_array.size, + test_mhi_ctx->ch_ctx_array.base, + test_mhi_ctx->ch_ctx_array.phys_base); + + dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->msi.size, + test_mhi_ctx->msi.base, test_mhi_ctx->msi.phys_base); +} + +static int ipa_test_mhi_alloc_mmio_space(void) +{ + int rc = 0; + struct ipa_mem_buffer *msi; + struct ipa_mem_buffer *ch_ctx_array; + struct ipa_mem_buffer *ev_ctx_array; + struct ipa_mem_buffer *mmio_buf; + struct ipa_mhi_mmio_register_set *p_mmio; + + IPA_UT_DBG("Entry\n"); + + msi = &test_mhi_ctx->msi; + ch_ctx_array = &test_mhi_ctx->ch_ctx_array; + ev_ctx_array = &test_mhi_ctx->ev_ctx_array; + mmio_buf = &test_mhi_ctx->mmio_buf; + + /* Allocate MSI */ + msi->size = 4; + msi->base = dma_alloc_coherent(ipa3_ctx->pdev, msi->size, + &msi->phys_base, GFP_KERNEL); + if (!msi->base) { + IPA_UT_ERR("no mem for msi\n"); + return -ENOMEM; + } + + IPA_UT_DBG("msi: base 0x%pK phys_addr 0x%pad size %d\n", + msi->base, &msi->phys_base, msi->size); + + /* allocate buffer for channel context */ + ch_ctx_array->size = sizeof(struct ipa_mhi_channel_context_array) * + IPA_MHI_TEST_NUM_CHANNELS; + ch_ctx_array->base = dma_alloc_coherent(ipa3_ctx->pdev, + ch_ctx_array->size, &ch_ctx_array->phys_base, GFP_KERNEL); + if (!ch_ctx_array->base) { + IPA_UT_ERR("no mem for ch ctx array\n"); + rc = -ENOMEM; + goto fail_free_msi; + } + IPA_UT_DBG("channel ctx array: base 0x%pK phys_addr %pad size %d\n", + ch_ctx_array->base, &ch_ctx_array->phys_base, + ch_ctx_array->size); + + /* allocate buffer for event context */ + ev_ctx_array->size = sizeof(struct ipa_mhi_event_context_array) * + IPA_MHI_TEST_NUM_EVENT_RINGS; + ev_ctx_array->base = dma_alloc_coherent(ipa3_ctx->pdev, + ev_ctx_array->size, &ev_ctx_array->phys_base, GFP_KERNEL); + if (!ev_ctx_array->base) { + IPA_UT_ERR("no mem for ev ctx array\n"); + rc = -ENOMEM; + goto fail_free_ch_ctx_arr; + } + IPA_UT_DBG("event ctx array: base 0x%pK phys_addr %pad size %d\n", + ev_ctx_array->base, &ev_ctx_array->phys_base, + ev_ctx_array->size); + + /* allocate buffer for mmio */ + mmio_buf->size = sizeof(struct ipa_mhi_mmio_register_set); + mmio_buf->base = dma_alloc_coherent(ipa3_ctx->pdev, mmio_buf->size, + &mmio_buf->phys_base, GFP_KERNEL); + if (!mmio_buf->base) { + IPA_UT_ERR("no mem for mmio buf\n"); + rc = -ENOMEM; + goto fail_free_ev_ctx_arr; + } + IPA_UT_DBG("mmio buffer: base 0x%pK phys_addr %pad size %d\n", + mmio_buf->base, &mmio_buf->phys_base, mmio_buf->size); + + /* initlize table */ + p_mmio = (struct ipa_mhi_mmio_register_set *)mmio_buf->base; + + /** + * 64-bit pointer to the channel context array in the host memory space; + * Host sets the pointer to the channel context array + * during initialization. 
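+ * The context array is looked up by absolute channel ID, so the base is
+ * biased back by IPA_MHI_TEST_FIRST_CHANNEL_ID entries and lookups for
+ * channel IDs 100..107 land inside the buffer allocated above.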
+ */ + p_mmio->ccabap = (u32)ch_ctx_array->phys_base - + (IPA_MHI_TEST_FIRST_CHANNEL_ID * + sizeof(struct ipa_mhi_channel_context_array)); + IPA_UT_DBG("pMmio->ccabap 0x%llx\n", p_mmio->ccabap); + + /** + * 64-bit pointer to the event context array in the host memory space; + * Host sets the pointer to the event context array + * during initialization + */ + p_mmio->ecabap = (u32)ev_ctx_array->phys_base - + (IPA_MHI_TEST_FIRST_EVENT_RING_ID * + sizeof(struct ipa_mhi_event_context_array)); + IPA_UT_DBG("pMmio->ecabap 0x%llx\n", p_mmio->ecabap); + + /** + * Register is not accessed by HWP. + * In test register carries the pointer of + * virtual address for the buffer of channel context array + */ + p_mmio->crcbap = (unsigned long)ch_ctx_array->base; + + /** + * Register is not accessed by HWP. + * In test register carries the pointer of + * virtual address for the buffer of channel context array + */ + p_mmio->crdb = (unsigned long)ev_ctx_array->base; + + /* test is running only on device. no need to translate addresses */ + p_mmio->mhiaddr.mhicrtlbase = 0x04; + p_mmio->mhiaddr.mhictrllimit = 0xFFFFFFFF; + p_mmio->mhiaddr.mhidatabase = 0x04; + p_mmio->mhiaddr.mhidatalimit = 0xFFFFFFFF; + + return rc; + +fail_free_ev_ctx_arr: + dma_free_coherent(ipa3_ctx->pdev, ev_ctx_array->size, + ev_ctx_array->base, ev_ctx_array->phys_base); + ev_ctx_array->base = NULL; +fail_free_ch_ctx_arr: + dma_free_coherent(ipa3_ctx->pdev, ch_ctx_array->size, + ch_ctx_array->base, ch_ctx_array->phys_base); + ch_ctx_array->base = NULL; +fail_free_msi: + dma_free_coherent(ipa3_ctx->pdev, msi->size, msi->base, + msi->phys_base); + msi->base = NULL; + return rc; +} + +static void ipa_mhi_test_destroy_channel_context( + struct ipa_mem_buffer transfer_ring_bufs[], + struct ipa_mem_buffer event_ring_bufs[], + u8 channel_id, + u8 event_ring_id) +{ + u32 ev_ring_idx; + u32 ch_idx; + + IPA_UT_DBG("Entry\n"); + + if ((channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) || + (channel_id > IPA_MHI_TEST_LAST_CHANNEL_ID)) { + IPA_UT_ERR("channal_id invalid %d\n", channel_id); + return; + } + + if ((event_ring_id < IPA_MHI_TEST_FIRST_EVENT_RING_ID) || + (event_ring_id > IPA_MHI_TEST_LAST_EVENT_RING_ID)) { + IPA_UT_ERR("event_ring_id invalid %d\n", event_ring_id); + return; + } + + ch_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID; + ev_ring_idx = event_ring_id - IPA_MHI_TEST_FIRST_EVENT_RING_ID; + + if (transfer_ring_bufs[ch_idx].base) { + dma_free_coherent(ipa3_ctx->pdev, + transfer_ring_bufs[ch_idx].size, + transfer_ring_bufs[ch_idx].base, + transfer_ring_bufs[ch_idx].phys_base); + transfer_ring_bufs[ch_idx].base = NULL; + } + + if (event_ring_bufs[ev_ring_idx].base) { + dma_free_coherent(ipa3_ctx->pdev, + event_ring_bufs[ev_ring_idx].size, + event_ring_bufs[ev_ring_idx].base, + event_ring_bufs[ev_ring_idx].phys_base); + event_ring_bufs[ev_ring_idx].base = NULL; + } +} + +static int ipa_mhi_test_config_channel_context( + struct ipa_mem_buffer *mmio, + struct ipa_mem_buffer transfer_ring_bufs[], + struct ipa_mem_buffer event_ring_bufs[], + u8 channel_id, + u8 event_ring_id, + u16 transfer_ring_size, + u16 event_ring_size, + u8 ch_type) +{ + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_channels; + struct ipa_mhi_event_context_array *p_events; + u32 ev_ring_idx; + u32 ch_idx; + + IPA_UT_DBG("Entry\n"); + + if ((channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) || + (channel_id > IPA_MHI_TEST_LAST_CHANNEL_ID)) { + IPA_UT_DBG("channal_id invalid %d\n", channel_id); + return -EFAULT; + } + + if ((event_ring_id 
< IPA_MHI_TEST_FIRST_EVENT_RING_ID) || + (event_ring_id > IPA_MHI_TEST_LAST_EVENT_RING_ID)) { + IPA_UT_DBG("event_ring_id invalid %d\n", event_ring_id); + return -EFAULT; + } + + p_mmio = (struct ipa_mhi_mmio_register_set *)mmio->base; + p_channels = + (struct ipa_mhi_channel_context_array *) + ((unsigned long)p_mmio->crcbap); + p_events = (struct ipa_mhi_event_context_array *) + ((unsigned long)p_mmio->crdb); + + IPA_UT_DBG("p_mmio: %pK p_channels: %pK p_events: %pK\n", + p_mmio, p_channels, p_events); + + ch_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID; + ev_ring_idx = event_ring_id - IPA_MHI_TEST_FIRST_EVENT_RING_ID; + + IPA_UT_DBG("ch_idx: %u ev_ring_idx: %u\n", ch_idx, ev_ring_idx); + if (transfer_ring_bufs[ch_idx].base) { + IPA_UT_ERR("ChannelId %d is already allocated\n", channel_id); + return -EFAULT; + } + + /* allocate and init event ring if needed */ + if (!event_ring_bufs[ev_ring_idx].base) { + IPA_UT_LOG("Configuring event ring...\n"); + event_ring_bufs[ev_ring_idx].size = + event_ring_size * + sizeof(struct ipa_mhi_event_ring_element); + event_ring_bufs[ev_ring_idx].base = + dma_alloc_coherent(ipa3_ctx->pdev, + event_ring_bufs[ev_ring_idx].size, + &event_ring_bufs[ev_ring_idx].phys_base, + GFP_KERNEL); + if (!event_ring_bufs[ev_ring_idx].base) { + IPA_UT_ERR("no mem for ev ring buf\n"); + return -ENOMEM; + } + p_events[ev_ring_idx].intmodc = 1; + p_events[ev_ring_idx].intmodt = 0; + p_events[ev_ring_idx].msivec = event_ring_id; + p_events[ev_ring_idx].rbase = + (u32)event_ring_bufs[ev_ring_idx].phys_base; + p_events[ev_ring_idx].rlen = + event_ring_bufs[ev_ring_idx].size; + p_events[ev_ring_idx].rp = + (u32)event_ring_bufs[ev_ring_idx].phys_base; + p_events[ev_ring_idx].wp = + (u32)event_ring_bufs[ev_ring_idx].phys_base; + } else { + IPA_UT_LOG("Skip configuring event ring - already done\n"); + } + + transfer_ring_bufs[ch_idx].size = + transfer_ring_size * + sizeof(struct ipa_mhi_transfer_ring_element); + transfer_ring_bufs[ch_idx].base = + dma_alloc_coherent(ipa3_ctx->pdev, + transfer_ring_bufs[ch_idx].size, + &transfer_ring_bufs[ch_idx].phys_base, + GFP_KERNEL); + if (!transfer_ring_bufs[ch_idx].base) { + IPA_UT_ERR("no mem for xfer ring buf\n"); + dma_free_coherent(ipa3_ctx->pdev, + event_ring_bufs[ev_ring_idx].size, + event_ring_bufs[ev_ring_idx].base, + event_ring_bufs[ev_ring_idx].phys_base); + event_ring_bufs[ev_ring_idx].base = NULL; + return -ENOMEM; + } + + p_channels[ch_idx].erindex = event_ring_id; + p_channels[ch_idx].rbase = (u32)transfer_ring_bufs[ch_idx].phys_base; + p_channels[ch_idx].rlen = transfer_ring_bufs[ch_idx].size; + p_channels[ch_idx].rp = (u32)transfer_ring_bufs[ch_idx].phys_base; + p_channels[ch_idx].wp = (u32)transfer_ring_bufs[ch_idx].phys_base; + p_channels[ch_idx].chtype = ch_type; + p_channels[ch_idx].brsmode = IPA_MHI_BURST_MODE_DEFAULT; + p_channels[ch_idx].pollcfg = 0; + + return 0; +} + +static void ipa_mhi_test_destroy_data_structures(void) +{ + IPA_UT_DBG("Entry\n"); + + /* Destroy OUT data buffer */ + if (test_mhi_ctx->out_buffer.base) { + dma_free_coherent(ipa3_ctx->pdev, + test_mhi_ctx->out_buffer.size, + test_mhi_ctx->out_buffer.base, + test_mhi_ctx->out_buffer.phys_base); + test_mhi_ctx->out_buffer.base = NULL; + } + + /* Destroy IN data buffer */ + if (test_mhi_ctx->in_buffer.base) { + dma_free_coherent(ipa3_ctx->pdev, + test_mhi_ctx->in_buffer.size, + test_mhi_ctx->in_buffer.base, + test_mhi_ctx->in_buffer.phys_base); + test_mhi_ctx->in_buffer.base = NULL; + } + + /* Destroy IN channel ctx */ + 
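/* the IN (CONS-side) channel/event ring pair uses index FIRST + 1 */ +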
ipa_mhi_test_destroy_channel_context( + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1); + + /* Destroy OUT channel ctx */ + ipa_mhi_test_destroy_channel_context( + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + IPA_MHI_TEST_FIRST_EVENT_RING_ID); +} + +static int ipa_mhi_test_setup_data_structures(void) +{ + int rc = 0; + + IPA_UT_DBG("Entry\n"); + + /* Config OUT Channel Context */ + rc = ipa_mhi_test_config_channel_context( + &test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + IPA_MHI_TEST_FIRST_EVENT_RING_ID, + 0x100, + 0x80, + IPA_MHI_OUT_CHAHNNEL); + if (rc) { + IPA_UT_ERR("Fail to config OUT ch ctx - err %d", rc); + return rc; + } + + /* Config IN Channel Context */ + rc = ipa_mhi_test_config_channel_context( + &test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1, + 0x100, + 0x80, + IPA_MHI_IN_CHAHNNEL); + if (rc) { + IPA_UT_ERR("Fail to config IN ch ctx - err %d", rc); + goto fail_destroy_out_ch_ctx; + } + + /* allocate IN data buffer */ + test_mhi_ctx->in_buffer.size = IPA_MHI_TEST_MAX_DATA_BUF_SIZE; + test_mhi_ctx->in_buffer.base = dma_alloc_coherent( + ipa3_ctx->pdev, test_mhi_ctx->in_buffer.size, + &test_mhi_ctx->in_buffer.phys_base, GFP_KERNEL); + if (!test_mhi_ctx->in_buffer.base) { + IPA_UT_ERR("no mem for In data buffer\n"); + rc = -ENOMEM; + goto fail_destroy_in_ch_ctx; + } + memset(test_mhi_ctx->in_buffer.base, 0, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + + /* allocate OUT data buffer */ + test_mhi_ctx->out_buffer.size = IPA_MHI_TEST_MAX_DATA_BUF_SIZE; + test_mhi_ctx->out_buffer.base = dma_alloc_coherent( + ipa3_ctx->pdev, test_mhi_ctx->out_buffer.size, + &test_mhi_ctx->out_buffer.phys_base, GFP_KERNEL); + if (!test_mhi_ctx->out_buffer.base) { + IPA_UT_ERR("no mem for Out data buffer\n"); + rc = -EFAULT; + goto fail_destroy_in_data_buf; + } + memset(test_mhi_ctx->out_buffer.base, 0, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + + return 0; + +fail_destroy_in_data_buf: + dma_free_coherent(ipa3_ctx->pdev, + test_mhi_ctx->in_buffer.size, + test_mhi_ctx->in_buffer.base, + test_mhi_ctx->in_buffer.phys_base); + test_mhi_ctx->in_buffer.base = NULL; +fail_destroy_in_ch_ctx: + ipa_mhi_test_destroy_channel_context( + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1); +fail_destroy_out_ch_ctx: + ipa_mhi_test_destroy_channel_context( + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + IPA_MHI_TEST_FIRST_EVENT_RING_ID); + return 0; +} + +/** + * ipa_test_mhi_suite_setup() - Suite setup function + */ +static int ipa_test_mhi_suite_setup(void **ppriv) +{ + int rc = 0; + struct ipa_sys_connect_params sys_in; + + IPA_UT_DBG("Start Setup\n"); + + if (!gsi_ctx) { + IPA_UT_ERR("No GSI ctx\n"); + return -EINVAL; + } + + if (!ipa3_ctx) { + IPA_UT_ERR("No IPA ctx\n"); + return -EINVAL; + } + + test_mhi_ctx = kzalloc(sizeof(struct ipa_test_mhi_context), + GFP_KERNEL); + if (!test_mhi_ctx) { + IPA_UT_ERR("failed allocated ctx\n"); + return -ENOMEM; + } + + test_mhi_ctx->gsi_mmio = ioremap_nocache(gsi_ctx->per.phys_addr, + gsi_ctx->per.size); + if (!test_mhi_ctx) { + IPA_UT_ERR("failed to remap GSI HW size=%lu\n", + gsi_ctx->per.size); + rc = -EFAULT; + goto 
fail_free_ctx; + } + + rc = ipa_test_mhi_alloc_mmio_space(); + if (rc) { + IPA_UT_ERR("failed to alloc mmio space"); + goto fail_iounmap; + } + + rc = ipa_mhi_test_setup_data_structures(); + if (rc) { + IPA_UT_ERR("failed to setup data structures"); + goto fail_free_mmio_spc; + } + + /* connect PROD pipe for remote wakeup */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_TEST_PROD; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS; + if (ipa_setup_sys_pipe(&sys_in, &test_mhi_ctx->test_prod_hdl)) { + IPA_UT_ERR("setup sys pipe failed.\n"); + goto fail_destroy_data_structures; + } + + *ppriv = test_mhi_ctx; + return 0; + +fail_destroy_data_structures: + ipa_mhi_test_destroy_data_structures(); +fail_free_mmio_spc: + ipa_test_mhi_free_mmio_space(); +fail_iounmap: + iounmap(test_mhi_ctx->gsi_mmio); +fail_free_ctx: + kfree(test_mhi_ctx); + test_mhi_ctx = NULL; + return rc; +} + +/** + * ipa_test_mhi_suite_teardown() - Suite teardown function + */ +static int ipa_test_mhi_suite_teardown(void *priv) +{ + IPA_UT_DBG("Start Teardown\n"); + + if (!test_mhi_ctx) + return 0; + + ipa_teardown_sys_pipe(test_mhi_ctx->test_prod_hdl); + ipa_mhi_test_destroy_data_structures(); + ipa_test_mhi_free_mmio_space(); + iounmap(test_mhi_ctx->gsi_mmio); + kfree(test_mhi_ctx); + test_mhi_ctx = NULL; + + return 0; +} + +/** + * ipa_mhi_test_initialize_driver() - MHI init and possibly start and connect + * + * To be run during tests + * 1. MHI init (Ready state) + * 2. Conditional MHO start and connect (M0 state) + */ +static int ipa_mhi_test_initialize_driver(bool skip_start_and_conn) +{ + int rc = 0; + struct ipa_mhi_init_params init_params; + struct ipa_mhi_start_params start_params; + struct ipa_mhi_connect_params prod_params; + struct ipa_mhi_connect_params cons_params; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + bool is_dma; + u64 phys_addr; + + IPA_UT_LOG("Entry\n"); + + p_mmio = test_mhi_ctx->mmio_buf.base; + + /* start IPA MHI */ + memset(&init_params, 0, sizeof(init_params)); + init_params.msi.addr_low = test_mhi_ctx->msi.phys_base; + init_params.msi.data = 0x10000000; + init_params.msi.mask = ~0x10000000; + /* MMIO not needed for GSI */ + init_params.first_ch_idx = IPA_MHI_TEST_FIRST_CHANNEL_ID; + init_params.first_er_idx = IPA_MHI_TEST_FIRST_EVENT_RING_ID; + init_params.assert_bit40 = false; + init_params.notify = ipa_mhi_test_cb; + init_params.priv = NULL; + init_params.test_mode = true; + + rc = ipa_mhi_init(&init_params); + if (rc) { + IPA_UT_LOG("ipa_mhi_init failed %d\n", rc); + return rc; + } + + IPA_UT_LOG("Wait async ready event\n"); + if (wait_for_completion_timeout(&mhi_test_ready_comp, 10 * HZ) == 0) { + IPA_UT_LOG("timeout waiting for READY event"); + IPA_UT_TEST_FAIL_REPORT("failed waiting for state ready"); + return -ETIME; + } + + if (ipa_mhi_is_using_dma(&is_dma)) { + IPA_UT_LOG("is_dma checkign failed. 
Is MHI loaded?\n"); + IPA_UT_TEST_FAIL_REPORT("failed checking using dma"); + return -EPERM; + } + + if (is_dma) { + IPA_UT_LOG("init ipa_dma\n"); + rc = ipa_dma_init(); + if (rc && rc != -EFAULT) { + IPA_UT_LOG("ipa_dma_init failed, %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("failed init dma"); + return rc; + } + IPA_UT_LOG("enable ipa_dma\n"); + rc = ipa_dma_enable(); + if (rc && rc != -EPERM) { + IPA_UT_LOG("ipa_dma_enable failed, %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("failed enable dma"); + return rc; + } + } + + if (!skip_start_and_conn) { + memset(&start_params, 0, sizeof(start_params)); + start_params.channel_context_array_addr = p_mmio->ccabap; + start_params.event_context_array_addr = p_mmio->ecabap; + + IPA_UT_LOG("BEFORE mhi_start\n"); + rc = ipa_mhi_start(&start_params); + if (rc) { + IPA_UT_LOG("mhi_start failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail start mhi"); + return rc; + } + IPA_UT_LOG("AFTER mhi_start\n"); + + phys_addr = p_mmio->ccabap + (IPA_MHI_TEST_FIRST_CHANNEL_ID * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + IPA_UT_LOG("ch: %d base: 0x%pK phys_addr 0x%llx chstate: %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + p_ch_ctx_array, phys_addr, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + + memset(&prod_params, 0, sizeof(prod_params)); + prod_params.sys.client = IPA_CLIENT_MHI_PROD; + prod_params.sys.ipa_ep_cfg.mode.mode = IPA_DMA; + prod_params.sys.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS; + prod_params.sys.ipa_ep_cfg.seq.seq_type = + IPA_MHI_TEST_SEQ_TYPE_DMA; + prod_params.sys.ipa_ep_cfg.seq.set_dynamic = true; + prod_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID; + IPA_UT_LOG("BEFORE connect_pipe (PROD): client:%d ch_id:%u\n", + prod_params.sys.client, prod_params.channel_id); + rc = ipa_mhi_connect_pipe(&prod_params, + &test_mhi_ctx->prod_hdl); + if (rc) { + IPA_UT_LOG("mhi_connect_pipe failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail connect PROD pipe"); + return rc; + } + + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("MHI_PROD: chstate is not RUN chstate:%s\n", + ipa_mhi_get_state_str( + p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not run"); + return -EFAULT; + } + + phys_addr = p_mmio->ccabap + + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + IPA_UT_LOG("ch: %d base: 0x%pK phys_addr 0x%llx chstate: %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + p_ch_ctx_array, phys_addr, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + + memset(&cons_params, 0, sizeof(cons_params)); + cons_params.sys.client = IPA_CLIENT_MHI_CONS; + cons_params.sys.skip_ep_cfg = true; + cons_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID + 1; + IPA_UT_LOG("BEFORE connect_pipe (CONS): client:%d ch_id:%u\n", + cons_params.sys.client, cons_params.channel_id); + rc = ipa_mhi_connect_pipe(&cons_params, + &test_mhi_ctx->cons_hdl); + if (rc) { + IPA_UT_LOG("mhi_connect_pipe failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail connect CONS pipe"); + return rc; + } + + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("MHI_CONS: chstate is not RUN chstate:%s\n", + ipa_mhi_get_state_str( + p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not run"); + return -EFAULT; + } + } + + return 0; +} + +/** + * To be run during test + 
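* The MMIO, MSI, context-array and event ring buffers are kept; only the + * transfer rings are freed and re-created, so a following init can bring + * the stack back up. +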
* 1. MHI destroy + * 2. re-configure the channels + */ +static int ipa_mhi_test_destroy(struct ipa_test_mhi_context *ctx) +{ + struct ipa_mhi_mmio_register_set *p_mmio; + u64 phys_addr; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + int rc; + + IPA_UT_LOG("Entry\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("Input err invalid ctx\n"); + return -EINVAL; + } + + p_mmio = ctx->mmio_buf.base; + + phys_addr = p_mmio->ccabap + + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = ctx->ch_ctx_array.base + + (phys_addr - ctx->ch_ctx_array.phys_base); + IPA_UT_LOG("channel id %d (CONS): chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + + phys_addr = p_mmio->ccabap + + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = ctx->ch_ctx_array.base + + (phys_addr - ctx->ch_ctx_array.phys_base); + IPA_UT_LOG("channel id %d (PROD): chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + + IPA_UT_LOG("MHI Destroy\n"); + ipa_mhi_destroy(); + IPA_UT_LOG("Post MHI Destroy\n"); + + ctx->prod_hdl = 0; + ctx->cons_hdl = 0; + + dma_free_coherent(ipa3_ctx->pdev, ctx->xfer_ring_bufs[1].size, + ctx->xfer_ring_bufs[1].base, ctx->xfer_ring_bufs[1].phys_base); + ctx->xfer_ring_bufs[1].base = NULL; + + IPA_UT_LOG("config channel context for channel %d (MHI CONS)\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1); + rc = ipa_mhi_test_config_channel_context( + &ctx->mmio_buf, + ctx->xfer_ring_bufs, + ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1, + 0x100, + 0x80, + IPA_MHI_IN_CHAHNNEL); + if (rc) { + IPA_UT_LOG("config channel context failed %d, channel %d\n", + rc, IPA_MHI_TEST_FIRST_CHANNEL_ID + 1); + IPA_UT_TEST_FAIL_REPORT("fail config CONS channel ctx"); + return -EFAULT; + } + + dma_free_coherent(ipa3_ctx->pdev, ctx->xfer_ring_bufs[0].size, + ctx->xfer_ring_bufs[0].base, ctx->xfer_ring_bufs[0].phys_base); + ctx->xfer_ring_bufs[0].base = NULL; + + IPA_UT_LOG("config channel context for channel %d (MHI PROD)\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID); + rc = ipa_mhi_test_config_channel_context( + &ctx->mmio_buf, + ctx->xfer_ring_bufs, + ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + IPA_MHI_TEST_FIRST_EVENT_RING_ID, + 0x100, + 0x80, + IPA_MHI_OUT_CHAHNNEL); + if (rc) { + IPA_UT_LOG("config channel context failed %d, channel %d\n", + rc, IPA_MHI_TEST_FIRST_CHANNEL_ID); + IPA_UT_TEST_FAIL_REPORT("fail config PROD channel ctx"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * 1. Destroy + * 2. Initialize (to Ready or M0 states) + */ +static int ipa_mhi_test_reset(struct ipa_test_mhi_context *ctx, + bool skip_start_and_conn) +{ + int rc; + + IPA_UT_LOG("Entry\n"); + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("destroy fail"); + return rc; + } + + rc = ipa_mhi_test_initialize_driver(skip_start_and_conn); + if (rc) { + IPA_UT_LOG("driver init failed skip_start_and_con=%d rc=%d\n", + skip_start_and_conn, rc); + IPA_UT_TEST_FAIL_REPORT("init fail"); + return rc; + } + + return 0; +} + +/** + * To be run during test + * 1. disconnect cons channel + * 2. config cons channel + * 3. disconnect prod channel + * 4. config prod channel + * 5. connect prod + * 6. 
connect cons + */ +static int ipa_mhi_test_channel_reset(void) +{ + int rc; + struct ipa_mhi_connect_params prod_params; + struct ipa_mhi_connect_params cons_params; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + u64 phys_addr; + + p_mmio = test_mhi_ctx->mmio_buf.base; + + IPA_UT_LOG("Before pipe disconnect (CONS) client hdl=%u=\n", + test_mhi_ctx->cons_hdl); + rc = ipa_mhi_disconnect_pipe(test_mhi_ctx->cons_hdl); + if (rc) { + IPA_UT_LOG("disconnect_pipe failed (CONS) %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("CONS pipe disconnect fail"); + return -EFAULT; + } + test_mhi_ctx->cons_hdl = 0; + + phys_addr = p_mmio->ccabap + + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_DISABLE) { + IPA_UT_LOG("chstate is not disabled! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not disabled"); + return -EFAULT; + } + + dma_free_coherent(ipa3_ctx->pdev, + test_mhi_ctx->xfer_ring_bufs[1].size, + test_mhi_ctx->xfer_ring_bufs[1].base, + test_mhi_ctx->xfer_ring_bufs[1].phys_base); + test_mhi_ctx->xfer_ring_bufs[1].base = NULL; + rc = ipa_mhi_test_config_channel_context( + &test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1, + 0x100, + 0x80, + IPA_MHI_IN_CHAHNNEL); + if (rc) { + IPA_UT_LOG("config_channel_context IN failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail config CONS channel context"); + return -EFAULT; + } + IPA_UT_LOG("Before pipe disconnect (CONS) client hdl=%u=\n", + test_mhi_ctx->prod_hdl); + rc = ipa_mhi_disconnect_pipe(test_mhi_ctx->prod_hdl); + if (rc) { + IPA_UT_LOG("disconnect_pipe failed (PROD) %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("PROD pipe disconnect fail"); + return -EFAULT; + } + test_mhi_ctx->prod_hdl = 0; + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_DISABLE) { + IPA_UT_LOG("chstate is not disabled! 
ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not disabled"); + return -EFAULT; + } + + dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->xfer_ring_bufs[0].size, + test_mhi_ctx->xfer_ring_bufs[0].base, + test_mhi_ctx->xfer_ring_bufs[0].phys_base); + test_mhi_ctx->xfer_ring_bufs[0].base = NULL; + rc = ipa_mhi_test_config_channel_context( + &test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + IPA_MHI_TEST_FIRST_EVENT_RING_ID, + 0x100, + 0x80, + IPA_MHI_OUT_CHAHNNEL); + if (rc) { + IPA_UT_LOG("config_channel_context OUT failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not disabled"); + return -EFAULT; + } + + memset(&prod_params, 0, sizeof(prod_params)); + prod_params.sys.client = IPA_CLIENT_MHI_PROD; + prod_params.sys.ipa_ep_cfg.mode.mode = IPA_DMA; + prod_params.sys.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS; + prod_params.sys.ipa_ep_cfg.seq.seq_type = IPA_MHI_TEST_SEQ_TYPE_DMA; + prod_params.sys.ipa_ep_cfg.seq.set_dynamic = true; + prod_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID; + IPA_UT_LOG("BEFORE connect PROD\n"); + rc = ipa_mhi_connect_pipe(&prod_params, &test_mhi_ctx->prod_hdl); + if (rc) { + IPA_UT_LOG("connect_pipe failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail connect PROD pipe"); + return rc; + } + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not run! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not run"); + return -EFAULT; + } + + memset(&cons_params, 0, sizeof(cons_params)); + cons_params.sys.client = IPA_CLIENT_MHI_CONS; + cons_params.sys.skip_ep_cfg = true; + cons_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID + 1; + IPA_UT_LOG("BEFORE connect CONS\n"); + rc = ipa_mhi_connect_pipe(&cons_params, &test_mhi_ctx->cons_hdl); + if (rc) { + IPA_UT_LOG("ipa_mhi_connect_pipe failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail connect CONS pipe"); + return rc; + } + + phys_addr = p_mmio->ccabap + + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not run! 
ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not run"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * Send data + */ +static int ipa_mhi_test_q_transfer_re(struct ipa_mem_buffer *mmio, + struct ipa_mem_buffer xfer_ring_bufs[], + struct ipa_mem_buffer ev_ring_bufs[], + u8 channel_id, + struct ipa_mem_buffer buf_array[], + int buf_array_size, + bool ieob, + bool ieot, + bool bei, + bool trigger_db) +{ + struct ipa_mhi_transfer_ring_element *curr_re; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_channels; + struct ipa_mhi_event_context_array *p_events; + u32 channel_idx; + u32 event_ring_index; + u32 wp_ofst; + u32 rp_ofst; + u32 next_wp_ofst; + int i; + u32 num_of_ed_to_queue; + + IPA_UT_LOG("Entry\n"); + + p_mmio = (struct ipa_mhi_mmio_register_set *)mmio->base; + p_channels = (struct ipa_mhi_channel_context_array *) + ((unsigned long)p_mmio->crcbap); + p_events = (struct ipa_mhi_event_context_array *) + ((unsigned long)p_mmio->crdb); + + if (ieob) + num_of_ed_to_queue = buf_array_size; + else + num_of_ed_to_queue = ieot ? 1 : 0; + + if (channel_id >= + (IPA_MHI_TEST_FIRST_CHANNEL_ID + IPA_MHI_TEST_NUM_CHANNELS) || + channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) { + IPA_UT_LOG("Invalid Channel ID %d\n", channel_id); + return -EFAULT; + } + + channel_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID; + + if (!xfer_ring_bufs[channel_idx].base) { + IPA_UT_LOG("Channel is not allocated\n"); + return -EFAULT; + } + if (p_channels[channel_idx].brsmode == IPA_MHI_BURST_MODE_DEFAULT || + p_channels[channel_idx].brsmode == IPA_MHI_BURST_MODE_ENABLE) + num_of_ed_to_queue += 1; /* for OOB/DB mode event */ + + /* First queue EDs */ + event_ring_index = p_channels[channel_idx].erindex - + IPA_MHI_TEST_FIRST_EVENT_RING_ID; + + wp_ofst = (u32)(p_events[event_ring_index].wp - + p_events[event_ring_index].rbase); + + if (p_events[event_ring_index].rlen & 0xFFFFFFFF00000000) { + IPA_UT_LOG("invalid ev rlen %llu\n", + p_events[event_ring_index].rlen); + return -EFAULT; + } + + next_wp_ofst = (wp_ofst + num_of_ed_to_queue * + sizeof(struct ipa_mhi_event_ring_element)) % + (u32)p_events[event_ring_index].rlen; + + /* set next WP */ + p_events[event_ring_index].wp = + (u32)p_events[event_ring_index].rbase + next_wp_ofst; + + /* write value to event ring doorbell */ + IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n", + p_events[event_ring_index].wp, + &(gsi_ctx->per.phys_addr), GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS( + event_ring_index + IPA_MHI_GSI_ER_START, 0)); + iowrite32(p_events[event_ring_index].wp, + test_mhi_ctx->gsi_mmio + + GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS( + event_ring_index + IPA_MHI_GSI_ER_START, 0)); + + for (i = 0; i < buf_array_size; i++) { + /* calculate virtual pointer for current WP and RP */ + wp_ofst = (u32)(p_channels[channel_idx].wp - + p_channels[channel_idx].rbase); + rp_ofst = (u32)(p_channels[channel_idx].rp - + p_channels[channel_idx].rbase); + (void)rp_ofst; + curr_re = (struct ipa_mhi_transfer_ring_element *) + ((unsigned long)xfer_ring_bufs[channel_idx].base + + wp_ofst); + if (p_channels[channel_idx].rlen & 0xFFFFFFFF00000000) { + IPA_UT_LOG("invalid ch rlen %llu\n", + p_channels[channel_idx].rlen); + return -EFAULT; + } + next_wp_ofst = (wp_ofst + + sizeof(struct ipa_mhi_transfer_ring_element)) % + (u32)p_channels[channel_idx].rlen; + + /* write current RE */ + curr_re->type = IPA_MHI_RING_ELEMENT_TRANSFER; 
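+ /*
+  * The remaining RE fields give the data buffer (length and physical
+  * address) and the BEI/IEOB/IEOT flags that control which completion
+  * events the device raises for this element; the chain bit and the
+  * doorbell write are handled below depending on whether this is the
+  * last buffer in the array.
+  */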
+ curr_re->len = (u16)buf_array[i].size; + curr_re->ptr = (u32)buf_array[i].phys_base; + curr_re->word_C.bits.bei = bei; + curr_re->word_C.bits.ieob = ieob; + curr_re->word_C.bits.ieot = ieot; + + /* set next WP */ + p_channels[channel_idx].wp = + p_channels[channel_idx].rbase + next_wp_ofst; + + if (i == (buf_array_size - 1)) { + /* last buffer */ + curr_re->word_C.bits.chain = 0; + if (trigger_db) { + IPA_UT_LOG( + "DB to channel 0x%llx: base %pa ofst 0x%x\n" + , p_channels[channel_idx].wp + , &(gsi_ctx->per.phys_addr) + , GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS( + channel_idx, 0)); + iowrite32(p_channels[channel_idx].wp, + test_mhi_ctx->gsi_mmio + + GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS( + channel_idx, 0)); + } + } else { + curr_re->word_C.bits.chain = 1; + } + } + + return 0; +} + +/** + * To be run during test + * Send data in loopback (from In to OUT) and compare + */ +static int ipa_mhi_test_loopback_data_transfer(void) +{ + struct ipa_mem_buffer *p_mmio; + int i; + int rc; + static int val; + bool timeout = true; + + IPA_UT_LOG("Entry\n"); + + p_mmio = &test_mhi_ctx->mmio_buf; + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + val++; + + memset(test_mhi_ctx->in_buffer.base, 0, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) + memset(test_mhi_ctx->out_buffer.base + i, (val + i) & 0xFF, 1); + + /* queue RE for IN side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(p_mmio, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + + if (rc) { + IPA_UT_LOG("q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re"); + return rc; + } + + /* queue REs for OUT side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(p_mmio, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + &test_mhi_ctx->out_buffer, + 1, + true, + true, + false, + true); + + if (rc) { + IPA_UT_LOG("q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail OUT q xfer re"); + return rc; + } + + IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout); + if (timeout) { + IPA_UT_LOG("transfer timeout. 
MSI = 0x%x\n", + *((u32 *)test_mhi_ctx->msi.base)); + IPA_UT_TEST_FAIL_REPORT("xfter timeout"); + return -EFAULT; + } + + /* compare the two buffers */ + if (memcmp(test_mhi_ctx->in_buffer.base, test_mhi_ctx->out_buffer.base, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) { + IPA_UT_LOG("buffer are not equal\n"); + IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * Do suspend and check channel states to be suspend if should success + */ +static int ipa_mhi_test_suspend(bool force, bool should_success) +{ + int rc; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + u64 phys_addr; + + IPA_UT_LOG("Entry\n"); + + rc = ipa_mhi_suspend(force); + if (should_success && rc != 0) { + IPA_UT_LOG("ipa_mhi_suspend failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend failed"); + return -EFAULT; + } + + if (!should_success && rc != -EAGAIN) { + IPA_UT_LOG("ipa_mhi_suspenddid not return -EAGAIN fail %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("suspend succeeded unexpectedly"); + return -EFAULT; + } + + p_mmio = test_mhi_ctx->mmio_buf.base; + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (should_success) { + if (p_ch_ctx_array->chstate != + IPA_HW_MHI_CHANNEL_STATE_SUSPEND) { + IPA_UT_LOG("chstate is not suspend. ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not suspend"); + return -EFAULT; + } + if (!force && p_ch_ctx_array->rp != p_ch_ctx_array->wp) { + IPA_UT_LOG("rp not updated ch %d rp 0x%llx wp 0x%llx\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + p_ch_ctx_array->rp, p_ch_ctx_array->wp); + IPA_UT_TEST_FAIL_REPORT("rp was not updated"); + return -EFAULT; + } + } else { + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not running! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not run"); + return -EFAULT; + } + } + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (should_success) { + if (p_ch_ctx_array->chstate != + IPA_HW_MHI_CHANNEL_STATE_SUSPEND) { + IPA_UT_LOG("chstate is not running! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not suspend"); + return -EFAULT; + } + if (!force && p_ch_ctx_array->rp != p_ch_ctx_array->wp) { + IPA_UT_LOG("rp not updated ch %d rp 0x%llx wp 0x%llx\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + p_ch_ctx_array->rp, p_ch_ctx_array->wp); + IPA_UT_TEST_FAIL_REPORT("rp was not updated"); + return -EFAULT; + } + } else { + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not running! 
ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not run"); + return -EFAULT; + } + } + + return 0; +} + +/** + * To be run during test + * Do resume and check channel state to be running + */ +static int ipa_test_mhi_resume(void) +{ + int rc; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + u64 phys_addr; + + rc = ipa_mhi_resume(); + if (rc) { + IPA_UT_LOG("resume failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("resume failed"); + return -EFAULT; + } + + p_mmio = test_mhi_ctx->mmio_buf.base; + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not running! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not run"); + return -EFAULT; + } + + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) { + IPA_UT_LOG("chstate is not running! ch %d chstate %s\n", + IPA_MHI_TEST_FIRST_CHANNEL_ID, + ipa_mhi_get_state_str(p_ch_ctx_array->chstate)); + IPA_UT_TEST_FAIL_REPORT("channel state not run"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * 1. suspend + * 2. queue RE for IN and OUT and send data + * 3. should get MSI timeout due to suspend + * 4. resume + * 5. should get the MSIs now + * 6. 
comapre the IN and OUT buffers + */ +static int ipa_mhi_test_suspend_resume(void) +{ + int rc; + int i; + bool timeout = true; + + IPA_UT_LOG("Entry\n"); + + IPA_UT_LOG("BEFORE suspend\n"); + rc = ipa_mhi_test_suspend(false, true); + if (rc) { + IPA_UT_LOG("suspend failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend failed"); + return rc; + } + IPA_UT_LOG("AFTER suspend\n"); + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + memset(test_mhi_ctx->in_buffer.base, 0, IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) + memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1); + + /* queue RE for IN side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re"); + return rc; + } + + /* queue REs for OUT side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + &test_mhi_ctx->out_buffer, + 1, + true, + true, + false, + true); + + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail OUT q xfer re"); + return rc; + } + + IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout); + if (!timeout) { + IPA_UT_LOG("Error: transfer success on suspend\n"); + IPA_UT_TEST_FAIL_REPORT("xfer suceeded unexpectedly"); + return -EFAULT; + } + + IPA_UT_LOG("BEFORE resume\n"); + rc = ipa_test_mhi_resume(); + if (rc) { + IPA_UT_LOG("ipa_mhi_resume failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("resume fail"); + return rc; + } + IPA_UT_LOG("AFTER resume\n"); + + IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout); + if (timeout) { + IPA_UT_LOG("Error: transfer timeout\n"); + IPA_UT_TEST_FAIL_REPORT("xfer timeout"); + return -EFAULT; + } + + /* compare the two buffers */ + if (memcmp(test_mhi_ctx->in_buffer.base, + test_mhi_ctx->out_buffer.base, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) { + IPA_UT_LOG("Error: buffers are not equal\n"); + IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * 1. enable aggregation + * 2. queue IN RE (ring element) + * 3. allocate skb with data + * 4. 
send it (this will create open aggr frame) + */ +static int ipa_mhi_test_create_aggr_open_frame(void) +{ + struct ipa_ep_cfg_aggr ep_aggr; + struct sk_buff *skb; + int rc; + int i; + u32 aggr_state_active; + + IPA_UT_LOG("Entry\n"); + + memset(&ep_aggr, 0, sizeof(ep_aggr)); + ep_aggr.aggr_en = IPA_ENABLE_AGGR; + ep_aggr.aggr = IPA_GENERIC; + ep_aggr.aggr_pkt_limit = 2; + + rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr); + if (rc) { + IPA_UT_LOG("failed to configure aggr"); + IPA_UT_TEST_FAIL_REPORT("failed to configure aggr"); + return rc; + } + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + /* queue RE for IN side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re"); + return rc; + } + + skb = dev_alloc_skb(IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + if (!skb) { + IPA_UT_LOG("non mem for skb\n"); + IPA_UT_TEST_FAIL_REPORT("fail alloc skb"); + return -ENOMEM; + } + skb_put(skb, IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) { + memset(skb->data + i, i & 0xFF, 1); + memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1); + } + + rc = ipa_tx_dp(IPA_CLIENT_TEST_PROD, skb, NULL); + if (rc) { + IPA_UT_LOG("ipa_tx_dp failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail"); + return rc; + } + + msleep(20); + + aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + IPA_UT_LOG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active); + if (aggr_state_active == 0) { + IPA_UT_LOG("No aggregation frame open!\n"); + IPA_UT_TEST_FAIL_REPORT("No aggregation frame open"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * 1. create open aggr by sending data + * 2. suspend - if force it should succeed, otherwize it fails + * 3. if force - wait for wakeup event - it should arrive + * 4. if force - resume + * 5. force close the aggr. + * 6. wait for MSI - it should arrive + * 7. compare IN and OUT buffers + * 8. disable aggr. + */ +static int ipa_mhi_test_suspend_aggr_open(bool force) +{ + int rc; + struct ipa_ep_cfg_aggr ep_aggr; + bool timeout = true; + + IPA_UT_LOG("Entry\n"); + + rc = ipa_mhi_test_create_aggr_open_frame(); + if (rc) { + IPA_UT_LOG("failed create open aggr\n"); + IPA_UT_TEST_FAIL_REPORT("fail create open aggr"); + return rc; + } + + if (force) + reinit_completion(&mhi_test_wakeup_comp); + + IPA_UT_LOG("BEFORE suspend\n"); + /** + * if suspend force, then suspend should succeed. + * otherwize it should fail due to open aggr. 
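+ * (while the CONS pipe holds an open aggregation frame the IPA still
+ * owns data that has not been delivered to the host, so a non-force
+ * suspend is expected to be rejected; the frame is force-closed further
+ * below before the MSI and buffer comparisons)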
+ */ + rc = ipa_mhi_test_suspend(force, force); + if (rc) { + IPA_UT_LOG("suspend failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend fail"); + return rc; + } + IPA_UT_LOG("AFTER suspend\n"); + + if (force) { + if (!wait_for_completion_timeout(&mhi_test_wakeup_comp, HZ)) { + IPA_UT_LOG("timeout waiting for wakeup event\n"); + IPA_UT_TEST_FAIL_REPORT("timeout waitinf wakeup event"); + return -ETIME; + } + + IPA_UT_LOG("BEFORE resume\n"); + rc = ipa_test_mhi_resume(); + if (rc) { + IPA_UT_LOG("resume failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("resume failed"); + return rc; + } + IPA_UT_LOG("AFTER resume\n"); + } + + ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << test_mhi_ctx->cons_hdl)); + + IPA_MHI_TEST_CHECK_MSI_INTR(false, timeout); + if (timeout) { + IPA_UT_LOG("fail: transfer not completed\n"); + IPA_UT_TEST_FAIL_REPORT("timeout on transferring data"); + return -EFAULT; + } + + /* compare the two buffers */ + if (memcmp(test_mhi_ctx->in_buffer.base, + test_mhi_ctx->out_buffer.base, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) { + IPA_UT_LOG("fail: buffer are not equal\n"); + IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer"); + return -EFAULT; + } + + memset(&ep_aggr, 0, sizeof(ep_aggr)); + rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr); + if (rc) { + IPA_UT_LOG("failed to configure aggr"); + IPA_UT_TEST_FAIL_REPORT("fail to disable aggr"); + return rc; + } + + return 0; +} + +/** + * To be run during test + * 1. suspend + * 2. queue IN RE (ring element) + * 3. allocate skb with data + * 4. send it (this will create open aggr frame) + * 5. wait for wakeup event - it should arrive + * 6. resume + * 7. wait for MSI - it should arrive + * 8. compare IN and OUT buffers + */ +static int ipa_mhi_test_suspend_host_wakeup(void) +{ + int rc; + int i; + bool timeout = true; + struct sk_buff *skb; + + reinit_completion(&mhi_test_wakeup_comp); + + IPA_UT_LOG("BEFORE suspend\n"); + rc = ipa_mhi_test_suspend(false, true); + if (rc) { + IPA_UT_LOG("suspend failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend fail"); + return rc; + } + IPA_UT_LOG("AFTER suspend\n"); + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + memset(test_mhi_ctx->in_buffer.base, 0, IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + /* queue RE for IN side and trigger doorbell*/ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re"); + return rc; + } + + skb = dev_alloc_skb(IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + if (!skb) { + IPA_UT_LOG("non mem for skb\n"); + IPA_UT_TEST_FAIL_REPORT("no mem for skb"); + return -ENOMEM; + } + skb_put(skb, IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) { + memset(skb->data + i, i & 0xFF, 1); + memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1); + } + + rc = ipa_tx_dp(IPA_CLIENT_TEST_PROD, skb, NULL); + if (rc) { + IPA_UT_LOG("ipa_tx_dp failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail"); + return rc; + } + + if (wait_for_completion_timeout(&mhi_test_wakeup_comp, HZ) == 0) { + IPA_UT_LOG("timeout waiting for wakeup event\n"); + IPA_UT_TEST_FAIL_REPORT("timeout waiting for wakeup event"); + return -ETIME; + } + + IPA_UT_LOG("BEFORE resume\n"); + rc = ipa_test_mhi_resume(); + if (rc) { + 
IPA_UT_LOG("resume failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("resume fail"); + return rc; + } + IPA_UT_LOG("AFTER resume\n"); + + /* check for MSI interrupt one channels */ + IPA_MHI_TEST_CHECK_MSI_INTR(false, timeout); + if (timeout) { + IPA_UT_LOG("fail: transfer timeout\n"); + IPA_UT_TEST_FAIL_REPORT("timeout on xfer"); + return -EFAULT; + } + + /* compare the two buffers */ + if (memcmp(test_mhi_ctx->in_buffer.base, + test_mhi_ctx->out_buffer.base, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) { + IPA_UT_LOG("fail: buffer are not equal\n"); + IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer"); + return -EFAULT; + } + + return 0; +} + +/** + * To be run during test + * 1. queue OUT RE/buffer + * 2. wait for MSI on OUT + * 3. Do 1. and 2. till got MSI wait timeout (ch full / holb) + */ +static int ipa_mhi_test_create_full_channel(int *submitted_packets) +{ + int i; + bool timeout = true; + int rc; + + if (!submitted_packets) { + IPA_UT_LOG("Input error\n"); + return -EINVAL; + } + + *submitted_packets = 0; + + for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) + memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1); + + do { + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + IPA_UT_LOG("submitting OUT buffer\n"); + timeout = true; + /* queue REs for OUT side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID, + &test_mhi_ctx->out_buffer, + 1, + true, + true, + false, + true); + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("fail OUT q re"); + return rc; + } + (*submitted_packets)++; + + IPA_UT_LOG("waiting for MSI\n"); + for (i = 0; i < 10; i++) { + if (*((u32 *)test_mhi_ctx->msi.base) == + (0x10000000 | + (IPA_MHI_TEST_FIRST_EVENT_RING_ID))) { + IPA_UT_LOG("got MSI\n"); + timeout = false; + break; + } + msleep(20); + } + } while (!timeout); + + return 0; +} + +/** + * To be run during test + * 1. queue OUT RE/buffer + * 2. wait for MSI on OUT + * 3. Do 1. and 2. till got MSI wait timeout (ch full) + * 4. suspend - it should fail with -EAGAIN - M1 is rejected + * 5. foreach submitted pkt, do the next steps + * 6. queue IN RE/buffer + * 7. wait for MSI + * 8. compare IN and OUT buffers + */ +static int ipa_mhi_test_suspend_full_channel(bool force) +{ + int rc; + bool timeout; + int submitted_packets = 0; + + rc = ipa_mhi_test_create_full_channel(&submitted_packets); + if (rc) { + IPA_UT_LOG("fail create full channel\n"); + IPA_UT_TEST_FAIL_REPORT("fail create full channel"); + return rc; + } + + IPA_UT_LOG("BEFORE suspend\n"); + rc = ipa_mhi_test_suspend(force, false); + if (rc) { + IPA_UT_LOG("ipa_mhi_suspend did not returned -EAGAIN. 
rc %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("test suspend fail"); + return -EFAULT; + } + IPA_UT_LOG("AFTER suspend\n"); + + while (submitted_packets) { + memset(test_mhi_ctx->in_buffer.base, 0, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE); + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + + timeout = true; + /* queue RE for IN side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", + rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q re"); + return rc; + } + + IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout); + if (timeout) { + IPA_UT_LOG("transfer failed - timeout\n"); + IPA_UT_TEST_FAIL_REPORT("timeout on xfer"); + return -EFAULT; + } + + /* compare the two buffers */ + if (memcmp(test_mhi_ctx->in_buffer.base, + test_mhi_ctx->out_buffer.base, + IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) { + IPA_UT_LOG("buffer are not equal\n"); + IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer"); + return -EFAULT; + } + + submitted_packets--; + } + + return 0; +} + +/** + * To be called from test + * 1. suspend + * 2. reset to M0 state + */ +static int ipa_mhi_test_suspend_and_reset(struct ipa_test_mhi_context *ctx) +{ + int rc; + + IPA_UT_LOG("BEFORE suspend\n"); + rc = ipa_mhi_test_suspend(false, true); + if (rc) { + IPA_UT_LOG("suspend failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend fail"); + return rc; + } + IPA_UT_LOG("AFTER suspend\n"); + + rc = ipa_mhi_test_reset(ctx, false); + if (rc) { + IPA_UT_LOG("reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("reset fail"); + return rc; + } + + return 0; +} + +/** + * To be run during test + * 1. manualy update wp + * 2. suspend - should succeed + * 3. restore wp value + */ +static int ipa_mhi_test_suspend_wp_update(void) +{ + int rc; + struct ipa_mhi_mmio_register_set *p_mmio; + struct ipa_mhi_channel_context_array *p_ch_ctx_array; + u64 old_wp; + u64 phys_addr; + + /* simulate a write by updating the wp */ + p_mmio = test_mhi_ctx->mmio_buf.base; + phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) * + sizeof(struct ipa_mhi_channel_context_array)); + p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base + + (phys_addr - test_mhi_ctx->ch_ctx_array.phys_base); + old_wp = p_ch_ctx_array->wp; + p_ch_ctx_array->wp += 16; + + IPA_UT_LOG("BEFORE suspend\n"); + rc = ipa_mhi_test_suspend(false, false); + if (rc) { + IPA_UT_LOG("suspend failed rc %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend fail"); + p_ch_ctx_array->wp = old_wp; + return rc; + } + IPA_UT_LOG("AFTER suspend\n"); + + p_ch_ctx_array->wp = old_wp; + + return 0; +} + +/** + * To be run during test + * 1. create open aggr by sending data + * 2. channel reset (disconnect/connet) + * 3. validate no aggr. open after reset + * 4. disable aggr. 
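+ * (the channel reset is expected to discard the open aggregation frame;
+ * step 3 verifies this by reading IPA_STATE_AGGR_ACTIVE after the reset)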
+ */ +static int ipa_mhi_test_channel_reset_aggr_open(void) +{ + int rc; + u32 aggr_state_active; + struct ipa_ep_cfg_aggr ep_aggr; + + IPA_UT_LOG("Entry\n"); + + rc = ipa_mhi_test_create_aggr_open_frame(); + if (rc) { + IPA_UT_LOG("failed create open aggr rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail creare open aggr frame"); + return rc; + } + + rc = ipa_mhi_test_channel_reset(); + if (rc) { + IPA_UT_LOG("channel reset failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("channel reset fail"); + return rc; + } + + aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE); + IPADBG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active); + if (aggr_state_active != 0) { + IPA_UT_LOG("aggregation frame open after reset!\n"); + IPA_UT_LOG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active); + IPA_UT_TEST_FAIL_REPORT("open aggr after reset"); + return -EFAULT; + } + + memset(&ep_aggr, 0, sizeof(ep_aggr)); + rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr); + if (rc) { + IPA_UT_LOG("failed to configure aggr"); + IPA_UT_TEST_FAIL_REPORT("fail to disable aggr"); + return rc; + } + + return rc; +} + +/** + * To be run during test + * 1. queue OUT RE/buffer + * 2. wait for MSI on OUT + * 3. Do 1. and 2. till got MSI wait timeout (ch full) + * 4. channel reset + * disconnect and reconnect the prod and cons + * 5. queue IN RE/buffer and ring DB + * 6. wait for MSI - should get timeout as channels were reset + * 7. reset again + */ +static int ipa_mhi_test_channel_reset_ipa_holb(void) +{ + int rc; + int submitted_packets = 0; + bool timeout; + + IPA_UT_LOG("Entry\n"); + + rc = ipa_mhi_test_create_full_channel(&submitted_packets); + if (rc) { + IPA_UT_LOG("fail create full channel rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail create full channel"); + return rc; + } + + rc = ipa_mhi_test_channel_reset(); + if (rc) { + IPA_UT_LOG("channel reset failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("channel reset fail"); + return rc; + } + + /* invalidate spare register value (for msi) */ + memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size); + timeout = true; + /* queue RE for IN side and trigger doorbell */ + rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf, + test_mhi_ctx->xfer_ring_bufs, + test_mhi_ctx->ev_ring_bufs, + IPA_MHI_TEST_FIRST_CHANNEL_ID + 1, + &test_mhi_ctx->in_buffer, + 1, + true, + true, + false, + true); + + if (rc) { + IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail IN q re"); + return rc; + } + submitted_packets--; + + IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout); + if (!timeout) { + IPA_UT_LOG("transfer succeed although we had reset\n"); + IPA_UT_TEST_FAIL_REPORT("xfer succeed although we had reset"); + return -EFAULT; + } + + rc = ipa_mhi_test_channel_reset(); + if (rc) { + IPA_UT_LOG("channel reset failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("channel reset fail"); + return rc; + } + + return rc; +} + + +/** + * TEST: mhi reset in READY state + * 1. init to ready state (without start and connect) + * 2. reset (destroy and re-init) + * 2. 
destroy + */ +static int ipa_mhi_test_reset_ready_state(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(true); + if (rc) { + IPA_UT_LOG("init to Ready state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("fail to init to ready state"); + return rc; + } + + rc = ipa_mhi_test_reset(ctx, true); + if (rc) { + IPA_UT_LOG("reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi reset in M0 state + * 1. init to M0 state (with start and connect) + * 2. reset (destroy and re-init) + * 2. destroy + */ +static int ipa_mhi_test_reset_m0_state(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT + ("fail to init to M0 state (w/ start and connect)"); + return rc; + } + + rc = ipa_mhi_test_reset(ctx, false); + if (rc) { + IPA_UT_LOG("reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi in-loop reset in M0 state + * 1. init to M0 state (with start and connect) + * 2. reset (destroy and re-init) in-loop + * 3. destroy + */ +static int ipa_mhi_test_inloop_reset_m0_state(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT + ("fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_reset, rc, ctx, false); + if (rc) { + IPA_UT_LOG("in-loop reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT( + "reset (destroy/re-init) in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data with reset + * 1. init to M0 state (with start and connect) + * 2. reset (destroy and re-init) + * 3. loopback data + * 4. reset (destroy and re-init) + * 5. loopback data again + * 6. 
destroy + */ +static int ipa_mhi_test_loopback_data_with_reset(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + rc = ipa_mhi_test_reset(ctx, false); + if (rc) { + IPA_UT_LOG("reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_reset(ctx, false); + if (rc) { + IPA_UT_LOG("reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi reset in suspend state + * 1. init to M0 state (with start and connect) + * 2. suspend + * 3. reset (destroy and re-init) + * 4. destroy + */ +static int ipa_mhi_test_reset_on_suspend(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return -EFAULT; + } + + rc = ipa_mhi_test_suspend_and_reset(ctx); + if (rc) { + IPA_UT_LOG("suspend and reset failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("suspend and then reset failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed %d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return -EFAULT; + } + + return 0; +} + +/** + * TEST: mhi in-loop reset in suspend state + * 1. init to M0 state (with start and connect) + * 2. suspend + * 3. reset (destroy and re-init) + * 4. Do 2 and 3 in loop + * 3. destroy + */ +static int ipa_mhi_test_inloop_reset_on_suspend(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_and_reset, rc, ctx); + if (rc) { + IPA_UT_LOG("in-loop reset in suspend failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("fail to in-loop reset while suspend"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data with reset + * 1. 
init to M0 state (with start and connect) + * 2. suspend + * 3. reset (destroy and re-init) + * 4. loopback data + * 5. suspend + * 5. reset (destroy and re-init) + * 6. destroy + */ +static int ipa_mhi_test_loopback_data_with_reset_on_suspend(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + rc = ipa_mhi_test_suspend_and_reset(ctx); + if (rc) { + IPA_UT_LOG("suspend and reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("fail to suspend and then reset"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_suspend_and_reset(ctx); + if (rc) { + IPA_UT_LOG("suspend and reset failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("fail to suspend and then reset"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop suspend/resume + * 1. init to M0 state (with start and connect) + * 2. in loop suspend/resume + * 3. loopback data + * 4. destroy + */ +static int ipa_mhi_test_in_loop_suspend_resume(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_resume, rc); + if (rc) { + IPA_UT_LOG("suspend resume failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT("in loop suspend/resume failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop suspend/resume with aggr open + * 1. init to M0 state (with start and connect) + * 2. in loop suspend/resume with open aggr. + * 3. loopback data + * 4. 
destroy + */ +static int ipa_mhi_test_in_loop_suspend_resume_aggr_open(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_aggr_open, + rc, false); + if (rc) { + IPA_UT_LOG("suspend resume with aggr open failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop suspend/resume with open aggr failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop force suspend/resume with aggr open + * 1. init to M0 state (with start and connect) + * 2. in loop force suspend/resume with open aggr. + * 3. loopback data + * 4. destroy + */ +static int ipa_mhi_test_in_loop_force_suspend_resume_aggr_open(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_aggr_open, + rc, true); + if (rc) { + IPA_UT_LOG("force suspend resume with aggr open failed rc=%d", + rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop force suspend/resume with open aggr failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop suspend/host wakeup resume + * 1. init to M0 state (with start and connect) + * 2. in loop suspend/resume with host wakeup + * 3. loopback data + * 4. 
destroy + */ +static int ipa_mhi_test_in_loop_suspend_host_wakeup(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_host_wakeup, rc); + if (rc) { + IPA_UT_LOG("suspend host wakeup resume failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop suspend/resume with hsot wakeup failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop rejected suspend as full channel + * 1. init to M0 state (with start and connect) + * 2. in loop rejrected suspend + * 3. loopback data + * 4. destroy + */ +static int ipa_mhi_test_in_loop_reject_suspend_full_channel(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_full_channel, + rc, false); + if (rc) { + IPA_UT_LOG("full channel rejected suspend failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop rejected suspend due to full channel failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop rejected force suspend as full channel + * 1. init to M0 state (with start and connect) + * 2. in loop force rejected suspend + * 3. loopback data + * 4. 
destroy + */ +static int ipa_mhi_test_in_loop_reject_force_suspend_full_channel(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_full_channel, + rc, true); + if (rc) { + IPA_UT_LOG("full channel rejected force suspend failed rc=%d", + rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop force rejected suspend as full ch failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop suspend after wp manual update + * 1. init to M0 state (with start and connect) + * 2. in loop suspend after wp update + * 3. loopback data + * 4. destroy + */ +static int ipa_mhi_test_in_loop_suspend_resume_wp_update(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_wp_update, rc); + if (rc) { + IPA_UT_LOG("suspend after wp update failed rc=%d", rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop suspend after wp update failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop channel reset (disconnect/connect) + * 1. init to M0 state (with start and connect) + * 2. in loop channel reset (disconnect/connect) + * 3. loopback data + * 4. 
destroy + */ +static int ipa_mhi_test_in_loop_channel_reset(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset, rc); + if (rc) { + IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d", + rc); + IPA_UT_TEST_FAIL_REPORT("in loop channel reset failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop channel reset (disconnect/connect) + * 1. init to M0 state (with start and connect) + * 2. in loop channel reset (disconnect/connect) with open aggr + * 3. loopback data + * 4. destroy + */ +static int ipa_mhi_test_in_loop_channel_reset_aggr_open(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset_aggr_open, rc); + if (rc) { + IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d", + rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop channel reset with open aggr failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/** + * TEST: mhi loopback data after in loop channel reset (disconnect/connect) + * 1. init to M0 state (with start and connect) + * 2. in loop channel reset (disconnect/connect) with channel in HOLB + * 3. loopback data + * 4. 
destroy + */ +static int ipa_mhi_test_in_loop_channel_reset_ipa_holb(void *priv) +{ + int rc; + struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv; + + IPA_UT_LOG("Test Start\n"); + + if (unlikely(!ctx)) { + IPA_UT_LOG("No context"); + return -EFAULT; + } + + rc = ipa_mhi_test_initialize_driver(false); + if (rc) { + IPA_UT_LOG("init to M0 state failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT( + "fail to init to M0 state (w/ start and connect)"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset_ipa_holb, rc); + if (rc) { + IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d", + rc); + IPA_UT_TEST_FAIL_REPORT( + "in loop channel reset with channel HOLB failed"); + return rc; + } + + IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc); + if (rc) { + IPA_UT_LOG("data loopback failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed"); + return rc; + } + + rc = ipa_mhi_test_destroy(ctx); + if (rc) { + IPA_UT_LOG("destroy failed rc=%d\n", rc); + IPA_UT_TEST_FAIL_REPORT("destroy failed"); + return rc; + } + + return 0; +} + +/* Suite definition block */ +IPA_UT_DEFINE_SUITE_START(mhi, "MHI for GSI", + ipa_test_mhi_suite_setup, ipa_test_mhi_suite_teardown) +{ + IPA_UT_ADD_TEST(reset_ready_state, + "reset test in Ready state", + ipa_mhi_test_reset_ready_state, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(reset_m0_state, + "reset test in M0 state", + ipa_mhi_test_reset_m0_state, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(inloop_reset_m0_state, + "several reset iterations in M0 state", + ipa_mhi_test_inloop_reset_m0_state, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(loopback_data_with_reset_on_m0, + "reset before and after loopback data in M0 state", + ipa_mhi_test_loopback_data_with_reset, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(reset_on_suspend, + "reset test in suspend state", + ipa_mhi_test_reset_on_suspend, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(inloop_reset_on_suspend, + "several reset iterations in suspend state", + ipa_mhi_test_inloop_reset_on_suspend, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(loopback_data_with_reset_on_suspend, + "reset before and after loopback data in suspend state", + ipa_mhi_test_loopback_data_with_reset_on_suspend, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(suspend_resume, + "several suspend/resume iterations", + ipa_mhi_test_in_loop_suspend_resume, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(suspend_resume_with_open_aggr, + "several suspend/resume iterations with open aggregation frame", + ipa_mhi_test_in_loop_suspend_resume_aggr_open, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(force_suspend_resume_with_open_aggr, + "several force suspend/resume iterations with open aggregation frame", + ipa_mhi_test_in_loop_force_suspend_resume_aggr_open, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(suspend_resume_with_host_wakeup, + "several suspend and host wakeup resume iterations", + ipa_mhi_test_in_loop_suspend_host_wakeup, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(reject_suspend_channel_full, + "several rejected suspend iterations due to full channel", + ipa_mhi_test_in_loop_reject_suspend_full_channel, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(reject_force_suspend_channel_full, + "several rejected force suspend iterations due to full channel", + ipa_mhi_test_in_loop_reject_force_suspend_full_channel, + true, IPA_HW_v3_0, IPA_HW_MAX), + 
IPA_UT_ADD_TEST(suspend_resume_manual_wp_update, + "several suspend/resume iterations with after simulating writing by wp manual update", + ipa_mhi_test_in_loop_suspend_resume_wp_update, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(channel_reset, + "several channel reset (disconnect/connect) iterations", + ipa_mhi_test_in_loop_channel_reset, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(channel_reset_aggr_open, + "several channel reset (disconnect/connect) iterations with open aggregation frame", + ipa_mhi_test_in_loop_channel_reset_aggr_open, + true, IPA_HW_v3_0, IPA_HW_MAX), + IPA_UT_ADD_TEST(channel_reset_ipa_holb, + "several channel reset (disconnect/connect) iterations with channel in HOLB state", + ipa_mhi_test_in_loop_channel_reset_ipa_holb, + true, IPA_HW_v3_0, IPA_HW_MAX), +} IPA_UT_DEFINE_SUITE_END(mhi); + diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c new file mode 100644 index 000000000000..0b5fe0b7a03c --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c @@ -0,0 +1,1016 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include "../ipa_v3/ipa_i.h" +#include "ipa_ut_framework.h" +#include "ipa_ut_suite_list.h" +#include "ipa_ut_i.h" + + +#define IPA_UT_DEBUG_WRITE_BUF_SIZE 256 +#define IPA_UT_DEBUG_READ_BUF_SIZE 1024 + +#define IPA_UT_READ_WRITE_DBG_FILE_MODE 0664 + +/** + * struct ipa_ut_context - I/S context + * @inited: Will wait till IPA is ready. Will create the enable file + * @enabled: All tests and suite debugfs files are created + * @lock: Lock for mutual exclustion + * @ipa_dbgfs_root: IPA root debugfs folder + * @test_dbgfs_root: UT root debugfs folder. Sub-folder of IPA root + * @test_dbgfs_suites: Suites root debugfs folder. 
Sub-folder of UT root + */ +struct ipa_ut_context { + bool inited; + bool enabled; + struct mutex lock; + struct dentry *ipa_dbgfs_root; + struct dentry *test_dbgfs_root; + struct dentry *test_dbgfs_suites; +}; + +static ssize_t ipa_ut_dbgfs_enable_read(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos); +static ssize_t ipa_ut_dbgfs_enable_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos); +static ssize_t ipa_ut_dbgfs_test_read(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos); +static ssize_t ipa_ut_dbgfs_test_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos); +static int ipa_ut_dbgfs_all_test_open(struct inode *inode, + struct file *filp); +static int ipa_ut_dbgfs_regression_test_open(struct inode *inode, + struct file *filp); +static ssize_t ipa_ut_dbgfs_meta_test_read(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos); +static ssize_t ipa_ut_dbgfs_meta_test_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos); + + +static const struct file_operations ipa_ut_dbgfs_enable_fops = { + .read = ipa_ut_dbgfs_enable_read, + .write = ipa_ut_dbgfs_enable_write, +}; +static const struct file_operations ipa_ut_dbgfs_test_fops = { + .read = ipa_ut_dbgfs_test_read, + .write = ipa_ut_dbgfs_test_write, +}; +static const struct file_operations ipa_ut_dbgfs_all_test_fops = { + .open = ipa_ut_dbgfs_all_test_open, + .read = ipa_ut_dbgfs_meta_test_read, + .write = ipa_ut_dbgfs_meta_test_write, +}; +static const struct file_operations ipa_ut_dbgfs_regression_test_fops = { + .open = ipa_ut_dbgfs_regression_test_open, + .read = ipa_ut_dbgfs_meta_test_read, + .write = ipa_ut_dbgfs_meta_test_write, +}; + +static struct ipa_ut_context *ipa_ut_ctx; +char *_IPA_UT_TEST_LOG_BUF_NAME; +struct ipa_ut_tst_fail_report + _IPA_UT_TEST_FAIL_REPORT_DATA[_IPA_UT_TEST_FAIL_REPORT_SIZE]; +u32 _IPA_UT_TEST_FAIL_REPORT_IDX; + +/** + * ipa_ut_print_log_buf() - Dump given buffer via kernel error mechanism + * @buf: Buffer to print + * + * Tokenize the string according to new-line and then print + * + * Note: Assumes lock acquired + */ +static void ipa_ut_print_log_buf(char *buf) +{ + char *token; + + if (!buf) { + IPA_UT_ERR("Input error - no buf\n"); + return; + } + + for (token = strsep(&buf, "\n"); token; token = strsep(&buf, "\n")) + pr_err("%s\n", token); +} + +/** + * ipa_ut_dump_fail_report_stack() - dump the report info stack via kernel err + * + * Note: Assumes lock acquired + */ +static void ipa_ut_dump_fail_report_stack(void) +{ + int i; + + IPA_UT_DBG("Entry\n"); + + if (_IPA_UT_TEST_FAIL_REPORT_IDX == 0) { + IPA_UT_DBG("no report info\n"); + return; + } + + for (i = 0 ; i < _IPA_UT_TEST_FAIL_REPORT_IDX; i++) { + if (i == 0) + pr_err("***** FAIL INFO STACK *****:\n"); + else + pr_err("Called From:\n"); + + pr_err("\tFILE = %s\n\tFUNC = %s()\n\tLINE = %d\n", + _IPA_UT_TEST_FAIL_REPORT_DATA[i].file, + _IPA_UT_TEST_FAIL_REPORT_DATA[i].func, + _IPA_UT_TEST_FAIL_REPORT_DATA[i].line); + pr_err("\t%s\n", _IPA_UT_TEST_FAIL_REPORT_DATA[i].info); + } +} + +/** + * ipa_ut_show_suite_exec_summary() - Show tests run summary + * @suite: suite to print its running summary + * + * Print list of succeeded tests, failed tests and skipped tests + * + * Note: Assumes lock acquired + */ +static void ipa_ut_show_suite_exec_summary(const struct ipa_ut_suite *suite) +{ + int i; + + IPA_UT_DBG("Entry\n"); + + ipa_assert_on(!suite); + + pr_info("\n\n"); + pr_info("\t Suite '%s' summary\n", 
suite->meta_data->name);
+ pr_info("===========================\n");
+ pr_info("Successful tests\n");
+ pr_info("----------------\n");
+ for (i = 0 ; i < suite->tests_cnt ; i++) {
+ if (suite->tests[i].res != IPA_UT_TEST_RES_SUCCESS)
+ continue;
+ pr_info("\t%s\n", suite->tests[i].name);
+ }
+ pr_info("\nFailed tests\n");
+ pr_info("------------\n");
+ for (i = 0 ; i < suite->tests_cnt ; i++) {
+ if (suite->tests[i].res != IPA_UT_TEST_RES_FAIL)
+ continue;
+ pr_info("\t%s\n", suite->tests[i].name);
+ }
+ pr_info("\nSkipped tests\n");
+ pr_info("-------------\n");
+ for (i = 0 ; i < suite->tests_cnt ; i++) {
+ if (suite->tests[i].res != IPA_UT_TEST_RES_SKIP)
+ continue;
+ pr_info("\t%s\n", suite->tests[i].name);
+ }
+ pr_info("\n");
+}
+
+/**
+ * ipa_ut_dbgfs_meta_test_write() - Debugfs write func for a meta test
+ * @params: write fops
+ *
+ * Used to run all/regression tests in a suite
+ * Create log buffer that the test can use to store ongoing logs
+ * IPA clocks need to be voted.
+ * Run setup() once before running the tests and teardown() once after
+ * If no such call-backs exist, skip them; if one fails, fail the suite
+ * Print test progress while running
+ * Test log and fail report will be shown only if the test failed.
+ * Finally show a summary of the suite tests run
+ *
+ * Note: If the test's supported IPA H/W version does not match, skip it
+ * If a test lacks a run function, skip it
+ * If a test is not a regression test and this is a regression run, skip it
+ * Note: Running mode: Do not stop running on failure
+ *
+ * Return: Negative in failure, given characters amount in success
+ */
+static ssize_t ipa_ut_dbgfs_meta_test_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct ipa_ut_suite *suite;
+ int i;
+ enum ipa_hw_type ipa_ver;
+ int rc = 0;
+ long meta_type;
+ bool tst_fail = false;
+
+ IPA_UT_DBG("Entry\n");
+
+ mutex_lock(&ipa_ut_ctx->lock);
+ suite = file->f_inode->i_private;
+ ipa_assert_on(!suite);
+ meta_type = (long)(file->private_data);
+ IPA_UT_DBG("Meta test type %ld\n", meta_type);
+
+ _IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE,
+ GFP_KERNEL);
+ if (!_IPA_UT_TEST_LOG_BUF_NAME) {
+ IPA_UT_ERR("failed to allocate %d bytes\n",
+ _IPA_UT_TEST_LOG_BUF_SIZE);
+ rc = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ if (!suite->tests_cnt || !suite->tests) {
+ pr_info("No tests for suite '%s'\n", suite->meta_data->name);
+ goto free_mem;
+ }
+
+ ipa_ver = ipa_get_hw_type();
+
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT");
+
+ if (suite->meta_data->setup) {
+ pr_info("*** Suite '%s': Run setup ***\n",
+ suite->meta_data->name);
+ rc = suite->meta_data->setup(&suite->meta_data->priv);
+ if (rc) {
+ IPA_UT_ERR("Setup failed for suite %s\n",
+ suite->meta_data->name);
+ rc = -EFAULT;
+ goto release_clock;
+ }
+ } else {
+ pr_info("*** Suite '%s': No Setup ***\n",
+ suite->meta_data->name);
+ }
+
+ pr_info("*** Suite '%s': Run %s tests ***\n\n",
+ suite->meta_data->name,
+ meta_type == IPA_UT_META_TEST_REGRESSION ?
"regression" : "all" + ); + for (i = 0 ; i < suite->tests_cnt ; i++) { + if (meta_type == IPA_UT_META_TEST_REGRESSION && + !suite->tests[i].run_in_regression) { + pr_info( + "*** Test '%s': Skip - Not in regression ***\n\n" + , suite->tests[i].name); + suite->tests[i].res = IPA_UT_TEST_RES_SKIP; + continue; + } + if (suite->tests[i].min_ipa_hw_ver > ipa_ver || + suite->tests[i].max_ipa_hw_ver < ipa_ver) { + pr_info( + "*** Test '%s': Skip - IPA VER mismatch ***\n\n" + , suite->tests[i].name); + suite->tests[i].res = IPA_UT_TEST_RES_SKIP; + continue; + } + if (!suite->tests[i].run) { + pr_info( + "*** Test '%s': Skip - No Run function ***\n\n" + , suite->tests[i].name); + suite->tests[i].res = IPA_UT_TEST_RES_SKIP; + continue; + } + + _IPA_UT_TEST_LOG_BUF_NAME[0] = '\0'; + _IPA_UT_TEST_FAIL_REPORT_IDX = 0; + pr_info("*** Test '%s': Running... ***\n", + suite->tests[i].name); + rc = suite->tests[i].run(suite->meta_data->priv); + if (rc) { + tst_fail = true; + suite->tests[i].res = IPA_UT_TEST_RES_FAIL; + ipa_ut_print_log_buf(_IPA_UT_TEST_LOG_BUF_NAME); + } else { + suite->tests[i].res = IPA_UT_TEST_RES_SUCCESS; + } + + pr_info(">>>>>>**** TEST '%s': %s ****<<<<<<\n", + suite->tests[i].name, tst_fail ? "FAIL" : "SUCCESS"); + + if (tst_fail) + ipa_ut_dump_fail_report_stack(); + + pr_info("\n"); + } + + if (suite->meta_data->teardown) { + pr_info("*** Suite '%s': Run Teardown ***\n", + suite->meta_data->name); + rc = suite->meta_data->teardown(suite->meta_data->priv); + if (rc) { + IPA_UT_ERR("Teardown failed for suite %s\n", + suite->meta_data->name); + rc = -EFAULT; + goto release_clock; + } + } else { + pr_info("*** Suite '%s': No Teardown ***\n", + suite->meta_data->name); + } + + ipa_ut_show_suite_exec_summary(suite); + +release_clock: + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT"); +free_mem: + kfree(_IPA_UT_TEST_LOG_BUF_NAME); + _IPA_UT_TEST_LOG_BUF_NAME = NULL; +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + return ((!rc && !tst_fail) ? count : -EFAULT); +} + +/** + * ipa_ut_dbgfs_meta_test_read() - Debugfs read func for a meta test + * @params: read fops + * + * Meta test, is a test that describes other test or bunch of tests. + * for example, the 'all' test. Running this test will run all + * the tests in the suite. + * + * Show information regard the suite. E.g. 
name and description
+ * If regression - list the regression tests' names
+ *
+ * Return: Amount of characters written to user space buffer
+ */
+static ssize_t ipa_ut_dbgfs_meta_test_read(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ char *buf;
+ struct ipa_ut_suite *suite;
+ int nbytes;
+ ssize_t cnt;
+ long meta_type;
+ int i;
+
+ IPA_UT_DBG("Entry\n");
+
+ mutex_lock(&ipa_ut_ctx->lock);
+ suite = file->f_inode->i_private;
+ ipa_assert_on(!suite);
+ meta_type = (long)(file->private_data);
+ IPA_UT_DBG("Meta test type %ld\n", meta_type);
+
+ buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ IPA_UT_ERR("failed to allocate %d bytes\n",
+ IPA_UT_DEBUG_READ_BUF_SIZE);
+ cnt = 0;
+ goto unlock_mutex;
+ }
+
+ if (meta_type == IPA_UT_META_TEST_ALL) {
+ nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE,
+ "\tMeta-test running all the tests in the suite:\n"
+ "\tSuite Name: %s\n"
+ "\tDescription: %s\n"
+ "\tNumber of test in suite: %zu\n",
+ suite->meta_data->name,
+ suite->meta_data->desc ?: "",
+ suite->tests_cnt);
+ } else {
+ nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE,
+ "\tMeta-test running regression tests in the suite:\n"
+ "\tSuite Name: %s\n"
+ "\tDescription: %s\n"
+ "\tRegression tests:\n",
+ suite->meta_data->name,
+ suite->meta_data->desc ?: "");
+ for (i = 0 ; i < suite->tests_cnt ; i++) {
+ if (!suite->tests[i].run_in_regression)
+ continue;
+ nbytes += scnprintf(buf + nbytes,
+ IPA_UT_DEBUG_READ_BUF_SIZE - nbytes,
+ "\t\t%s\n", suite->tests[i].name);
+ }
+ }
+
+ cnt = simple_read_from_buffer(ubuf, count, ppos, buf, nbytes);
+ kfree(buf);
+
+unlock_mutex:
+ mutex_unlock(&ipa_ut_ctx->lock);
+ return cnt;
+}
+
+/**
+ * ipa_ut_dbgfs_regression_test_open() - Debugfs open function for
+ * 'regression' tests
+ * @params: open fops
+ *
+ * Mark "Regression tests" for later meta-test operations.
+ *
+ * Return: Zero (always success).
+ */
+static int ipa_ut_dbgfs_regression_test_open(struct inode *inode,
+ struct file *filp)
+{
+ IPA_UT_DBG("Entry\n");
+
+ filp->private_data = (void *)(IPA_UT_META_TEST_REGRESSION);
+
+ return 0;
+}
+
+/**
+ * ipa_ut_dbgfs_all_test_open() - Debugfs open function for 'all' tests
+ * @params: open fops
+ *
+ * Mark "All tests" for later meta-test operations.
+ *
+ * Return: Zero (always success).
+ */
+static int ipa_ut_dbgfs_all_test_open(struct inode *inode,
+ struct file *filp)
+{
+ IPA_UT_DBG("Entry\n");
+
+ filp->private_data = (void *)(IPA_UT_META_TEST_ALL);
+
+ return 0;
+}
+
+/**
+ * ipa_ut_dbgfs_test_write() - Debugfs write function for a test
+ * @params: write fops
+ *
+ * Used to run a test.
+ * Create log buffer that the test can use to store ongoing logs
+ * IPA clocks need to be voted.
+ * Run setup() before the test and teardown() after the test.
+ * If no such call-backs exist, skip them; if one fails, fail the test
+ * If all succeeds, nothing is printed to the user
+ * If it failed, the test logs and failure report will be printed to the user
+ *
+ * Note: The test must have a run function and its supported IPA H/W version
+ * range must match. Otherwise the test will fail.
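+ *
+ * Illustrative usage from user space (a sketch; the path assumes debugfs is
+ * mounted at /sys/kernel/debug and that the IPA debugfs root directory is
+ * named "ipa"):
+ *
+ * echo 1 > /sys/kernel/debug/ipa/test/enable
+ * cat /sys/kernel/debug/ipa/test/suites/mhi/reset_ready_state
+ * echo 1 > /sys/kernel/debug/ipa/test/suites/mhi/reset_ready_state
+ *
+ * Any write to a test file triggers a run; the written value is ignored.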
+ * + * Return: Negative in failure, given characters amount in success + */ +static ssize_t ipa_ut_dbgfs_test_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct ipa_ut_test *test; + struct ipa_ut_suite *suite; + bool tst_fail = false; + int rc = 0; + enum ipa_hw_type ipa_ver; + + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + test = file->f_inode->i_private; + ipa_assert_on(!test); + + _IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE, + GFP_KERNEL); + if (!_IPA_UT_TEST_LOG_BUF_NAME) { + IPA_UT_ERR("failed to allocate %d bytes\n", + _IPA_UT_TEST_LOG_BUF_SIZE); + rc = -ENOMEM; + goto unlock_mutex; + } + + if (!test->run) { + IPA_UT_ERR("*** Test %s - No run func ***\n", + test->name); + rc = -EFAULT; + goto free_mem; + } + + ipa_ver = ipa_get_hw_type(); + if (test->min_ipa_hw_ver > ipa_ver || + test->max_ipa_hw_ver < ipa_ver) { + IPA_UT_ERR("Cannot run test %s on IPA HW Ver %s\n", + test->name, ipa_get_version_string(ipa_ver)); + rc = -EFAULT; + goto free_mem; + } + + suite = test->suite; + if (!suite || !suite->meta_data) { + IPA_UT_ERR("test %s with invalid suite\n", test->name); + rc = -EINVAL; + goto free_mem; + } + + IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT"); + + if (suite->meta_data->setup) { + IPA_UT_DBG("*** Suite '%s': Run setup ***\n", + suite->meta_data->name); + rc = suite->meta_data->setup(&suite->meta_data->priv); + if (rc) { + IPA_UT_ERR("Setup failed for suite %s\n", + suite->meta_data->name); + rc = -EFAULT; + goto release_clock; + } + } else { + IPA_UT_DBG("*** Suite '%s': No Setup ***\n", + suite->meta_data->name); + } + + IPA_UT_DBG("*** Test '%s': Running... ***\n", test->name); + _IPA_UT_TEST_FAIL_REPORT_IDX = 0; + rc = test->run(suite->meta_data->priv); + if (rc) + tst_fail = true; + IPA_UT_DBG("*** Test %s - ***\n", tst_fail ? "FAIL" : "SUCCESS"); + if (tst_fail) { + pr_info("=================>>>>>>>>>>>\n"); + ipa_ut_print_log_buf(_IPA_UT_TEST_LOG_BUF_NAME); + pr_info("**** TEST %s FAILED ****\n", test->name); + ipa_ut_dump_fail_report_stack(); + pr_info("<<<<<<<<<<<=================\n"); + } + + if (suite->meta_data->teardown) { + IPA_UT_DBG("*** Suite '%s': Run Teardown ***\n", + suite->meta_data->name); + rc = suite->meta_data->teardown(suite->meta_data->priv); + if (rc) { + IPA_UT_ERR("Teardown failed for suite %s\n", + suite->meta_data->name); + rc = -EFAULT; + goto release_clock; + } + } else { + IPA_UT_DBG("*** Suite '%s': No Teardown ***\n", + suite->meta_data->name); + } + +release_clock: + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT"); +free_mem: + kfree(_IPA_UT_TEST_LOG_BUF_NAME); + _IPA_UT_TEST_LOG_BUF_NAME = NULL; +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + return ((!rc && !tst_fail) ? count : -EFAULT); +} + +/** + * ipa_ut_dbgfs_test_read() - Debugfs read function for a test + * @params: read fops + * + * print information regard the test. E.g. 
name and description + * + * Return: Amount of characters written to user space buffer + */ +static ssize_t ipa_ut_dbgfs_test_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + char *buf; + struct ipa_ut_test *test; + int nbytes; + ssize_t cnt; + + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + test = file->f_inode->i_private; + ipa_assert_on(!test); + + buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE, GFP_KERNEL); + if (!buf) { + IPA_UT_ERR("failed to allocate %d bytes\n", + IPA_UT_DEBUG_READ_BUF_SIZE); + cnt = 0; + goto unlock_mutex; + } + + nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE, + "\t Test Name: %s\n" + "\t Description: %s\n" + "\t Suite Name: %s\n" + "\t Run In Regression: %s\n" + "\t Supported IPA versions: [%s -> %s]\n", + test->name, test->desc ?: "", test->suite->meta_data->name, + test->run_in_regression ? "Yes" : "No", + ipa_get_version_string(test->min_ipa_hw_ver), + test->max_ipa_hw_ver == IPA_HW_MAX ? "MAX" : + ipa_get_version_string(test->max_ipa_hw_ver)); + + if (nbytes > count) + IPA_UT_ERR("User buf too small - return partial info\n"); + + cnt = simple_read_from_buffer(ubuf, count, ppos, buf, nbytes); + kfree(buf); + +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + return cnt; +} + +/** + * ipa_ut_framework_load_suites() - Load tests and expose them to user space + * + * Creates debugfs folder for each suite and then file for each test in it. + * Create debugfs "all" file for each suite for meta-test to run all tests. + * + * Note: Assumes lock acquired + * + * Return: Zero in success, otherwise in failure + */ +int ipa_ut_framework_load_suites(void) +{ + int suite_idx; + int tst_idx; + struct ipa_ut_suite *suite; + struct dentry *s_dent; + struct dentry *f_dent; + + IPA_UT_DBG("Entry\n"); + + for (suite_idx = IPA_UT_SUITE_FIRST_INDEX; + suite_idx < IPA_UT_SUITES_COUNT; suite_idx++) { + suite = IPA_UT_GET_SUITE(suite_idx); + + if (!suite->meta_data->name) { + IPA_UT_ERR("No suite name\n"); + return -EFAULT; + } + + s_dent = debugfs_create_dir(suite->meta_data->name, + ipa_ut_ctx->test_dbgfs_suites); + + if (!s_dent || IS_ERR(s_dent)) { + IPA_UT_ERR("fail create dbg entry - suite %s\n", + suite->meta_data->name); + return -EFAULT; + } + + for (tst_idx = 0; tst_idx < suite->tests_cnt ; tst_idx++) { + if (!suite->tests[tst_idx].name) { + IPA_UT_ERR("No test name on suite %s\n", + suite->meta_data->name); + return -EFAULT; + } + f_dent = debugfs_create_file( + suite->tests[tst_idx].name, + IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent, + &suite->tests[tst_idx], + &ipa_ut_dbgfs_test_fops); + if (!f_dent || IS_ERR(f_dent)) { + IPA_UT_ERR("fail create dbg entry - tst %s\n", + suite->tests[tst_idx].name); + return -EFAULT; + } + } + + /* entry for meta-test all to run all tests in suites */ + f_dent = debugfs_create_file(_IPA_UT_RUN_ALL_TEST_NAME, + IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent, + suite, &ipa_ut_dbgfs_all_test_fops); + if (!f_dent || IS_ERR(f_dent)) { + IPA_UT_ERR("fail to create dbg entry - %s\n", + _IPA_UT_RUN_ALL_TEST_NAME); + return -EFAULT; + } + + /* + * entry for meta-test regression to run all regression + * tests in suites + */ + f_dent = debugfs_create_file(_IPA_UT_RUN_REGRESSION_TEST_NAME, + IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent, + suite, &ipa_ut_dbgfs_regression_test_fops); + if (!f_dent || IS_ERR(f_dent)) { + IPA_UT_ERR("fail to create dbg entry - %s\n", + _IPA_UT_RUN_ALL_TEST_NAME); + return -EFAULT; + } + } + + return 0; +} + +/** + * ipa_ut_framework_enable() - Enable the framework + * + * Creates the tests 
and suites debugfs entries and load them. + * This will expose the tests to user space. + * + * Return: Zero in success, otherwise in failure + */ +static int ipa_ut_framework_enable(void) +{ + int ret = 0; + + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + + if (ipa_ut_ctx->enabled) { + IPA_UT_ERR("Already enabled\n"); + goto unlock_mutex; + } + + ipa_ut_ctx->test_dbgfs_suites = debugfs_create_dir("suites", + ipa_ut_ctx->test_dbgfs_root); + if (!ipa_ut_ctx->test_dbgfs_suites || + IS_ERR(ipa_ut_ctx->test_dbgfs_suites)) { + IPA_UT_ERR("failed to create suites debugfs dir\n"); + ret = -EFAULT; + goto unlock_mutex; + } + + if (ipa_ut_framework_load_suites()) { + IPA_UT_ERR("failed to load the suites into debugfs\n"); + ret = -EFAULT; + goto fail_clean_suites_dbgfs; + } + + ipa_ut_ctx->enabled = true; + goto unlock_mutex; + +fail_clean_suites_dbgfs: + debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_suites); +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + return ret; +} + +/** + * ipa_ut_framework_disable() - Disable the framework + * + * Remove the tests and suites debugfs exposure. + * + * Return: Zero in success, otherwise in failure + */ +static int ipa_ut_framework_disable(void) +{ + int ret = 0; + + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + + if (!ipa_ut_ctx->enabled) { + IPA_UT_ERR("Already disabled\n"); + goto unlock_mutex; + } + + debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_suites); + + ipa_ut_ctx->enabled = false; + +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + return ret; +} + +/** + * ipa_ut_dbgfs_enable_write() - Debugfs enable file write fops + * @params: write fops + * + * Input should be number. If 0, then disable. Otherwise enable. + * + * Return: if failed then negative value, if succeeds, amount of given chars + */ +static ssize_t ipa_ut_dbgfs_enable_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + char lcl_buf[IPA_UT_DEBUG_WRITE_BUF_SIZE]; + s8 option = 0; + int ret; + + IPA_UT_DBG("Entry\n"); + + if (sizeof(lcl_buf) < count + 1) { + IPA_UT_ERR("No enough space\n"); + return -E2BIG; + } + + if (copy_from_user(lcl_buf, buf, count)) { + IPA_UT_ERR("fail to copy buf from user space\n"); + return -EFAULT; + } + + lcl_buf[count] = '\0'; + if (kstrtos8(lcl_buf, 0, &option)) { + IPA_UT_ERR("fail convert str to s8\n"); + return -EINVAL; + } + + if (option == 0) + ret = ipa_ut_framework_disable(); + else + ret = ipa_ut_framework_enable(); + + return ret ?: count; +} + +/** + * ipa_ut_dbgfs_enable_read() - Debugfs enable file read fops + * @params: read fops + * + * To show to user space if the I/S is enabled or disabled. + * + * Return: amount of characters returned to user space + */ +static ssize_t ipa_ut_dbgfs_enable_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + const char *status; + + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + status = ipa_ut_ctx->enabled ? + "Enabled - Write 0 to disable\n" : + "Disabled - Write 1 to enable\n"; + mutex_unlock(&ipa_ut_ctx->lock); + return simple_read_from_buffer(ubuf, count, ppos, + status, strlen(status)); +} + +/** + * ipa_ut_framework_init() - Unit-tests framework initialization + * + * Complete tests initialization: Each tests needs to point to it's + * corresponing suite. + * Creates the framework debugfs root directory under IPA directory. + * Create enable debugfs file - to enable/disable the framework. 
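+ *
+ * The resulting debugfs layout, once enabled, is roughly (a sketch; <root>
+ * stands for the IPA debugfs root directory):
+ *
+ * <root>/test/enable
+ * <root>/test/suites/<suite>/<test name>
+ * <root>/test/suites/<suite>/all
+ * <root>/test/suites/<suite>/regression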
+ * + * Return: Zero in success, otherwise in failure + */ +static int ipa_ut_framework_init(void) +{ + struct dentry *dfile_enable; + int ret; + int suite_idx; + int test_idx; + struct ipa_ut_suite *suite; + + IPA_UT_DBG("Entry\n"); + + ipa_assert_on(!ipa_ut_ctx); + + ipa_ut_ctx->ipa_dbgfs_root = ipa_debugfs_get_root(); + if (!ipa_ut_ctx->ipa_dbgfs_root) { + IPA_UT_ERR("No IPA debugfs root entry\n"); + return -EFAULT; + } + + mutex_lock(&ipa_ut_ctx->lock); + + /* tests needs to point to their corresponding suites structures */ + for (suite_idx = IPA_UT_SUITE_FIRST_INDEX; + suite_idx < IPA_UT_SUITES_COUNT; suite_idx++) { + suite = IPA_UT_GET_SUITE(suite_idx); + ipa_assert_on(!suite); + if (!suite->tests) { + IPA_UT_DBG("No tests for suite %s\n", + suite->meta_data->name); + continue; + } + for (test_idx = 0; test_idx < suite->tests_cnt; test_idx++) { + suite->tests[test_idx].suite = suite; + IPA_UT_DBG("Updating test %s info for suite %s\n", + suite->tests[test_idx].name, + suite->meta_data->name); + } + } + + ipa_ut_ctx->test_dbgfs_root = debugfs_create_dir("test", + ipa_ut_ctx->ipa_dbgfs_root); + if (!ipa_ut_ctx->test_dbgfs_root || + IS_ERR(ipa_ut_ctx->test_dbgfs_root)) { + IPA_UT_ERR("failed to create test debugfs dir\n"); + ret = -EFAULT; + goto unlock_mutex; + } + + dfile_enable = debugfs_create_file("enable", + IPA_UT_READ_WRITE_DBG_FILE_MODE, + ipa_ut_ctx->test_dbgfs_root, 0, &ipa_ut_dbgfs_enable_fops); + if (!dfile_enable || IS_ERR(dfile_enable)) { + IPA_UT_ERR("failed to create enable debugfs file\n"); + ret = -EFAULT; + goto fail_clean_dbgfs; + } + + _IPA_UT_TEST_FAIL_REPORT_IDX = 0; + ipa_ut_ctx->inited = true; + IPA_UT_DBG("Done\n"); + ret = 0; + goto unlock_mutex; + +fail_clean_dbgfs: + debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_root); +unlock_mutex: + mutex_unlock(&ipa_ut_ctx->lock); + return ret; +} + +/** + * ipa_ut_framework_destroy() - Destroy the UT framework info + * + * Disable it if enabled. + * Remove the debugfs entries using the root entry + */ +static void ipa_ut_framework_destroy(void) +{ + IPA_UT_DBG("Entry\n"); + + mutex_lock(&ipa_ut_ctx->lock); + if (ipa_ut_ctx->enabled) + ipa_ut_framework_disable(); + if (ipa_ut_ctx->inited) + debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_root); + mutex_unlock(&ipa_ut_ctx->lock); +} + +/** + * ipa_ut_ipa_ready_cb() - IPA ready CB + * + * Once IPA is ready starting initializing the unit-test framework + */ +static void ipa_ut_ipa_ready_cb(void *user_data) +{ + IPA_UT_DBG("Entry\n"); + (void)ipa_ut_framework_init(); +} + +/** + * ipa_ut_module_init() - Module init + * + * Create the framework context, wait for IPA driver readiness + * and Initialize it. + * If IPA driver already ready, continue initialization immediately. + * if not, wait for IPA ready notification by IPA driver context + */ +static int __init ipa_ut_module_init(void) +{ + int ret; + + IPA_UT_INFO("Loading IPA test module...\n"); + + ipa_ut_ctx = kzalloc(sizeof(struct ipa_ut_context), GFP_KERNEL); + if (!ipa_ut_ctx) { + IPA_UT_ERR("Failed to allocate ctx\n"); + return -ENOMEM; + } + mutex_init(&ipa_ut_ctx->lock); + + if (!ipa_is_ready()) { + IPA_UT_DBG("IPA driver not ready, registering callback\n"); + ret = ipa_register_ipa_ready_cb(ipa_ut_ipa_ready_cb, NULL); + + /* + * If we received -EEXIST, IPA has initialized. So we need + * to continue the initing process. 
+ */ + if (ret != -EEXIST) { + if (ret) { + IPA_UT_ERR("IPA CB reg failed - %d\n", ret); + kfree(ipa_ut_ctx); + ipa_ut_ctx = NULL; + } + return ret; + } + } + + ret = ipa_ut_framework_init(); + if (ret) { + IPA_UT_ERR("framework init failed\n"); + kfree(ipa_ut_ctx); + ipa_ut_ctx = NULL; + } + return ret; +} + +/** + * ipa_ut_module_exit() - Module exit function + * + * Destroys the Framework and removes its context + */ +static void ipa_ut_module_exit(void) +{ + IPA_UT_DBG("Entry\n"); + + if (!ipa_ut_ctx) + return; + + ipa_ut_framework_destroy(); + kfree(ipa_ut_ctx); + ipa_ut_ctx = NULL; +} + +module_init(ipa_ut_module_init); +module_exit(ipa_ut_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA Unit Test module"); diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.h b/drivers/platform/msm/ipa/test/ipa_ut_framework.h new file mode 100644 index 000000000000..4fcd9f341c69 --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.h @@ -0,0 +1,240 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_UT_FRAMEWORK_H_ +#define _IPA_UT_FRAMEWORK_H_ + +#include +#include "../ipa_common_i.h" +#include "ipa_ut_i.h" + +#define IPA_UT_DRV_NAME "ipa_ut" + +#define IPA_UT_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_UT_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UT_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_UT_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UT_ERR(fmt, args...) \ + do { \ + pr_err(IPA_UT_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_UT_INFO(fmt, args...) \ + do { \ + pr_info(IPA_UT_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +/** + * struct ipa_ut_tst_fail_report - Information on test failure + * @valid: When a test posts a report, valid will be marked true + * @file: File name containing the failed test. + * @line: Number of line in the file where the test failed. + * @func: Function where the test failed in. + * @info: Information about the failure. + */ +struct ipa_ut_tst_fail_report { + bool valid; + const char *file; + int line; + const char *func; + const char *info; +}; + +/** + * Report on test failure + * To be used by tests to report a point were a test fail. + * Failures are saved in a stack manner. 
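+ *
+ * A test typically combines it with IPA_UT_LOG(); for example (a sketch,
+ * with hypothetical names):
+ *
+ * static int ipa_test_foo_run(void *priv)
+ * {
+ * 	IPA_UT_LOG("starting foo\n");
+ * 	if (ipa_test_foo_do_transfer(priv)) {
+ * 		IPA_UT_TEST_FAIL_REPORT("foo transfer failed");
+ * 		return -EFAULT;
+ * 	}
+ * 	return 0;
+ * }
+ *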
+ * Dumping the failure info will dump the fail reports
+ * from all the functions in the calling stack
+ */
+#define IPA_UT_TEST_FAIL_REPORT(__info) \
+ do { \
+ extern struct ipa_ut_tst_fail_report \
+ _IPA_UT_TEST_FAIL_REPORT_DATA \
+ [_IPA_UT_TEST_FAIL_REPORT_SIZE]; \
+ extern u32 _IPA_UT_TEST_FAIL_REPORT_IDX; \
+ struct ipa_ut_tst_fail_report *entry; \
+ if (_IPA_UT_TEST_FAIL_REPORT_IDX >= \
+ _IPA_UT_TEST_FAIL_REPORT_SIZE) \
+ break; \
+ entry = &(_IPA_UT_TEST_FAIL_REPORT_DATA \
+ [_IPA_UT_TEST_FAIL_REPORT_IDX]); \
+ entry->file = __FILENAME__; \
+ entry->line = __LINE__; \
+ entry->func = __func__; \
+ if (__info) \
+ entry->info = __info; \
+ else \
+ entry->info = ""; \
+ _IPA_UT_TEST_FAIL_REPORT_IDX++; \
+ } while (0)
+
+/**
+ * To be used by tests to log progress and ongoing information
+ * Logs are not printed to the user, but saved to a buffer.
+ * The I/S shall print the buffer on different occasions - e.g. on test failure
+ */
+#define IPA_UT_LOG(fmt, args...) \
+ do { \
+ extern char *_IPA_UT_TEST_LOG_BUF_NAME; \
+ char __buf[512]; \
+ IPA_UT_DBG(fmt, ## args); \
+ if (!_IPA_UT_TEST_LOG_BUF_NAME) {\
+ pr_err(IPA_UT_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ break; \
+ } \
+ scnprintf(__buf, sizeof(__buf), \
+ " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ strlcat(_IPA_UT_TEST_LOG_BUF_NAME, __buf, \
+ _IPA_UT_TEST_LOG_BUF_SIZE); \
+ } while (0)
+
+/**
+ * struct ipa_ut_suite_meta - Suite meta-data
+ * @name: Suite unique name
+ * @desc: Suite description
+ * @setup: Setup Call-back of the suite
+ * @teardown: Teardown Call-back of the suite
+ * @priv: Private pointer of the suite
+ *
+ * Setup/Teardown will be called once for the suite when running its tests.
+ * priv field is shared between the Setup/Teardown and the tests
+ */
+struct ipa_ut_suite_meta {
+ char *name;
+ char *desc;
+ int (*setup)(void **ppriv);
+ int (*teardown)(void *priv);
+ void *priv;
+};
+
+/* Test suite data structure declaration */
+struct ipa_ut_suite;
+
+/**
+ * struct ipa_ut_test - Test information
+ * @name: Test name
+ * @desc: Test description
+ * @run: Test execution call-back
+ * @run_in_regression: Whether to run this test as part of regression
+ * @min_ipa_hw_ver: Minimum IPA H/W version where the test is supported
+ * @max_ipa_hw_ver: Maximum IPA H/W version where the test is supported
+ * @suite: Pointer to suite containing this test
+ * @res: Test execution result. Will be updated after running a test as part
+ * of suite tests run
+ */
+struct ipa_ut_test {
+ char *name;
+ char *desc;
+ int (*run)(void *priv);
+ bool run_in_regression;
+ int min_ipa_hw_ver;
+ int max_ipa_hw_ver;
+ struct ipa_ut_suite *suite;
+ enum ipa_ut_test_result res;
+};
+
+/**
+ * struct ipa_ut_suite - Suite information
+ * @meta_data: Pointer to meta-data structure of the suite
+ * @tests: Pointer to array of tests belonging to the suite
+ * @tests_cnt: Number of tests
+ */
+struct ipa_ut_suite {
+ struct ipa_ut_suite_meta *meta_data;
+ struct ipa_ut_test *tests;
+ size_t tests_cnt;
+};
+
+
+/**
+ * Add a test to a suite.
+ * Adds an entry to the tests array and fills it with the given
+ * info, thus adding a new test.
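+ *
+ * An illustrative (hypothetical) suite definition using this macro together
+ * with IPA_UT_DEFINE_SUITE_START/END below:
+ *
+ * IPA_UT_DEFINE_SUITE_START(foo, "FOO tests",
+ * 	ipa_test_foo_suite_setup, ipa_test_foo_suite_teardown)
+ * {
+ * 	IPA_UT_ADD_TEST(basic_tx,
+ * 		"basic transmit test",
+ * 		ipa_test_foo_basic_tx,
+ * 		true, IPA_HW_v3_0, IPA_HW_MAX),
+ * } IPA_UT_DEFINE_SUITE_END(foo);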
+ */
+#define IPA_UT_ADD_TEST(__name, __desc, __run, __run_in_regression, \
+ __min_ipa_hw_ver, __max_ipa__hw_ver) \
+ { \
+ .name = #__name, \
+ .desc = __desc, \
+ .run = __run, \
+ .run_in_regression = __run_in_regression, \
+ .min_ipa_hw_ver = __min_ipa_hw_ver, \
+ .max_ipa_hw_ver = __max_ipa__hw_ver, \
+ .suite = NULL, \
+ }
+
+/**
+ * Declare a suite
+ * Every suite needs to be declared before it is registered.
+ */
+#define IPA_UT_DECLARE_SUITE(__name) \
+ extern struct ipa_ut_suite _IPA_UT_SUITE_DATA(__name)
+
+/**
+ * Register a suite
+ * Registering a suite is mandatory so it will be considered.
+ */
+#define IPA_UT_REGISTER_SUITE(__name) \
+ (&_IPA_UT_SUITE_DATA(__name))
+
+/**
+ * Start/End suite definition
+ * Will create the suite global structures and allow adding tests to it.
+ * Use IPA_UT_ADD_TEST() with these macros to add tests when defining
+ * a suite
+ */
+#define IPA_UT_DEFINE_SUITE_START(__name, __desc, __setup, __teardown) \
+ static struct ipa_ut_suite_meta _IPA_UT_SUITE_META_DATA(__name) = \
+ { \
+ .name = #__name, \
+ .desc = __desc, \
+ .setup = __setup, \
+ .teardown = __teardown, \
+ }; \
+ static struct ipa_ut_test _IPA_UT_SUITE_TESTS(__name)[] =
+#define IPA_UT_DEFINE_SUITE_END(__name) \
+ ; \
+ struct ipa_ut_suite _IPA_UT_SUITE_DATA(__name) = \
+ { \
+ .meta_data = &_IPA_UT_SUITE_META_DATA(__name), \
+ .tests = _IPA_UT_SUITE_TESTS(__name), \
+ .tests_cnt = ARRAY_SIZE(_IPA_UT_SUITE_TESTS(__name)), \
+ }
+
+#endif /* _IPA_UT_FRAMEWORK_H_ */
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_i.h b/drivers/platform/msm/ipa/test/ipa_ut_i.h
new file mode 100644
index 000000000000..d72e81d596f2
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_ut_i.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#ifndef _IPA_UT_I_H_ +#define _IPA_UT_I_H_ + +/* Suite data global structure name */ +#define _IPA_UT_SUITE_DATA(__name) ipa_ut_ ##__name ##_data + +/* Suite meta-data global structure name */ +#define _IPA_UT_SUITE_META_DATA(__name) ipa_ut_ ##__name ##_meta_data + +/* Suite global array of tests */ +#define _IPA_UT_SUITE_TESTS(__name) ipa_ut_ ##__name ##_tests + +/* Global array of all suites */ +#define _IPA_UT_ALL_SUITES ipa_ut_all_suites_data + +/* Meta-test "all" name - test to run all tests in given suite */ +#define _IPA_UT_RUN_ALL_TEST_NAME "all" + +/** + * Meta-test "regression" name - + * test to run all regression tests in given suite + */ +#define _IPA_UT_RUN_REGRESSION_TEST_NAME "regression" + + +/* Test Log buffer name and size */ +#define _IPA_UT_TEST_LOG_BUF_NAME ipa_ut_tst_log_buf +#define _IPA_UT_TEST_LOG_BUF_SIZE 8192 + +/* Global structure for test fail execution result information */ +#define _IPA_UT_TEST_FAIL_REPORT_DATA ipa_ut_tst_fail_report_data +#define _IPA_UT_TEST_FAIL_REPORT_SIZE 5 +#define _IPA_UT_TEST_FAIL_REPORT_IDX ipa_ut_tst_fail_report_data_index + +/* Start/End definitions of the array of suites */ +#define IPA_UT_DEFINE_ALL_SUITES_START \ + static struct ipa_ut_suite *_IPA_UT_ALL_SUITES[] = +#define IPA_UT_DEFINE_ALL_SUITES_END + +/** + * Suites iterator - Array-like container + * First index, number of elements and element fetcher + */ +#define IPA_UT_SUITE_FIRST_INDEX 0 +#define IPA_UT_SUITES_COUNT \ + ARRAY_SIZE(_IPA_UT_ALL_SUITES) +#define IPA_UT_GET_SUITE(__index) \ + _IPA_UT_ALL_SUITES[__index] + +/** + * enum ipa_ut_test_result - Test execution result + * @IPA_UT_TEST_RES_FAIL: Test executed and failed + * @IPA_UT_TEST_RES_SUCCESS: Test executed and succeeded + * @IPA_UT_TEST_RES_SKIP: Test was not executed. + * + * When running all tests in a suite, a specific test could + * be skipped and not executed. For example due to mismatch of + * IPA H/W version. + */ +enum ipa_ut_test_result { + IPA_UT_TEST_RES_FAIL, + IPA_UT_TEST_RES_SUCCESS, + IPA_UT_TEST_RES_SKIP, +}; + +/** + * enum ipa_ut_meta_test_type - Type of suite meta-test + * @IPA_UT_META_TEST_ALL: Represents all tests in suite + * @IPA_UT_META_TEST_REGRESSION: Represents all regression tests in suite + */ +enum ipa_ut_meta_test_type { + IPA_UT_META_TEST_ALL, + IPA_UT_META_TEST_REGRESSION, +}; + +#endif /* _IPA_UT_I_H_ */ diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h new file mode 100644 index 000000000000..823edcf5b5bd --- /dev/null +++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h @@ -0,0 +1,41 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_UT_SUITE_LIST_H_ +#define _IPA_UT_SUITE_LIST_H_ + +#include "ipa_ut_framework.h" +#include "ipa_ut_i.h" + +/** + * Declare every suite here so that it will be found later below + * No importance for order. 
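+ *
+ * To hook in a new (hypothetical) suite "foo", declare it here and register
+ * it in the block below:
+ *
+ * IPA_UT_DECLARE_SUITE(foo);
+ * IPA_UT_REGISTER_SUITE(foo),
+ *
+ * The suite itself is defined elsewhere, in its own source file, with
+ * IPA_UT_DEFINE_SUITE_START()/IPA_UT_DEFINE_SUITE_END().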
+ */ +IPA_UT_DECLARE_SUITE(mhi); +IPA_UT_DECLARE_SUITE(dma); +IPA_UT_DECLARE_SUITE(example); +IPA_UT_DECLARE_SUITE(hw_stats); + + +/** + * Register every suite inside the below block. + * Unregistered suites will be ignored + */ +IPA_UT_DEFINE_ALL_SUITES_START +{ + IPA_UT_REGISTER_SUITE(mhi), + IPA_UT_REGISTER_SUITE(dma), + IPA_UT_REGISTER_SUITE(example), + IPA_UT_REGISTER_SUITE(hw_stats), +} IPA_UT_DEFINE_ALL_SUITES_END; + +#endif /* _IPA_UT_SUITE_LIST_H_ */ diff --git a/include/linux/ecm_ipa.h b/include/linux/ecm_ipa.h new file mode 100644 index 000000000000..2833de6e6fd4 --- /dev/null +++ b/include/linux/ecm_ipa.h @@ -0,0 +1,95 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ECM_IPA_H_ +#define _ECM_IPA_H_ + +#include + +/* + * @priv: private data given upon ipa_connect + * @evt: event enum, should be IPA_WRITE_DONE + * @data: for tx path the data field is the sent socket buffer. + */ +typedef void (*ecm_ipa_callback)(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data); + +/* + * struct ecm_ipa_params - parameters for ecm_ipa initialization API + * + * @device_ready_notify: callback supplied by USB core driver. + * This callback shall be called by the Netdev once the device + * is ready to receive data from tethered PC. + * @ecm_ipa_rx_dp_notify: ecm_ipa will set this callback (out parameter). + * this callback shall be supplied for ipa_connect upon pipe + * connection (USB->IPA), once IPA driver receive data packets + * from USB pipe destined for Apps this callback will be called. + * @ecm_ipa_tx_dp_notify: ecm_ipa will set this callback (out parameter). + * this callback shall be supplied for ipa_connect upon pipe + * connection (IPA->USB), once IPA driver send packets destined + * for USB, IPA BAM will notify for Tx-complete. + * @priv: ecm_ipa will set this pointer (out parameter). + * This pointer will hold the network device for later interaction + * with ecm_ipa APIs + * @host_ethaddr: host Ethernet address in network order + * @device_ethaddr: device Ethernet address in network order + * @skip_ep_cfg: boolean field that determines if Apps-processor + * should or should not configure this end-point. 
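+ *
+ * A minimal initialization sketch (names prefixed with my_ are hypothetical;
+ * error handling trimmed):
+ *
+ * struct ecm_ipa_params params = { 0 };
+ *
+ * params.device_ready_notify = my_device_ready_cb;
+ * memcpy(params.host_ethaddr, my_host_mac, ETH_ALEN);
+ * memcpy(params.device_ethaddr, my_device_mac, ETH_ALEN);
+ * if (ecm_ipa_init(&params))
+ * 	return -EFAULT;
+ *
+ * ecm_ipa_init() fills the rx/tx notify callbacks in params; they are later
+ * supplied to the IPA driver when the USB pipes are connected, and
+ * ecm_ipa_connect() is then called with the resulting pipe handles.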
+ */ +struct ecm_ipa_params { + void (*device_ready_notify)(void); + ecm_ipa_callback ecm_ipa_rx_dp_notify; + ecm_ipa_callback ecm_ipa_tx_dp_notify; + u8 host_ethaddr[ETH_ALEN]; + u8 device_ethaddr[ETH_ALEN]; + void *private; + bool skip_ep_cfg; +}; + + +#ifdef CONFIG_ECM_IPA + +int ecm_ipa_init(struct ecm_ipa_params *params); + +int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, + void *priv); + +int ecm_ipa_disconnect(void *priv); + +void ecm_ipa_cleanup(void *priv); + +#else /* CONFIG_ECM_IPA*/ + +static inline int ecm_ipa_init(struct ecm_ipa_params *params) +{ + return 0; +} + +static inline int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, + void *priv) +{ + return 0; +} + +static inline int ecm_ipa_disconnect(void *priv) +{ + return 0; +} + +static inline void ecm_ipa_cleanup(void *priv) +{ + +} +#endif /* CONFIG_ECM_IPA*/ + +#endif /* _ECM_IPA_H_ */ diff --git a/include/linux/ipa.h b/include/linux/ipa.h new file mode 100644 index 000000000000..7923f4caf180 --- /dev/null +++ b/include/linux/ipa.h @@ -0,0 +1,2204 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_H_ +#define _IPA_H_ + +#include +#include +#include +#include +#include "linux/msm_gsi.h" + +#define IPA_APPS_MAX_BW_IN_MBPS 700 +/** + * enum ipa_transport_type + * transport type: either GSI or SPS + */ +enum ipa_transport_type { + IPA_TRANSPORT_TYPE_SPS, + IPA_TRANSPORT_TYPE_GSI +}; + +/** + * enum ipa_nat_en_type - NAT setting type in IPA end-point + */ +enum ipa_nat_en_type { + IPA_BYPASS_NAT, + IPA_SRC_NAT, + IPA_DST_NAT, +}; + +/** + * enum ipa_ipv6ct_en_type - IPv6CT setting type in IPA end-point + */ +enum ipa_ipv6ct_en_type { + IPA_BYPASS_IPV6CT, + IPA_ENABLE_IPV6CT, +}; + +/** + * enum ipa_mode_type - mode setting type in IPA end-point + * @BASIC: basic mode + * @ENABLE_FRAMING_HDLC: not currently supported + * @ENABLE_DEFRAMING_HDLC: not currently supported + * @DMA: all data arriving IPA will not go through IPA logic blocks, this + * allows IPA to work as DMA for specific pipes. + */ +enum ipa_mode_type { + IPA_BASIC, + IPA_ENABLE_FRAMING_HDLC, + IPA_ENABLE_DEFRAMING_HDLC, + IPA_DMA, +}; + +/** + * enum ipa_aggr_en_type - aggregation setting type in IPA + * end-point + */ +enum ipa_aggr_en_type { + IPA_BYPASS_AGGR, + IPA_ENABLE_AGGR, + IPA_ENABLE_DEAGGR, +}; + +/** + * enum ipa_aggr_type - type of aggregation in IPA end-point + */ +enum ipa_aggr_type { + IPA_MBIM_16 = 0, + IPA_HDLC = 1, + IPA_TLP = 2, + IPA_RNDIS = 3, + IPA_GENERIC = 4, + IPA_QCMAP = 6, +}; + +/** + * enum ipa_aggr_mode - global aggregation mode + */ +enum ipa_aggr_mode { + IPA_MBIM_AGGR, + IPA_QCNCM_AGGR, +}; + +/** + * enum ipa_dp_evt_type - type of event client callback is + * invoked for on data path + * @IPA_RECEIVE: data is struct sk_buff + * @IPA_WRITE_DONE: data is struct sk_buff + */ +enum ipa_dp_evt_type { + IPA_RECEIVE, + IPA_WRITE_DONE, + IPA_CLIENT_START_POLL, + IPA_CLIENT_COMP_NAPI, +}; + +/** + * enum hdr_total_len_or_pad_type - type of value held by TOTAL_LEN_OR_PAD + * field in header configuration register. 
+ * @IPA_HDR_PAD: field is used as padding length + * @IPA_HDR_TOTAL_LEN: field is used as total length + */ +enum hdr_total_len_or_pad_type { + IPA_HDR_PAD = 0, + IPA_HDR_TOTAL_LEN = 1, +}; + +/** + * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point + * @nat_en: This defines the default NAT mode for the pipe: in case of + * filter miss - the default NAT mode defines the NATing operation + * on the packet. Valid for Input Pipes only (IPA consumer) + */ +struct ipa_ep_cfg_nat { + enum ipa_nat_en_type nat_en; +}; + +/** + * struct ipa_ep_cfg_conn_track - IPv6 Connection tracking configuration in + * IPA end-point + * @conn_track_en: Defines speculative conn_track action, means if specific + * pipe needs to have UL/DL IPv6 Connection Tracking or Bybass + * IPv6 Connection Tracking. 0: Bypass IPv6 Connection Tracking + * 1: IPv6 UL/DL Connection Tracking. + * Valid for Input Pipes only (IPA consumer) + */ +struct ipa_ep_cfg_conn_track { + enum ipa_ipv6ct_en_type conn_track_en; +}; + +/** + * struct ipa_ep_cfg_hdr - header configuration in IPA end-point + * + * @hdr_len:Header length in bytes to be added/removed. Assuming + * header len is constant per endpoint. Valid for + * both Input and Output Pipes + * @hdr_ofst_metadata_valid: 0: Metadata_Ofst value is invalid, i.e., no + * metadata within header. + * 1: Metadata_Ofst value is valid, i.e., metadata + * within header is in offset Metadata_Ofst Valid + * for Input Pipes only (IPA Consumer) (for output + * pipes, metadata already set within the header) + * @hdr_ofst_metadata: Offset within header in which metadata resides + * Size of metadata - 4bytes + * Example - Stream ID/SSID/mux ID. + * Valid for Input Pipes only (IPA Consumer) (for output + * pipes, metadata already set within the header) + * @hdr_additional_const_len: Defines the constant length that should be added + * to the payload length in order for IPA to update + * correctly the length field within the header + * (valid only in case Hdr_Ofst_Pkt_Size_Valid=1) + * Valid for Output Pipes (IPA Producer) + * @hdr_ofst_pkt_size_valid: 0: Hdr_Ofst_Pkt_Size value is invalid, i.e., no + * length field within the inserted header + * 1: Hdr_Ofst_Pkt_Size value is valid, i.e., a + * packet length field resides within the header + * Valid for Output Pipes (IPA Producer) + * @hdr_ofst_pkt_size: Offset within header in which packet size reside. Upon + * Header Insertion, IPA will update this field within the + * header with the packet length . Assumption is that + * header length field size is constant and is 2Bytes + * Valid for Output Pipes (IPA Producer) + * @hdr_a5_mux: Determines whether A5 Mux header should be added to the packet. + * This bit is valid only when Hdr_En=01(Header Insertion) + * SW should set this bit for IPA-to-A5 pipes. + * 0: Do not insert A5 Mux Header + * 1: Insert A5 Mux Header + * Valid for Output Pipes (IPA Producer) + * @hdr_remove_additional: bool switch, remove more of the header + * based on the aggregation configuration (register + * HDR_LEN_INC_DEAGG_HDR) + * @hdr_metadata_reg_valid: bool switch, metadata from + * register INIT_HDR_METADATA_n is valid. 
+ * (relevant only for IPA Consumer pipes) + */ +struct ipa_ep_cfg_hdr { + u32 hdr_len; + u32 hdr_ofst_metadata_valid; + u32 hdr_ofst_metadata; + u32 hdr_additional_const_len; + u32 hdr_ofst_pkt_size_valid; + u32 hdr_ofst_pkt_size; + u32 hdr_a5_mux; + u32 hdr_remove_additional; + u32 hdr_metadata_reg_valid; +}; + +/** + * struct ipa_ep_cfg_hdr_ext - extended header configuration in IPA end-point + * @hdr_pad_to_alignment: Pad packet to specified alignment + * (2^pad to alignment value), i.e. value of 3 means pad to 2^3 = 8 bytes + * alignment. Alignment is to 0,2 up to 32 bytes (IPAv2 does not support 64 + * byte alignment). Valid for Output Pipes only (IPA Producer). + * @hdr_total_len_or_pad_offset: Offset to length field containing either + * total length or pad length, per hdr_total_len_or_pad config + * @hdr_payload_len_inc_padding: 0-IPA_ENDP_INIT_HDR_n's + * HDR_OFST_PKT_SIZE does + * not includes padding bytes size, payload_len = packet length, + * 1-IPA_ENDP_INIT_HDR_n's HDR_OFST_PKT_SIZE includes + * padding bytes size, payload_len = packet length + padding + * @hdr_total_len_or_pad: field is used as PAD length ot as Total length + * (header + packet + padding) + * @hdr_total_len_or_pad_valid: 0-Ignore TOTAL_LEN_OR_PAD field, 1-Process + * TOTAL_LEN_OR_PAD field + * @hdr_little_endian: 0-Big Endian, 1-Little Endian + */ +struct ipa_ep_cfg_hdr_ext { + u32 hdr_pad_to_alignment; + u32 hdr_total_len_or_pad_offset; + bool hdr_payload_len_inc_padding; + enum hdr_total_len_or_pad_type hdr_total_len_or_pad; + bool hdr_total_len_or_pad_valid; + bool hdr_little_endian; +}; + +/** + * struct ipa_ep_cfg_mode - mode configuration in IPA end-point + * @mode: Valid for Input Pipes only (IPA Consumer) + * @dst: This parameter specifies the output pipe to which the packets + * will be routed to. + * This parameter is valid for Mode=DMA and not valid for + * Mode=Basic + * Valid for Input Pipes only (IPA Consumer) + */ +struct ipa_ep_cfg_mode { + enum ipa_mode_type mode; + enum ipa_client_type dst; +}; + +/** + * struct ipa_ep_cfg_aggr - aggregation configuration in IPA end-point + * + * @aggr_en: Valid for both Input and Output Pipes + * @aggr: aggregation type (Valid for both Input and Output Pipes) + * @aggr_byte_limit: Limit of aggregated packet size in KB (<=32KB) When set + * to 0, there is no size limitation on the aggregation. + * When both, Aggr_Byte_Limit and Aggr_Time_Limit are set + * to 0, there is no aggregation, every packet is sent + * independently according to the aggregation structure + * Valid for Output Pipes only (IPA Producer ) + * @aggr_time_limit: Timer to close aggregated packet (<=32ms) When set to 0, + * there is no time limitation on the aggregation. When + * both, Aggr_Byte_Limit and Aggr_Time_Limit are set to 0, + * there is no aggregation, every packet is sent + * independently according to the aggregation structure + * Valid for Output Pipes only (IPA Producer) + * @aggr_pkt_limit: Defines if EOF close aggregation or not. if set to false + * HW closes aggregation (sends EOT) only based on its + * aggregation config (byte/time limit, etc). if set to + * true EOF closes aggregation in addition to HW based + * aggregation closure. Valid for Output Pipes only (IPA + * Producer). EOF affects only Pipes configured for + * generic aggregation. + * @aggr_hard_byte_limit_en: If set to 1, byte-limit aggregation for this + * pipe will apply a hard-limit behavior which will not + * allow frames to be closed with more than byte-limit + * bytes. 
If set to 0, previous byte-limit behavior + * will apply - frames close once a packet causes the + * accumulated byte-count to cross the byte-limit + * threshold (closed frame will contain that packet). + * @aggr_sw_eof_active: 0: EOF does not close aggregation. HW closes aggregation + * (sends EOT) only based on its aggregation config + * (byte/time limit, etc). + * 1: EOF closes aggregation in addition to HW based + * aggregation closure. Valid for Output Pipes only (IPA + * Producer). EOF affects only Pipes configured for generic + * aggregation. + */ +struct ipa_ep_cfg_aggr { + enum ipa_aggr_en_type aggr_en; + enum ipa_aggr_type aggr; + u32 aggr_byte_limit; + u32 aggr_time_limit; + u32 aggr_pkt_limit; + u32 aggr_hard_byte_limit_en; + bool aggr_sw_eof_active; +}; + +/** + * struct ipa_ep_cfg_route - route configuration in IPA end-point + * @rt_tbl_hdl: Defines the default routing table index to be used in case there + * is no filter rule matching, valid for Input Pipes only (IPA + * Consumer). Clients should set this to 0 which will cause default + * v4 and v6 routes setup internally by IPA driver to be used for + * this end-point + */ +struct ipa_ep_cfg_route { + u32 rt_tbl_hdl; +}; + +/** + * struct ipa_ep_cfg_holb - head of line blocking configuration in IPA end-point + * @en: enable(1 => ok to drop pkt)/disable(0 => never drop pkt) + * @tmr_val: duration in units of 128 IPA clk clock cyles [0,511], 1 clk=1.28us + * IPAv2.5 support 32 bit HOLB timeout value, previous versions + * supports 16 bit + */ +struct ipa_ep_cfg_holb { + u16 en; + u32 tmr_val; +}; + +/** + * struct ipa_ep_cfg_deaggr - deaggregation configuration in IPA end-point + * @deaggr_hdr_len: Deaggregation Header length in bytes. Valid only for Input + * Pipes, which are configured for 'Generic' deaggregation. + * @packet_offset_valid: - 0: PACKET_OFFSET is not used, 1: PACKET_OFFSET is + * used. + * @packet_offset_location: Location of packet offset field, which specifies + * the offset to the packet from the start of the packet offset field. + * @max_packet_len: DEAGGR Max Packet Length in Bytes. A Packet with higher + * size wil be treated as an error. 0 - Packet Length is not Bound, + * IPA should not check for a Max Packet Length. + */ +struct ipa_ep_cfg_deaggr { + u32 deaggr_hdr_len; + bool packet_offset_valid; + u32 packet_offset_location; + u32 max_packet_len; +}; + +/** + * enum ipa_cs_offload - checksum offload setting + */ +enum ipa_cs_offload { + IPA_DISABLE_CS_OFFLOAD, + IPA_ENABLE_CS_OFFLOAD_UL, + IPA_ENABLE_CS_OFFLOAD_DL, + IPA_CS_RSVD +}; + +/** + * struct ipa_ep_cfg_cfg - IPA ENDP_INIT Configuration register + * @frag_offload_en: - 0 - IP packet fragment handling is disabled. IP packet + * fragments should be sent to SW. SW is responsible for + * configuring filter rules, and IP packet filter exception should be + * used to send all fragments to SW. 1 - IP packet fragment + * handling is enabled. IPA checks for fragments and uses frag + * rules table for processing fragments. Valid only for Input Pipes + * (IPA Consumer) + * @cs_offload_en: Checksum offload enable: 00: Disable checksum offload, 01: + * Enable checksum calculation offload (UL) - For output pipe + * (IPA producer) specifies that checksum trailer is to be added. + * For input pipe (IPA consumer) specifies presence of checksum + * header and IPA checksum calculation accordingly. 10: Enable + * checksum calculation offload (DL) - For output pipe (IPA + * producer) specifies that checksum trailer is to be added. 
For
+ * input pipe (IPA consumer) specifies IPA checksum calculation.
+ * 11: Reserved
+ * @cs_metadata_hdr_offset: Offset in Words (4 bytes) within header in which
+ * checksum meta info header (4 bytes) starts (UL). Values are 0-15, which
+ * mean 0 - 60 byte checksum header offset. Valid for input
+ * pipes only (IPA consumer)
+ * @gen_qmb_master_sel: Select bit for ENDP GEN-QMB master. This is used to
+ * separate DDR & PCIe transactions in order to limit them as
+ * a group (using MAX_WRITES/READS limitation). Valid for input and
+ * output pipes (IPA consumer+producer)
+ */
+struct ipa_ep_cfg_cfg {
+ bool frag_offload_en;
+ enum ipa_cs_offload cs_offload_en;
+ u8 cs_metadata_hdr_offset;
+ u8 gen_qmb_master_sel;
+};
+
+/**
+ * struct ipa_ep_cfg_metadata_mask - Endpoint initialization hdr metadata mask
+ * @metadata_mask: Mask specifying which metadata bits to write to
+ * IPA_ENDP_INIT_HDR_n.s HDR_OFST_METADATA. Only
+ * masked metadata bits (set to 1) will be written. Valid for Output
+ * Pipes only (IPA Producer)
+ */
+struct ipa_ep_cfg_metadata_mask {
+ u32 metadata_mask;
+};
+
+/**
+ * struct ipa_ep_cfg_metadata - Meta Data configuration in IPA end-point
+ * @md: This defines the meta data from tx data descriptor
+ * @qmap_id: qmap id
+ */
+struct ipa_ep_cfg_metadata {
+ u32 qmap_id;
+};
+
+/**
+ * struct ipa_ep_cfg_seq - HPS/DPS sequencer type configuration in IPA end-point
+ * @set_dynamic: 0 - HPS/DPS seq type is configured statically,
+ * 1 - HPS/DPS seq type is set to seq_type
+ * @seq_type: HPS/DPS sequencer type configuration
+ */
+struct ipa_ep_cfg_seq {
+ bool set_dynamic;
+ int seq_type;
+};
+
+/**
+ * struct ipa_ep_cfg - configuration of IPA end-point
+ * @nat: NAT parameters
+ * @conn_track: IPv6CT parameters
+ * @hdr: Header parameters
+ * @hdr_ext: Extended header parameters
+ * @mode: Mode parameters
+ * @aggr: Aggregation parameters
+ * @deaggr: Deaggregation params
+ * @route: Routing parameters
+ * @cfg: Configuration register data
+ * @metadata_mask: Hdr metadata mask
+ * @meta: Meta Data
+ * @seq: HPS/DPS sequencers configuration
+ */
+struct ipa_ep_cfg {
+ struct ipa_ep_cfg_nat nat;
+ struct ipa_ep_cfg_conn_track conn_track;
+ struct ipa_ep_cfg_hdr hdr;
+ struct ipa_ep_cfg_hdr_ext hdr_ext;
+ struct ipa_ep_cfg_mode mode;
+ struct ipa_ep_cfg_aggr aggr;
+ struct ipa_ep_cfg_deaggr deaggr;
+ struct ipa_ep_cfg_route route;
+ struct ipa_ep_cfg_cfg cfg;
+ struct ipa_ep_cfg_metadata_mask metadata_mask;
+ struct ipa_ep_cfg_metadata meta;
+ struct ipa_ep_cfg_seq seq;
+};
+
+/**
+ * struct ipa_ep_cfg_ctrl - Control configuration in IPA end-point
+ * @ipa_ep_suspend: 0 - ENDP is enabled, 1 - ENDP is suspended (disabled).
+ * Valid for PROD Endpoints
+ * @ipa_ep_delay: 0 - ENDP is free-running, 1 - ENDP is delayed.
+ * SW controls the data flow of an endpoint using this bit.
+ * Valid for CONS Endpoints + */ +struct ipa_ep_cfg_ctrl { + bool ipa_ep_suspend; + bool ipa_ep_delay; +}; + +/** + * x should be in bytes + */ +#define IPA_NUM_OF_FIFO_DESC(x) (x/sizeof(struct sps_iovec)) +typedef void (*ipa_notify_cb)(void *priv, enum ipa_dp_evt_type evt, + unsigned long data); + +/** + * enum ipa_wdi_meter_evt_type - type of event client callback is + * for AP+STA mode metering + * @IPA_GET_WDI_SAP_STATS: get IPA_stats betwen SAP and STA - + * use ipa_get_wdi_sap_stats structure + * @IPA_SET_WIFI_QUOTA: set quota limit on STA - + * use ipa_set_wifi_quota structure + */ +enum ipa_wdi_meter_evt_type { + IPA_GET_WDI_SAP_STATS, + IPA_SET_WIFI_QUOTA, +}; + +struct ipa_get_wdi_sap_stats { + /* indicate to reset stats after query */ + uint8_t reset_stats; + /* indicate valid stats from wlan-fw */ + uint8_t stats_valid; + /* Tx: SAP->STA */ + uint64_t ipv4_tx_packets; + uint64_t ipv4_tx_bytes; + /* Rx: STA->SAP */ + uint64_t ipv4_rx_packets; + uint64_t ipv4_rx_bytes; + uint64_t ipv6_tx_packets; + uint64_t ipv6_tx_bytes; + uint64_t ipv6_rx_packets; + uint64_t ipv6_rx_bytes; +}; + +/** + * struct ipa_set_wifi_quota - structure used for + * IPA_SET_WIFI_QUOTA. + * + * @quota_bytes: Quota (in bytes) for the STA interface. + * @set_quota: Indicate whether to set the quota (use 1) or + * unset the quota. + * + */ +struct ipa_set_wifi_quota { + uint64_t quota_bytes; + uint8_t set_quota; + /* indicate valid quota set from wlan-fw */ + uint8_t set_valid; +}; + +typedef void (*ipa_wdi_meter_notifier_cb)(enum ipa_wdi_meter_evt_type evt, + void *data); + + +/** + * struct ipa_tx_intf - interface tx properties + * @num_props: number of tx properties + * @prop: the tx properties array + */ +struct ipa_tx_intf { + u32 num_props; + struct ipa_ioc_tx_intf_prop *prop; +}; + +/** + * struct ipa_rx_intf - interface rx properties + * @num_props: number of rx properties + * @prop: the rx properties array + */ +struct ipa_rx_intf { + u32 num_props; + struct ipa_ioc_rx_intf_prop *prop; +}; + +/** + * struct ipa_ext_intf - interface ext properties + * @excp_pipe_valid: is next field valid? + * @excp_pipe: exception packets should be routed to this pipe + * @num_props: number of ext properties + * @prop: the ext properties array + */ +struct ipa_ext_intf { + bool excp_pipe_valid; + enum ipa_client_type excp_pipe; + u32 num_props; + struct ipa_ioc_ext_intf_prop *prop; +}; + +/** + * struct ipa_sys_connect_params - information needed to setup an IPA end-point + * in system-BAM mode + * @ipa_ep_cfg: IPA EP configuration + * @client: the type of client who "owns" the EP + * @desc_fifo_sz: size of desc FIFO. This number is used to allocate the desc + * fifo for BAM. For GSI, this size is used by IPA driver as a + * baseline to calculate the GSI ring size in the following way: + * For PROD pipes, GSI ring is 4 * desc_fifo_sz. + For PROD pipes, GSI ring is 2 * desc_fifo_sz. + * @priv: callback cookie + * @notify: callback + * priv - callback cookie + * evt - type of event + * data - data relevant to event. May not be valid. See event_type + * enum for valid cases. 
+ * @skip_ep_cfg: boolean field that determines if EP should be configured + * by IPA driver + * @keep_ipa_awake: when true, IPA will not be clock gated + * @napi_enabled: when true, IPA calls the client callback to start polling + */ +struct ipa_sys_connect_params { + struct ipa_ep_cfg ipa_ep_cfg; + enum ipa_client_type client; + u32 desc_fifo_sz; + void *priv; + ipa_notify_cb notify; + bool skip_ep_cfg; + bool keep_ipa_awake; + bool napi_enabled; + bool recycle_enabled; +}; + +/** + * struct ipa_tx_meta - meta-data for the TX packet + * @dma_address: dma mapped address of TX packet + * @dma_address_valid: is above field valid? + */ +struct ipa_tx_meta { + u8 pkt_init_dst_ep; + bool pkt_init_dst_ep_valid; + bool pkt_init_dst_ep_remote; + dma_addr_t dma_address; + bool dma_address_valid; +}; + +/** + * typedef ipa_msg_free_fn - callback function + * @param buff - [in] the message payload to free + * @param len - [in] size of message payload + * @param type - [in] the message type + * + * Message callback registered by kernel client with IPA driver to + * free message payload after IPA driver processing is complete + * + * No return value + */ +typedef void (*ipa_msg_free_fn)(void *buff, u32 len, u32 type); + +/** + * typedef ipa_msg_pull_fn - callback function + * @param buff - [in] where to copy message payload + * @param len - [in] size of buffer to copy payload into + * @param type - [in] the message type + * + * Message callback registered by kernel client with IPA driver for + * IPA driver to pull messages from the kernel client upon demand from + * user-space + * + * Returns how many bytes were copied into the buffer. + */ +typedef int (*ipa_msg_pull_fn)(void *buff, u32 len, u32 type); + +/** + * enum ipa_voltage_level - IPA Voltage levels + */ +enum ipa_voltage_level { + IPA_VOLTAGE_UNSPECIFIED, + IPA_VOLTAGE_SVS = IPA_VOLTAGE_UNSPECIFIED, + IPA_VOLTAGE_NOMINAL, + IPA_VOLTAGE_TURBO, + IPA_VOLTAGE_MAX, +}; + +/** + * enum ipa_rm_event - IPA RM events + * + * Indicate the resource state change + */ +enum ipa_rm_event { + IPA_RM_RESOURCE_GRANTED, + IPA_RM_RESOURCE_RELEASED +}; + +typedef void (*ipa_rm_notify_cb)(void *user_data, + enum ipa_rm_event event, + unsigned long data); +/** + * struct ipa_rm_register_params - information needed to + * register IPA RM client with IPA RM + * + * @user_data: IPA RM client provided information + * to be passed to notify_cb callback below + * @notify_cb: callback which is called by the resource + * to notify the IPA RM client about its state + * change. The IPA RM client is expected to perform + * non-blocking operations only in notify_cb and to + * release the notification context as soon as + * possible. + */ +struct ipa_rm_register_params { + void *user_data; + ipa_rm_notify_cb notify_cb; +}; + +/** + * struct ipa_rm_create_params - information needed to initialize + * the resource + * @name: resource name + * @floor_voltage: floor voltage needed for the client to operate at maximum + * bandwidth. + * @reg_params: register parameters; contents are ignored + * for a consumer resource, for which NULL should + * be provided + * @request_resource: function which should be called to request resource, + * NULL should be provided for producer resource + * @release_resource: function which should be called to release resource, + * NULL should be provided for producer resource + * + * The IPA RM client is expected to perform non-blocking operations only + * in the request_resource and release_resource functions and to + * release the notification context as soon as possible.
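Because the resource-manager structures above are only meaningful together with the ipa_rm_* calls declared later in this file, a brief hedged sketch follows: it creates a producer resource and then requests it. IPA_RM_RESOURCE_USB_PROD is assumed from msm_ipa.h, and the helper names and callback body are illustrative only.

#include <linux/ipa.h>

static void example_rm_notify(void *user_data, enum ipa_rm_event event,
			      unsigned long data)
{
	/* Non-blocking only: typically just signal the client's own context
	 * when event == IPA_RM_RESOURCE_GRANTED.
	 */
}

static int example_create_and_request_rm(void)
{
	struct ipa_rm_create_params create = { 0 };
	int ret;

	create.name = IPA_RM_RESOURCE_USB_PROD;	/* assumed resource name */
	create.floor_voltage = IPA_VOLTAGE_SVS;
	create.reg_params.user_data = NULL;
	create.reg_params.notify_cb = example_rm_notify;
	/* request_resource/release_resource stay NULL for a producer resource */

	ret = ipa_rm_create_resource(&create);
	if (ret)
		return ret;

	/* If not granted immediately, IPA_RM_RESOURCE_GRANTED arrives via notify_cb. */
	return ipa_rm_request_resource(IPA_RM_RESOURCE_USB_PROD);
}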
+ */ +struct ipa_rm_create_params { + enum ipa_rm_resource_name name; + enum ipa_voltage_level floor_voltage; + struct ipa_rm_register_params reg_params; + int (*request_resource)(void); + int (*release_resource)(void); +}; + +/** + * struct ipa_rm_perf_profile - information regarding IPA RM client performance + * profile + * + * @max_supported_bandwidth_mbps: maximum bandwidth needed by the client in Mbps + */ +struct ipa_rm_perf_profile { + u32 max_supported_bandwidth_mbps; +}; + +#define A2_MUX_HDR_NAME_V4_PREF "dmux_hdr_v4_" +#define A2_MUX_HDR_NAME_V6_PREF "dmux_hdr_v6_" + +/** + * enum teth_tethering_mode - Tethering mode (Rmnet / MBIM) + */ +enum teth_tethering_mode { + TETH_TETHERING_MODE_RMNET, + TETH_TETHERING_MODE_MBIM, + TETH_TETHERING_MODE_MAX, +}; + +/** + * struct teth_bridge_init_params - Parameters used for in/out USB API + * @usb_notify_cb: Callback function which should be used by the caller. + * Output parameter. + * @private_data: Data for the callback function. Should be used by the + * caller. Output parameter. + * @skip_ep_cfg: boolean field that determines if Apps-processor + * should or should not configure this end-point. + */ +struct teth_bridge_init_params { + ipa_notify_cb usb_notify_cb; + void *private_data; + enum ipa_client_type client; + bool skip_ep_cfg; +}; + +/** + * struct teth_bridge_connect_params - Parameters used in teth_bridge_connect() + * @ipa_usb_pipe_hdl: IPA to USB pipe handle, returned from ipa_connect() + * @usb_ipa_pipe_hdl: USB to IPA pipe handle, returned from ipa_connect() + * @tethering_mode: Rmnet or MBIM + * @client_type: IPA "client" name (IPA_CLIENT_USB#_PROD) + */ +struct teth_bridge_connect_params { + u32 ipa_usb_pipe_hdl; + u32 usb_ipa_pipe_hdl; + enum teth_tethering_mode tethering_mode; + enum ipa_client_type client_type; +}; + +/** + * struct ipa_tx_data_desc - information needed + * to send a data packet to HW + * @link: link to data descriptors + * @priv: client specific private data + * @pyld_buffer: pointer to the data buffer that holds the frame + * @pyld_len: length of the data packet + */ +struct ipa_tx_data_desc { + struct list_head link; + void *priv; + void *pyld_buffer; + u16 pyld_len; +}; + +/** + * struct ipa_rx_data - information needed + * to send to wlan driver on receiving data from ipa hw + * @skb: skb + * @dma_addr: DMA address of this Rx packet + */ +struct ipa_rx_data { + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +/** + * enum ipa_irq_type - IPA Interrupt Type + * Used to register handlers for IPA interrupts + * + * Below enum is a logical mapping and not the actual interrupt bit in HW + */ +enum ipa_irq_type { + IPA_BAD_SNOC_ACCESS_IRQ, + IPA_EOT_COAL_IRQ, + IPA_UC_IRQ_0, + IPA_UC_IRQ_1, + IPA_UC_IRQ_2, + IPA_UC_IRQ_3, + IPA_UC_IN_Q_NOT_EMPTY_IRQ, + IPA_UC_RX_CMD_Q_NOT_FULL_IRQ, + IPA_UC_TX_CMD_Q_NOT_FULL_IRQ, + IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ, + IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ, + IPA_RX_ERR_IRQ, + IPA_DEAGGR_ERR_IRQ, + IPA_TX_ERR_IRQ, + IPA_STEP_MODE_IRQ, + IPA_PROC_ERR_IRQ, + IPA_TX_SUSPEND_IRQ, + IPA_TX_HOLB_DROP_IRQ, + IPA_BAM_IDLE_IRQ, + IPA_GSI_IDLE_IRQ = IPA_BAM_IDLE_IRQ, + IPA_IRQ_MAX +}; + +/** + * struct ipa_tx_suspend_irq_data - interrupt data for IPA_TX_SUSPEND_IRQ + * @endpoints: bitmask of endpoints which cause the IPA_TX_SUSPEND_IRQ interrupt + */ +struct ipa_tx_suspend_irq_data { + u32 endpoints; +}; + + +/** + * typedef ipa_irq_handler_t - irq handler/callback type + * @param ipa_irq_type - [in] interrupt type + * @param private_data - [in, out] the client
private data + * @param interrupt_data - [out] interrupt information data + * + * callback registered by ipa_add_interrupt_handler function to + * handle a specific interrupt type + * + * No return value + */ +typedef void (*ipa_irq_handler_t)(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data); + +/** + * struct IpaHwBamStats_t - Structure holding the BAM statistics + * + * @bamFifoFull : Number of times Bam Fifo got full - For In Ch: Good, + * For Out Ch: Bad + * @bamFifoEmpty : Number of times Bam Fifo got empty - For In Ch: Bad, + * For Out Ch: Good + * @bamFifoUsageHigh : Number of times Bam fifo usage went above 75% - + * For In Ch: Good, For Out Ch: Bad + * @bamFifoUsageLow : Number of times Bam fifo usage went below 25% - + * For In Ch: Bad, For Out Ch: Good + */ +struct IpaHwBamStats_t { + u32 bamFifoFull; + u32 bamFifoEmpty; + u32 bamFifoUsageHigh; + u32 bamFifoUsageLow; + u32 bamUtilCount; +} __packed; + +/** + * struct IpaHwRingStats_t - Structure holding the Ring statistics + * + * @ringFull : Number of times Transfer Ring got full - For In Ch: Good, + * For Out Ch: Bad + * @ringEmpty : Number of times Transfer Ring got empty - For In Ch: Bad, + * For Out Ch: Good + * @ringUsageHigh : Number of times Transfer Ring usage went above 75% - + * For In Ch: Good, For Out Ch: Bad + * @ringUsageLow : Number of times Transfer Ring usage went below 25% - + * For In Ch: Bad, For Out Ch: Good + */ +struct IpaHwRingStats_t { + u32 ringFull; + u32 ringEmpty; + u32 ringUsageHigh; + u32 ringUsageLow; + u32 RingUtilCount; +} __packed; + +/** + * struct IpaHwStatsWDIRxInfoData_t - Structure holding the WDI Rx channel + * structures + * + * @max_outstanding_pkts : Number of outstanding packets in Rx Ring + * @num_pkts_processed : Number of packets processed - cumulative + * @rx_ring_rp_value : Read pointer last advertized to the WLAN FW + * @rx_ind_ring_stats : Ring info + * @bam_stats : BAM info + * @num_bam_int_handled : Number of Bam Interrupts handled by FW + * @num_db : Number of times the doorbell was rung + * @num_unexpected_db : Number of unexpected doorbells + * @num_pkts_in_dis_uninit_state : number of completions we + * received in disabled or uninitialized state + * @num_ic_inj_vdev_change : Number of times the Imm Cmd is + * injected due to vdev_id change + * @num_ic_inj_fw_desc_change : Number of times the Imm Cmd is + * injected due to fw_desc change + * @num_qmb_int_handled : Number of QMB interrupts handled + */ +struct IpaHwStatsWDIRxInfoData_t { + u32 max_outstanding_pkts; + u32 num_pkts_processed; + u32 rx_ring_rp_value; + struct IpaHwRingStats_t rx_ind_ring_stats; + struct IpaHwBamStats_t bam_stats; + u32 num_bam_int_handled; + u32 num_db; + u32 num_unexpected_db; + u32 num_pkts_in_dis_uninit_state; + u32 num_ic_inj_vdev_change; + u32 num_ic_inj_fw_desc_change; + u32 num_qmb_int_handled; + u32 reserved1; + u32 reserved2; +} __packed; + +/** + * struct IpaHwStatsWDITxInfoData_t - Structure holding the WDI Tx channel + * structures + * + * @num_pkts_processed : Number of packets processed - cumulative + * @copy_engine_doorbell_value : latest value of doorbell written to copy engine + * @num_db_fired : Number of DB from uC FW to Copy engine + * @tx_comp_ring_stats : ring info + * @bam_stats : BAM info + * @num_db : Number of times the doorbell was rung + * @num_unexpected_db : Number of unexpected doorbells + * @num_bam_int_handled : Number of Bam Interrupts handled by FW + * @num_bam_int_in_non_running_state : Number of Bam interrupts while not in + * 
Running state + * @num_qmb_int_handled : Number of QMB interrupts handled + */ +struct IpaHwStatsWDITxInfoData_t { + u32 num_pkts_processed; + u32 copy_engine_doorbell_value; + u32 num_db_fired; + struct IpaHwRingStats_t tx_comp_ring_stats; + struct IpaHwBamStats_t bam_stats; + u32 num_db; + u32 num_unexpected_db; + u32 num_bam_int_handled; + u32 num_bam_int_in_non_running_state; + u32 num_qmb_int_handled; + u32 num_bam_int_handled_while_wait_for_bam; +} __packed; + +/** + * struct IpaHwStatsWDIInfoData_t - Structure holding the WDI channel structures + * + * @rx_ch_stats : RX stats + * @tx_ch_stats : TX stats + */ +struct IpaHwStatsWDIInfoData_t { + struct IpaHwStatsWDIRxInfoData_t rx_ch_stats; + struct IpaHwStatsWDITxInfoData_t tx_ch_stats; +} __packed; + + +/** + * struct ipa_wdi_ul_params - WDI_RX configuration + * @rdy_ring_base_pa: physical address of the base of the Rx ring (containing + * Rx buffers) + * @rdy_ring_size: size of the Rx ring in bytes + * @rdy_ring_rp_pa: physical address of the location through which IPA uc is + * reading (WDI-1.0) + * @rdy_comp_ring_base_pa: physical address of the base of the Rx completion + * ring (WDI-2.0) + * @rdy_comp_ring_wp_pa: physical address of the location through which IPA + * uc is writing (WDI-2.0) + * @rdy_comp_ring_size: size of the Rx_completion ring in bytes + * expected to communicate about the Read pointer into the Rx Ring + */ +struct ipa_wdi_ul_params { + phys_addr_t rdy_ring_base_pa; + u32 rdy_ring_size; + phys_addr_t rdy_ring_rp_pa; + phys_addr_t rdy_comp_ring_base_pa; + phys_addr_t rdy_comp_ring_wp_pa; + u32 rdy_comp_ring_size; + u32 *rdy_ring_rp_va; + u32 *rdy_comp_ring_wp_va; +}; + +/** + * struct ipa_wdi_ul_params_smmu - WDI_RX configuration (with WLAN SMMU) + * @rdy_ring: SG table describing the Rx ring (containing Rx buffers) + * @rdy_ring_size: size of the Rx ring in bytes + * @rdy_ring_rp_pa: physical address of the location through which IPA uc is + * expected to communicate about the Read pointer into the Rx Ring + */ +struct ipa_wdi_ul_params_smmu { + struct sg_table rdy_ring; + u32 rdy_ring_size; + phys_addr_t rdy_ring_rp_pa; + struct sg_table rdy_comp_ring; + phys_addr_t rdy_comp_ring_wp_pa; + u32 rdy_comp_ring_size; + u32 *rdy_ring_rp_va; + u32 *rdy_comp_ring_wp_va; +}; + +/** + * struct ipa_wdi_dl_params - WDI_TX configuration + * @comp_ring_base_pa: physical address of the base of the Tx completion ring + * @comp_ring_size: size of the Tx completion ring in bytes + * @ce_ring_base_pa: physical address of the base of the Copy Engine Source + * Ring + * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to + * write into to trigger the copy engine + * @ce_ring_size: Copy Engine Ring size in bytes + * @num_tx_buffers: Number of pkt buffers allocated + */ +struct ipa_wdi_dl_params { + phys_addr_t comp_ring_base_pa; + u32 comp_ring_size; + phys_addr_t ce_ring_base_pa; + phys_addr_t ce_door_bell_pa; + u32 ce_ring_size; + u32 num_tx_buffers; +}; + +/** + * struct ipa_wdi_dl_params_smmu - WDI_TX configuration (with WLAN SMMU) + * @comp_ring: SG table describing the Tx completion ring + * @comp_ring_size: size of the Tx completion ring in bytes + * @ce_ring: SG table describing the Copy Engine Source Ring + * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to + * write into to trigger the copy engine + * @ce_ring_size: Copy Engine Ring size in bytes + * @num_tx_buffers: Number of pkt buffers allocated + */ +struct ipa_wdi_dl_params_smmu { + struct sg_table comp_ring; + u32 
comp_ring_size; + struct sg_table ce_ring; + phys_addr_t ce_door_bell_pa; + u32 ce_ring_size; + u32 num_tx_buffers; +}; + +/** + * struct ipa_wdi_in_params - information provided by WDI client + * @sys: IPA EP configuration info + * @ul: WDI_RX configuration info + * @dl: WDI_TX configuration info + * @ul_smmu: WDI_RX configuration info when WLAN uses SMMU + * @dl_smmu: WDI_TX configuration info when WLAN uses SMMU + * @smmu_enabled: true if WLAN uses SMMU + * @ipa_wdi_meter_notifier_cb: Get WDI stats and quato info + */ +struct ipa_wdi_in_params { + struct ipa_sys_connect_params sys; + union { + struct ipa_wdi_ul_params ul; + struct ipa_wdi_dl_params dl; + struct ipa_wdi_ul_params_smmu ul_smmu; + struct ipa_wdi_dl_params_smmu dl_smmu; + } u; + bool smmu_enabled; +#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN + ipa_wdi_meter_notifier_cb wdi_notify; +#endif +}; + +enum ipa_upstream_type { + IPA_UPSTEAM_MODEM = 1, + IPA_UPSTEAM_WLAN, + IPA_UPSTEAM_MAX +}; + +/** + * struct ipa_wdi_out_params - information provided to WDI client + * @uc_door_bell_pa: physical address of IPA uc doorbell + * @clnt_hdl: opaque handle assigned to client + */ +struct ipa_wdi_out_params { + phys_addr_t uc_door_bell_pa; + u32 clnt_hdl; +}; + +/** + * struct ipa_wdi_db_params - information provided to retrieve + * physical address of uC doorbell + * @client: type of "client" (IPA_CLIENT_WLAN#_PROD/CONS) + * @uc_door_bell_pa: physical address of IPA uc doorbell + */ +struct ipa_wdi_db_params { + enum ipa_client_type client; + phys_addr_t uc_door_bell_pa; +}; + +/** + * struct ipa_wdi_uc_ready_params - uC ready CB parameters + * @is_uC_ready: uC loaded or not + * @priv : callback cookie + * @notify: callback + */ +typedef void (*ipa_uc_ready_cb)(void *priv); +struct ipa_wdi_uc_ready_params { + bool is_uC_ready; + void *priv; + ipa_uc_ready_cb notify; +}; + +/** + * struct ipa_wdi_buffer_info - address info of a WLAN allocated buffer + * @pa: physical address of the buffer + * @iova: IOVA of the buffer as embedded inside the WDI descriptors + * @size: size in bytes of the buffer + * @result: result of map or unmap operations (out param) + * + * IPA driver will create/release IOMMU mapping in IPA SMMU from iova->pa + */ +struct ipa_wdi_buffer_info { + phys_addr_t pa; + unsigned long iova; + size_t size; + int result; +}; + +/** + * struct ipa_gsi_ep_config - IPA GSI endpoint configurations + * + * @ipa_ep_num: IPA EP pipe number + * @ipa_gsi_chan_num: GSI channel number + * @ipa_if_tlv: number of IPA_IF TLV + * @ipa_if_aos: number of IPA_IF AOS + * @ee: Execution environment + */ +struct ipa_gsi_ep_config { + int ipa_ep_num; + int ipa_gsi_chan_num; + int ipa_if_tlv; + int ipa_if_aos; + int ee; +}; + +#if defined CONFIG_IPA || defined CONFIG_IPA3 + +/* + * Resume / Suspend + */ +int ipa_reset_endpoint(u32 clnt_hdl); + +/* + * Remove ep delay + */ +int ipa_clear_endpoint_delay(u32 clnt_hdl); + +/* + * Disable ep + */ +int ipa_disable_endpoint(u32 clnt_hdl); + +/* + * Configuration + */ +int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg); + +int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg); + +int ipa_cfg_ep_conn_track(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ep_conn_track); + +int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg); + +int ipa_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg); + +int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg); + +int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr 
*ipa_ep_cfg); + +int ipa_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ipa_ep_cfg); + +int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg); + +int ipa_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +int ipa_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg); + +int ipa_cfg_ep_metadata_mask(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask + *ipa_ep_cfg); + +int ipa_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); + +/* + * Header removal / addition + */ +int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs); + +int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls); + +int ipa_commit_hdr(void); + +int ipa_reset_hdr(void); + +int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup); + +int ipa_put_hdr(u32 hdr_hdl); + +int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy); + +/* + * Header Processing Context + */ +int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs); + +int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); + +/* + * Routing + */ +int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); + +int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); + +int ipa_commit_rt(enum ipa_ip_type ip); + +int ipa_reset_rt(enum ipa_ip_type ip); + +int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); + +int ipa_put_rt_tbl(u32 rt_tbl_hdl); + +int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in); + +int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); + +/* + * Filtering + */ +int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); + +int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); + +int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); + +int ipa_commit_flt(enum ipa_ip_type ip); + +int ipa_reset_flt(enum ipa_ip_type ip); + +/* + * NAT + */ +int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem); + +int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init); + +int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma); + +int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del); + +/* + * Messaging + */ +int ipa_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback); +int ipa_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback); +int ipa_deregister_pull_msg(struct ipa_msg_meta *meta); + +/* + * Interface + */ +int ipa_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx); +int ipa_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext); +int ipa_deregister_intf(const char *name); + +/* + * Aggregation + */ +int ipa_set_aggr_mode(enum ipa_aggr_mode mode); + +int ipa_set_qcncm_ndp_sig(char sig[3]); + +int ipa_set_single_ndp_per_mbim(bool enable); + +/* + * Data path + */ +int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata); + +/* + * To transfer multiple data packets + * While passing the data descriptor list, the anchor node + * should be of type struct ipa_tx_data_desc not list_head + */ +int ipa_tx_dp_mul(enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc); + +void ipa_free_skb(struct ipa_rx_data *data); +int ipa_rx_poll(u32 clnt_hdl, int budget); +void ipa_recycle_wan_skb(struct sk_buff *skb); + +/* + * System pipes + */ +int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl); + +int ipa_teardown_sys_pipe(u32 clnt_hdl); + +int 
ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out); +int ipa_disconnect_wdi_pipe(u32 clnt_hdl); +int ipa_enable_wdi_pipe(u32 clnt_hdl); +int ipa_disable_wdi_pipe(u32 clnt_hdl); +int ipa_resume_wdi_pipe(u32 clnt_hdl); +int ipa_suspend_wdi_pipe(u32 clnt_hdl); +int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats); +u16 ipa_get_smem_restr_bytes(void); +int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid, + uint64_t num_bytes); + +/* + * To retrieve doorbell physical address of + * wlan pipes + */ +int ipa_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out); + +/* + * To register uC ready callback if uC not ready + * and also check uC readiness + * if uC not ready only, register callback + */ +int ipa_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param); +/* + * To de-register uC ready callback + */ +int ipa_uc_dereg_rdyCB(void); + +int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); +int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); + +/* + * Resource manager + */ +int ipa_rm_create_resource(struct ipa_rm_create_params *create_params); + +int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name); + +int ipa_rm_register(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params); + +int ipa_rm_deregister(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params); + +int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name, + struct ipa_rm_perf_profile *profile); + +int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name); + +int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name); + +int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name); + +int ipa_rm_notify_completion(enum ipa_rm_event event, + enum ipa_rm_resource_name resource_name); + +int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name, + unsigned long msecs); + +int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name); + +int ipa_rm_inactivity_timer_request_resource( + enum ipa_rm_resource_name resource_name); + +int ipa_rm_inactivity_timer_release_resource( + enum ipa_rm_resource_name resource_name); + +/* + * Tethering bridge (Rmnet / MBIM) + */ +int teth_bridge_init(struct teth_bridge_init_params *params); + +int teth_bridge_disconnect(enum ipa_client_type client); + +int teth_bridge_connect(struct teth_bridge_connect_params *connect_params); + +/* + * Tethering client info + */ +void ipa_set_client(int index, enum ipacm_client_enum client, bool uplink); + +enum ipacm_client_enum ipa_get_client(int pipe_idx); + +bool ipa_get_client_uplink(int pipe_idx); + +/* + * IPADMA + */ +int ipa_dma_init(void); + +int ipa_dma_enable(void); + +int ipa_dma_disable(void); + +int ipa_dma_sync_memcpy(u64 dest, u64 src, int len); + +int ipa_dma_async_memcpy(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param); + +int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); + +void ipa_dma_destroy(void); + +/* + * mux id + */ +int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in); + +/* + * interrupts + */ +int ipa_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + 
bool deferred_flag, + void *private_data); + +int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt); + +int ipa_restore_suspend_handler(void); + +/* + * Miscellaneous + */ +void ipa_bam_reg_dump(void); + +int ipa_get_ep_mapping(enum ipa_client_type client); + +bool ipa_is_ready(void); + +void ipa_proxy_clk_vote(void); +void ipa_proxy_clk_unvote(void); + +enum ipa_hw_type ipa_get_hw_type(void); + +bool ipa_is_client_handle_valid(u32 clnt_hdl); + +enum ipa_client_type ipa_get_client_mapping(int pipe_idx); + +enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx); + +bool ipa_get_modem_cfg_emb_pipe_flt(void); + +enum ipa_transport_type ipa_get_transport_type(void); + +struct device *ipa_get_dma_dev(void); +struct iommu_domain *ipa_get_smmu_domain(void); + +int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count); + +const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info + (enum ipa_client_type client); + +int ipa_stop_gsi_channel(u32 clnt_hdl); + +typedef void (*ipa_ready_cb)(void *user_data); + +/** + * ipa_register_ipa_ready_cb() - register a callback to be invoked + * when IPA core driver initialization is complete. + * + * @ipa_ready_cb: CB to be triggered. + * @user_data: Data to be sent to the originator of the CB. + * + * Note: This function is expected to be utilized when ipa_is_ready + * function returns false. + * An IPA client may also use this function directly rather than + * calling ipa_is_ready beforehand, as if this API returns -EEXIST, + * this means IPA initialization is complete (and no callback will + * be triggered). + * When the callback is triggered, the client MUST perform his + * operations in a different context. + * + * The function will return 0 on success, -ENOMEM on memory issues and + * -EEXIST if IPA initialization is complete already. 
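The registration rules described above translate into a small amount of client code. The following hedged sketch (function names are illustrative) shows the usual pattern of checking ipa_is_ready() and falling back to ipa_register_ipa_ready_cb(), including the documented -EEXIST case.

#include <linux/errno.h>
#include <linux/ipa.h>

static void example_ipa_ready_cb(void *user_data)
{
	/* IPA init is complete; per the note above, defer real work to
	 * a different context (e.g. schedule a workqueue item).
	 */
}

static int example_wait_for_ipa(void)
{
	int ret;

	if (ipa_is_ready())
		return 0;	/* nothing to register */

	ret = ipa_register_ipa_ready_cb(example_ipa_ready_cb, NULL);
	if (ret == -EEXIST)
		return 0;	/* IPA finished initializing in the meantime */

	return ret;	/* 0: callback registered; negative: failure */
}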
+ */ +int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data), + void *user_data); + +#else /* (CONFIG_IPA || CONFIG_IPA3) */ + +/* + * Resume / Suspend + */ +static inline int ipa_reset_endpoint(u32 clnt_hdl) +{ + return -EPERM; +} + +/* + * Remove ep delay + */ +static inline int ipa_clear_endpoint_delay(u32 clnt_hdl) +{ + return -EPERM; +} + +/* + * Disable ep + */ +static inline int ipa_disable_endpoint(u32 clnt_hdl) +{ + return -EPERM; +} + +/* + * Configuration + */ +static inline int ipa_cfg_ep(u32 clnt_hdl, + const struct ipa_ep_cfg *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_nat(u32 clnt_hdl, + const struct ipa_ep_cfg_nat *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_conn_track(u32 clnt_hdl, + const struct ipa_ep_cfg_conn_track *ep_conn_track) +{ + return -EPERM +} + +static inline int ipa_cfg_ep_hdr(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_mode(u32 clnt_hdl, + const struct ipa_ep_cfg_mode *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_aggr(u32 clnt_hdl, + const struct ipa_ep_cfg_aggr *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_route(u32 clnt_hdl, + const struct ipa_ep_cfg_route *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_holb(u32 clnt_hdl, + const struct ipa_ep_cfg_holb *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_cfg(u32 clnt_hdl, + const struct ipa_ep_cfg_cfg *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_metadata_mask(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg) +{ + return -EPERM; +} + +static inline int ipa_cfg_ep_ctrl(u32 clnt_hdl, + const struct ipa_ep_cfg_ctrl *ep_ctrl) +{ + return -EPERM; +} + +/* + * Header removal / addition + */ +static inline int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs) +{ + return -EPERM; +} + +static inline int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls) +{ + return -EPERM; +} + +static inline int ipa_commit_hdr(void) +{ + return -EPERM; +} + +static inline int ipa_reset_hdr(void) +{ + return -EPERM; +} + +static inline int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup) +{ + return -EPERM; +} + +static inline int ipa_put_hdr(u32 hdr_hdl) +{ + return -EPERM; +} + +static inline int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy) +{ + return -EPERM; +} + +/* + * Header Processing Context + */ +static inline int ipa_add_hdr_proc_ctx( + struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) +{ + return -EPERM; +} + +static inline int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls) +{ + return -EPERM; +} +/* + * Routing + */ +static inline int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) +{ + return -EPERM; +} + +static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls) +{ + return -EPERM; +} + +static inline int ipa_commit_rt(enum ipa_ip_type ip) +{ + return -EPERM; +} + +static inline int ipa_reset_rt(enum ipa_ip_type ip) +{ + return -EPERM; +} + +static inline int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) +{ + return -EPERM; +} + +static inline int ipa_put_rt_tbl(u32 rt_tbl_hdl) +{ + return -EPERM; +} + +static inline int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in) +{ + return -EPERM; +} + +static inline int 
ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules) +{ + return -EPERM; +} + +/* + * Filtering + */ +static inline int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) +{ + return -EPERM; +} + +static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) +{ + return -EPERM; +} + +static inline int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules) +{ + return -EPERM; +} + +static inline int ipa_commit_flt(enum ipa_ip_type ip) +{ + return -EPERM; +} + +static inline int ipa_reset_flt(enum ipa_ip_type ip) +{ + return -EPERM; +} + +/* + * NAT + */ +static inline int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem) +{ + return -EPERM; +} + + +static inline int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) +{ + return -EPERM; +} + + +static inline int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) +{ + return -EPERM; +} + + +static inline int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del) +{ + return -EPERM; +} + +/* + * Messaging + */ +static inline int ipa_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback) +{ + return -EPERM; +} + +static inline int ipa_register_pull_msg(struct ipa_msg_meta *meta, + ipa_msg_pull_fn callback) +{ + return -EPERM; +} + +static inline int ipa_deregister_pull_msg(struct ipa_msg_meta *meta) +{ + return -EPERM; +} + +/* + * Interface + */ +static inline int ipa_register_intf(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx) +{ + return -EPERM; +} + +static inline int ipa_register_intf_ext(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext) +{ + return -EPERM; +} + +static inline int ipa_deregister_intf(const char *name) +{ + return -EPERM; +} + +/* + * Aggregation + */ +static inline int ipa_set_aggr_mode(enum ipa_aggr_mode mode) +{ + return -EPERM; +} + +static inline int ipa_set_qcncm_ndp_sig(char sig[3]) +{ + return -EPERM; +} + +static inline int ipa_set_single_ndp_per_mbim(bool enable) +{ + return -EPERM; +} + +/* + * Data path + */ +static inline int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata) +{ + return -EPERM; +} + +/* + * To transfer multiple data packets + */ +static inline int ipa_tx_dp_mul( + enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc) +{ + return -EPERM; +} + +static inline void ipa_free_skb(struct ipa_rx_data *rx_in) +{ +} + +static inline int ipa_rx_poll(u32 clnt_hdl, int budget) +{ + return -EPERM; +} + +static inline void ipa_recycle_wan_skb(struct sk_buff *skb) +{ +} + +/* + * System pipes + */ +static inline u16 ipa_get_smem_restr_bytes(void) +{ + return -EPERM; +} + +static inline int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, + u32 *clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_teardown_sys_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out) +{ + return -EPERM; +} + +static inline int ipa_disconnect_wdi_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_enable_wdi_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_disable_wdi_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_resume_wdi_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_suspend_wdi_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid, + uint64_t num_bytes) +{ + return -EPERM; +} + +static inline int ipa_uc_wdi_get_dbpa( + 
struct ipa_wdi_db_params *out) +{ + return -EPERM; +} + +static inline int ipa_uc_reg_rdyCB( + struct ipa_wdi_uc_ready_params *param) +{ + return -EPERM; +} + +static inline int ipa_uc_dereg_rdyCB(void) +{ + return -EPERM; +} + + +/* + * Resource manager + */ +static inline int ipa_rm_create_resource( + struct ipa_rm_create_params *create_params) +{ + return -EPERM; +} + +static inline int ipa_rm_delete_resource( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_register(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + return -EPERM; +} + +static inline int ipa_rm_set_perf_profile( + enum ipa_rm_resource_name resource_name, + struct ipa_rm_perf_profile *profile) +{ + return -EPERM; +} + +static inline int ipa_rm_deregister(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + return -EPERM; +} + +static inline int ipa_rm_add_dependency( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return -EPERM; +} + +static inline int ipa_rm_add_dependency_sync( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return -EPERM; +} + +static inline int ipa_rm_delete_dependency( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return -EPERM; +} + +static inline int ipa_rm_request_resource( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_release_resource( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_notify_completion(enum ipa_rm_event event, + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_inactivity_timer_init( + enum ipa_rm_resource_name resource_name, + unsigned long msecs) +{ + return -EPERM; +} + +static inline int ipa_rm_inactivity_timer_destroy( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_inactivity_timer_request_resource( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +static inline int ipa_rm_inactivity_timer_release_resource( + enum ipa_rm_resource_name resource_name) +{ + return -EPERM; +} + +/* + * Tethering bridge (Rmnet / MBIM) + */ +static inline int teth_bridge_init(struct teth_bridge_init_params *params) +{ + return -EPERM; +} + +static inline int teth_bridge_disconnect(enum ipa_client_type client) +{ + return -EPERM; +} + +static inline int teth_bridge_connect(struct teth_bridge_connect_params + *connect_params) +{ + return -EPERM; +} + +/* + * Tethering client info + */ +static inline void ipa_set_client(int index, enum ipacm_client_enum client, + bool uplink) +{ +} + +static inline enum ipacm_client_enum ipa_get_client(int pipe_idx) +{ + return -EPERM; +} + +static inline bool ipa_get_client_uplink(int pipe_idx) +{ + return -EPERM; +} + +/* + * IPADMA + */ +static inline int ipa_dma_init(void) +{ + return -EPERM; +} + +static inline int ipa_dma_enable(void) +{ + return -EPERM; +} + +static inline int ipa_dma_disable(void) +{ + return -EPERM; +} + +static inline int ipa_dma_sync_memcpy(phys_addr_t dest, phys_addr_t src + , int len) +{ + return -EPERM; +} + +static inline int ipa_dma_async_memcpy(phys_addr_t dest, phys_addr_t src + , int len, void (*user_cb)(void *user1), + void *user_param) +{ + return -EPERM; +} + +static inline int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) +{ + return -EPERM; +} + +static 
inline void ipa_dma_destroy(void) +{ +} + +/* + * mux id + */ +static inline int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in) +{ + return -EPERM; +} + +/* + * interrupts + */ +static inline int ipa_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data) +{ + return -EPERM; +} + +static inline int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt) +{ + return -EPERM; +} + +static inline int ipa_restore_suspend_handler(void) +{ + return -EPERM; +} + +/* + * Miscellaneous + */ +static inline void ipa_bam_reg_dump(void) +{ +} + +static inline int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ + return -EPERM; +} + +static inline int ipa_get_ep_mapping(enum ipa_client_type client) +{ + return -EPERM; +} + +static inline bool ipa_is_ready(void) +{ + return false; +} + +static inline void ipa_proxy_clk_vote(void) +{ +} + +static inline void ipa_proxy_clk_unvote(void) +{ +} + +static inline enum ipa_hw_type ipa_get_hw_type(void) +{ + return IPA_HW_None; +} + +static inline bool ipa_is_client_handle_valid(u32 clnt_hdl) +{ + return -EINVAL; +} + +static inline enum ipa_client_type ipa_get_client_mapping(int pipe_idx) +{ + return -EINVAL; +} + +static inline enum ipa_rm_resource_name ipa_get_rm_resource_from_ep( + int pipe_idx) +{ + return -EFAULT; +} + +static inline bool ipa_get_modem_cfg_emb_pipe_flt(void) +{ + return -EINVAL; +} + +static inline enum ipa_transport_type ipa_get_transport_type(void) +{ + return -EFAULT; +} + +static inline struct device *ipa_get_dma_dev(void) +{ + return NULL; +} + +static inline struct iommu_domain *ipa_get_smmu_domain(void) +{ + return NULL; +} + +static inline int ipa_create_wdi_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return -EINVAL; +} + +static inline int ipa_release_wdi_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return -EINVAL; +} + +static inline int ipa_disable_apps_wan_cons_deaggr(void) +{ + return -EINVAL; +} + +static inline const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info + (enum ipa_client_type client) +{ + return NULL; +} + +static inline int ipa_stop_gsi_channel(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_register_ipa_ready_cb( + void (*ipa_ready_cb)(void *user_data), + void *user_data) +{ + return -EPERM; +} + +#endif /* (CONFIG_IPA || CONFIG_IPA3) */ + +#endif /* _IPA_H_ */ diff --git a/include/linux/ipa_mhi.h b/include/linux/ipa_mhi.h new file mode 100644 index 000000000000..d85f28f7fbf0 --- /dev/null +++ b/include/linux/ipa_mhi.h @@ -0,0 +1,161 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef IPA_MHI_H_ +#define IPA_MHI_H_ + +#include +#include + +/** + * enum ipa_mhi_event_type - event type for mhi callback + * + * @IPA_MHI_EVENT_READY: IPA MHI is ready and IPA uC is loaded. 
After getting + * this event MHI client is expected to call to ipa_mhi_start() API + * @IPA_MHI_EVENT_DATA_AVAILABLE: downlink data available on MHI channel + */ +enum ipa_mhi_event_type { + IPA_MHI_EVENT_READY, + IPA_MHI_EVENT_DATA_AVAILABLE, + IPA_MHI_EVENT_MAX, +}; + +typedef void (*mhi_client_cb)(void *priv, enum ipa_mhi_event_type event, + unsigned long data); + +/** + * struct ipa_mhi_msi_info - parameters for MSI (Message Signaled Interrupts) + * @addr_low: MSI lower base physical address + * @addr_hi: MSI higher base physical address + * @data: Data Pattern to use when generating the MSI + * @mask: Mask indicating number of messages assigned by the host to device + * + * msi value is written according to this formula: + * ((data & ~mask) | (mmio.msiVec & mask)) + */ +struct ipa_mhi_msi_info { + u32 addr_low; + u32 addr_hi; + u32 data; + u32 mask; +}; + +/** + * struct ipa_mhi_init_params - parameters for IPA MHI initialization API + * + * @msi: MSI (Message Signaled Interrupts) parameters + * @mmio_addr: MHI MMIO physical address + * @first_ch_idx: First channel ID for hardware accelerated channels. + * @first_er_idx: First event ring ID for hardware accelerated channels. + * @assert_bit40: should assert bit 40 in order to access host space. + * if PCIe iATU is configured then not need to assert bit40 + * @notify: client callback + * @priv: client private data to be provided in client callback + * @test_mode: flag to indicate if IPA MHI is in unit test mode + */ +struct ipa_mhi_init_params { + struct ipa_mhi_msi_info msi; + u32 mmio_addr; + u32 first_ch_idx; + u32 first_er_idx; + bool assert_bit40; + mhi_client_cb notify; + void *priv; + bool test_mode; +}; + +/** + * struct ipa_mhi_start_params - parameters for IPA MHI start API + * + * @host_ctrl_addr: Base address of MHI control data structures + * @host_data_addr: Base address of MHI data buffers + * @channel_context_addr: channel context array address in host address space + * @event_context_addr: event context array address in host address space + */ +struct ipa_mhi_start_params { + u32 host_ctrl_addr; + u32 host_data_addr; + u64 channel_context_array_addr; + u64 event_context_array_addr; +}; + +/** + * struct ipa_mhi_connect_params - parameters for IPA MHI channel connect API + * + * @sys: IPA EP configuration info + * @channel_id: MHI channel id + */ +struct ipa_mhi_connect_params { + struct ipa_sys_connect_params sys; + u8 channel_id; +}; + +/* bit #40 in address should be asserted for MHI transfers over pcie */ +#define IPA_MHI_HOST_ADDR(addr) ((addr) | BIT_ULL(40)) + +#if defined CONFIG_IPA || defined CONFIG_IPA3 + +int ipa_mhi_init(struct ipa_mhi_init_params *params); + +int ipa_mhi_start(struct ipa_mhi_start_params *params); + +int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl); + +int ipa_mhi_disconnect_pipe(u32 clnt_hdl); + +int ipa_mhi_suspend(bool force); + +int ipa_mhi_resume(void); + +void ipa_mhi_destroy(void); + +#else /* (CONFIG_IPA || CONFIG_IPA3) */ + +static inline int ipa_mhi_init(struct ipa_mhi_init_params *params) +{ + return -EPERM; +} + +static inline int ipa_mhi_start(struct ipa_mhi_start_params *params) +{ + return -EPERM; +} + +static inline int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, + u32 *clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_mhi_disconnect_pipe(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_mhi_suspend(bool force) +{ + return -EPERM; +} + +static inline int ipa_mhi_resume(void) +{ + return -EPERM; +} + +static inline void 
ipa_mhi_destroy(void) +{ + +} + +#endif /* (CONFIG_IPA || CONFIG_IPA3) */ + +#endif /* IPA_MHI_H_ */ diff --git a/include/linux/ipa_odu_bridge.h b/include/linux/ipa_odu_bridge.h new file mode 100644 index 000000000000..e7f75b7685c3 --- /dev/null +++ b/include/linux/ipa_odu_bridge.h @@ -0,0 +1,162 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_ODO_BRIDGE_H_ +#define _IPA_ODO_BRIDGE_H_ + +#include + +/** + * struct odu_bridge_params - parameters for odu bridge initialization API + * + * @netdev_name: network interface name + * @priv: private data that will be supplied to client's callback + * @tx_dp_notify: callback for handling SKB. the following event are supported: + * IPA_WRITE_DONE: will be called after client called to odu_bridge_tx_dp() + * Client is expected to free the skb. + * IPA_RECEIVE: will be called for delivering skb to APPS. + * Client is expected to deliver the skb to network stack. + * @send_dl_skb: callback for sending skb on downlink direction to adapter. + * Client is expected to free the skb. + * @device_ethaddr: device Ethernet address in network order. + * @ipa_desc_size: IPA Sys Pipe Desc Size + */ +struct odu_bridge_params { + const char *netdev_name; + void *priv; + ipa_notify_cb tx_dp_notify; + int (*send_dl_skb)(void *priv, struct sk_buff *skb); + u8 device_ethaddr[ETH_ALEN]; + u32 ipa_desc_size; +}; + +/** + * struct ipa_bridge_init_params - parameters for IPA bridge initialization API + * + * @info: structure contains initialization information + * @wakeup_request: callback to client to indicate there is downlink data + * available. Client is expected to call ipa_bridge_resume() to start + * receiving data + */ +struct ipa_bridge_init_params { + struct odu_bridge_params info; + void (*wakeup_request)(void *); +}; + +#ifdef CONFIG_IPA3 + +int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl); + +int ipa_bridge_connect(u32 hdl); + +int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth); + +int ipa_bridge_disconnect(u32 hdl); + +int ipa_bridge_suspend(u32 hdl); + +int ipa_bridge_resume(u32 hdl); + +int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb, + struct ipa_tx_meta *metadata); + +int ipa_bridge_cleanup(u32 hdl); + +#else + +static inline int ipa_bridge_init(struct odu_bridge_params *params, u32 *hdl) +{ + return -EPERM; +} + +static inline int ipa_bridge_connect(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth) +{ + return -EPERM; +} + +static inline int ipa_bridge_disconnect(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_bridge_suspend(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_bridge_resume(u32 hdl) +{ + return -EPERM; +} + +static inline int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb, +struct ipa_tx_meta *metadata) +{ + return -EPERM; +} + +static inline int ipa_bridge_cleanup(u32 hdl) +{ + return -EPERM; +} + +#endif /* CONFIG_IPA3 */ + +/* Below API is deprecated. 
Please use the API above */ +# if defined CONFIG_IPA || defined CONFIG_IPA3 + +int odu_bridge_init(struct odu_bridge_params *params); + +int odu_bridge_connect(void); + +int odu_bridge_disconnect(void); + +int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata); + +int odu_bridge_cleanup(void); + +#else + +static inline int odu_bridge_init(struct odu_bridge_params *params) +{ + return -EPERM; +} + +static inline int odu_bridge_disconnect(void) +{ + return -EPERM; +} + +static inline int odu_bridge_connect(void) +{ + return -EPERM; +} + +static inline int odu_bridge_tx_dp(struct sk_buff *skb, + struct ipa_tx_meta *metadata) +{ + return -EPERM; +} + +static inline int odu_bridge_cleanup(void) +{ + return -EPERM; +} + +#endif /* CONFIG_IPA || defined CONFIG_IPA3 */ + +#endif /* _IPA_ODO_BRIDGE_H */ diff --git a/include/linux/ipa_uc_offload.h b/include/linux/ipa_uc_offload.h new file mode 100644 index 000000000000..85d0ce92e6f6 --- /dev/null +++ b/include/linux/ipa_uc_offload.h @@ -0,0 +1,295 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IPA_UC_OFFLOAD_H_ +#define _IPA_UC_OFFLOAD_H_ + +#include + +/** + * enum ipa_uc_offload_proto + * Protocol type: either WDI or Neutrino + * + * @IPA_UC_WDI: wdi Protocol + * @IPA_UC_NTN: Neutrino Protocol + */ +enum ipa_uc_offload_proto { + IPA_UC_INVALID = 0, + IPA_UC_WDI = 1, + IPA_UC_NTN = 2, + IPA_UC_MAX_PROT_SIZE +}; + +/** + * struct ipa_hdr_info - Header to install on IPA HW + * + * @hdr: header to install on IPA HW + * @hdr_len: length of header + * @dst_mac_addr_offset: destination mac address offset + * @hdr_type: layer two header type + */ +struct ipa_hdr_info { + u8 *hdr; + u8 hdr_len; + u8 dst_mac_addr_offset; + enum ipa_hdr_l2_type hdr_type; +}; + +/** + * struct ipa_uc_offload_intf_params - parameters for uC offload + * interface registration + * + * @netdev_name: network interface name + * @notify: callback for exception/embedded packets + * @priv: callback cookie + * @hdr_info: header information + * @meta_data: meta data if any + * @meta_data_mask: meta data mask + * @proto: uC offload protocol type + * @alt_dst_pipe: alternate routing output pipe + */ +struct ipa_uc_offload_intf_params { + const char *netdev_name; + ipa_notify_cb notify; + void *priv; + struct ipa_hdr_info hdr_info[IPA_IP_MAX]; + u8 is_meta_data_valid; + u32 meta_data; + u32 meta_data_mask; + enum ipa_uc_offload_proto proto; + enum ipa_client_type alt_dst_pipe; +}; + +/** + * struct ipa_ntn_setup_info - NTN TX/Rx configuration + * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS) + * @ring_base_pa: physical address of the base of the Tx/Rx ring + * @ntn_ring_size: size of the Tx/Rx ring (in terms of elements) + * @buff_pool_base_pa: physical address of the base of the Tx/Rx + * buffer pool + * @num_buffers: Rx/Tx buffer pool size (in terms of elements) + * @data_buff_size: size of the each data buffer allocated in DDR + * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN Ring's + * tail pointer + */ +struct ipa_ntn_setup_info { + enum 
ipa_client_type client; + phys_addr_t ring_base_pa; + u32 ntn_ring_size; + + phys_addr_t buff_pool_base_pa; + u32 num_buffers; + u32 data_buff_size; + + phys_addr_t ntn_reg_base_ptr_pa; +}; + +/** + * struct ipa_uc_offload_out_params - out parameters for uC offload + * + * @clnt_hndl: Handle that client need to pass during + * further operations + */ +struct ipa_uc_offload_out_params { + u32 clnt_hndl; +}; + +/** + * struct ipa_ntn_conn_in_params - NTN TX/Rx connect parameters + * @ul: parameters to connect UL pipe(from Neutrino to IPA) + * @dl: parameters to connect DL pipe(from IPA to Neutrino) + */ +struct ipa_ntn_conn_in_params { + struct ipa_ntn_setup_info ul; + struct ipa_ntn_setup_info dl; +}; + +/** + * struct ipa_ntn_conn_out_params - information provided + * to uC offload client + * @ul_uc_db_pa: physical address of IPA uc doorbell for UL + * @dl_uc_db_pa: physical address of IPA uc doorbell for DL + * @clnt_hdl: opaque handle assigned to offload client + */ +struct ipa_ntn_conn_out_params { + phys_addr_t ul_uc_db_pa; + phys_addr_t dl_uc_db_pa; +}; + +/** + * struct ipa_uc_offload_conn_in_params - information provided by + * uC offload client + * @clnt_hndl: Handle that return as part of reg interface + * @proto: Protocol to use for offload data path + * @ntn: uC RX/Tx configuration info + */ +struct ipa_uc_offload_conn_in_params { + u32 clnt_hndl; + union { + struct ipa_ntn_conn_in_params ntn; + } u; +}; + +/** + * struct ipa_uc_offload_conn_out_params - information provided + * to uC offload client + * @ul_uc_db_pa: physical address of IPA uc doorbell for UL + * @dl_uc_db_pa: physical address of IPA uc doorbell for DL + * @clnt_hdl: opaque handle assigned to offload client + */ +struct ipa_uc_offload_conn_out_params { + union { + struct ipa_ntn_conn_out_params ntn; + } u; +}; + +/** + * struct ipa_perf_profile - To set BandWidth profile + * + * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS) + * @max_supported_bw_mbps: maximum bandwidth needed (in Mbps) + */ +struct ipa_perf_profile { + enum ipa_client_type client; + u32 max_supported_bw_mbps; +}; + +/** + * struct ipa_uc_ready_params - uC ready CB parameters + * @is_uC_ready: uC loaded or not + * @priv : callback cookie + * @notify: callback + * @proto: uC offload protocol type + */ +struct ipa_uc_ready_params { + bool is_uC_ready; + void *priv; + ipa_uc_ready_cb notify; + enum ipa_uc_offload_proto proto; +}; + +#if defined CONFIG_IPA || defined CONFIG_IPA3 + +/** + * ipa_uc_offload_reg_intf - Client should call this function to + * init uC offload data path + * + * @init: [in] initialization parameters + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +int ipa_uc_offload_reg_intf( + struct ipa_uc_offload_intf_params *in, + struct ipa_uc_offload_out_params *out); + +/** + * ipa_uc_offload_cleanup - Client Driver should call this + * function before unload and after disconnect + * + * @Return 0 on success, negative on failure + */ +int ipa_uc_offload_cleanup(u32 clnt_hdl); + +/** + * ipa_uc_offload_conn_pipes - Client should call this + * function to connect uC pipe for offload data path + * + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +int 
ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *in, + struct ipa_uc_offload_conn_out_params *out); + +/** + * ipa_uc_offload_disconn_pipes() - Client should call this + * function to disconnect uC pipe to disable offload data path + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +int ipa_uc_offload_disconn_pipes(u32 clnt_hdl); + +/** + * ipa_set_perf_profile() - Client should call this function to + * set IPA clock Band Width based on data rates + * @profile: [in] BandWidth profile to use + * + * Returns: 0 on success, negative on failure + */ +int ipa_set_perf_profile(struct ipa_perf_profile *profile); + + +/* + * To register uC ready callback if uC not ready + * and also check uC readiness + * if uC not ready only, register callback + */ +int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param); + +/* + * To de-register uC ready callback + */ +void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto); + +#else /* (CONFIG_IPA || CONFIG_IPA3) */ + +static inline int ipa_uc_offload_reg_intf( + struct ipa_uc_offload_intf_params *in, + struct ipa_uc_offload_out_params *out) +{ + return -EPERM; +} + +static inline int ipa_uC_offload_cleanup(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_uc_offload_conn_pipes( + struct ipa_uc_offload_conn_in_params *in, + struct ipa_uc_offload_conn_out_params *out) +{ + return -EPERM; +} + +static inline int ipa_uc_offload_disconn_pipes(u32 clnt_hdl) +{ + return -EPERM; +} + +static inline int ipa_set_perf_profile(struct ipa_perf_profile *profile) +{ + return -EPERM; +} + +static inline int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param) +{ + return -EPERM; +} + +static void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto) +{ +} + +#endif /* CONFIG_IPA3 */ + +#endif /* _IPA_UC_OFFLOAD_H_ */ diff --git a/include/linux/ipa_usb.h b/include/linux/ipa_usb.h new file mode 100644 index 000000000000..f07b7fa98ee9 --- /dev/null +++ b/include/linux/ipa_usb.h @@ -0,0 +1,333 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
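Before the ipa_usb.h portion of the patch continues, a hedged sketch of the uC-offload call sequence documented above (register the interface, then connect the NTN pipes) may help. The netdev name, header contents, helper names, ring parameters and the IPA_CLIENT_ODU_* client names (assumed from msm_ipa.h) are placeholders, not values taken from the driver.

#include <linux/ipa_uc_offload.h>

/* Placeholder exception/embedded-packet callback. */
static void example_ntn_notify(void *priv, enum ipa_dp_evt_type evt,
			       unsigned long data)
{
}

static int example_ntn_register_and_connect(void)
{
	struct ipa_uc_offload_intf_params reg_in = { 0 };
	struct ipa_uc_offload_out_params reg_out = { 0 };
	struct ipa_uc_offload_conn_in_params conn_in = { 0 };
	struct ipa_uc_offload_conn_out_params conn_out = { 0 };
	int ret;

	reg_in.netdev_name = "eth0";		/* placeholder interface name */
	reg_in.proto = IPA_UC_NTN;
	reg_in.notify = example_ntn_notify;
	/* reg_in.hdr_info[] would carry the real L2 headers per IP version */

	ret = ipa_uc_offload_reg_intf(&reg_in, &reg_out);
	if (ret)
		return ret;

	conn_in.clnt_hndl = reg_out.clnt_hndl;
	conn_in.u.ntn.ul.client = IPA_CLIENT_ODU_PROD;	/* assumed client names */
	conn_in.u.ntn.dl.client = IPA_CLIENT_ODU_EMB_CONS;
	/* ring bases, sizes and buffer pools come from the Neutrino HW driver */

	return ipa_uc_offload_conn_pipes(&conn_in, &conn_out);
}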
+ */
+
+#ifndef _IPA_USB_H_
+#define _IPA_USB_H_
+
+enum ipa_usb_teth_prot {
+	IPA_USB_RNDIS = 0,
+	IPA_USB_ECM = 1,
+	IPA_USB_RMNET = 2,
+	IPA_USB_MBIM = 3,
+	IPA_USB_DIAG = 4,
+	IPA_USB_MAX_TETH_PROT_SIZE
+};
+
+/**
+ * ipa_usb_teth_params - parameters for RNDIS/ECM initialization API
+ *
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ */
+struct ipa_usb_teth_params {
+	u8 host_ethaddr[ETH_ALEN];
+	u8 device_ethaddr[ETH_ALEN];
+};
+
+enum ipa_usb_notify_event {
+	IPA_USB_DEVICE_READY,
+	IPA_USB_REMOTE_WAKEUP,
+	IPA_USB_SUSPEND_COMPLETED
+};
+
+enum ipa_usb_max_usb_packet_size {
+	IPA_USB_HIGH_SPEED_512B = 512,
+	IPA_USB_SUPER_SPEED_1024B = 1024
+};
+
+/**
+ * ipa_usb_teth_prot_params - parameters for connecting RNDIS
+ *
+ * @max_xfer_size_bytes_to_dev: max size of UL packets in bytes
+ * @max_packet_number_to_dev: max number of UL aggregated packets
+ * @max_xfer_size_bytes_to_host: max size of DL packets in bytes
+ *
+ */
+struct ipa_usb_teth_prot_params {
+	u32 max_xfer_size_bytes_to_dev;
+	u32 max_packet_number_to_dev;
+	u32 max_xfer_size_bytes_to_host;
+};
+
+/**
+ * ipa_usb_xdci_connect_params - parameters required to start IN, OUT
+ * channels, and connect RNDIS/ECM/teth_bridge
+ *
+ * @max_pkt_size: high speed or super speed
+ * @ipa_to_usb_xferrscidx: Transfer Resource Index (XferRscIdx) for IN channel.
+ *	The hardware-assigned transfer resource index for the
+ *	transfer, which was returned in response to the
+ *	Start Transfer command. This field is used for
+ *	"Update Transfer" command.
+ *	Should be 0 <= ipa_to_usb_xferrscidx <= 127.
+ * @ipa_to_usb_xferrscidx_valid: true if xferRscIdx should be updated for IN
+ *	channel
+ * @usb_to_ipa_xferrscidx: Transfer Resource Index (XferRscIdx) for OUT channel
+ *	Should be 0 <= usb_to_ipa_xferrscidx <= 127.
+ * @usb_to_ipa_xferrscidx_valid: true if xferRscIdx should be updated for OUT
+ *	channel
+ * @teth_prot: tethering protocol
+ * @teth_prot_params: parameters for connecting the tethering protocol.
+ * @max_supported_bandwidth_mbps: maximum bandwidth need of the client in Mbps
+ */
+struct ipa_usb_xdci_connect_params {
+	enum ipa_usb_max_usb_packet_size max_pkt_size;
+	u8 ipa_to_usb_xferrscidx;
+	bool ipa_to_usb_xferrscidx_valid;
+	u8 usb_to_ipa_xferrscidx;
+	bool usb_to_ipa_xferrscidx_valid;
+	enum ipa_usb_teth_prot teth_prot;
+	struct ipa_usb_teth_prot_params teth_prot_params;
+	u32 max_supported_bandwidth_mbps;
+};
+
+/**
+ * ipa_usb_xdci_chan_scratch - xDCI protocol SW config area of
+ * channel scratch
+ *
+ * @last_trb_addr_iova: Address (iova LSB - based on alignment restrictions) of
+ *	last TRB in queue. Used to identify roll over case
+ * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
+ *	configuration). Must be aligned to max USB Packet Size.
+ *	Should be 1 <= const_buffer_size <= 31.
+ * @depcmd_low_addr: Used to generate "Update Transfer" command
+ * @depcmd_hi_addr: Used to generate "Update Transfer" command.
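+ *
+ * A minimal illustrative sketch of how a USB driver might fill this
+ * structure (last_trb_iova and depcmd_pa are hypothetical variables that
+ * stand for values the USB driver already knows):
+ *
+ *	struct ipa_usb_xdci_chan_scratch scr = {
+ *		.last_trb_addr_iova = last_trb_iova & 0xFFFF,
+ *		.const_buffer_size = 1,
+ *		.depcmd_low_addr = lower_32_bits(depcmd_pa),
+ *		.depcmd_hi_addr = upper_32_bits(depcmd_pa) & 0xFF,
+ *	};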
+ */ +struct ipa_usb_xdci_chan_scratch { + u16 last_trb_addr_iova; + u8 const_buffer_size; + u32 depcmd_low_addr; + u8 depcmd_hi_addr; +}; + +/** + * ipa_usb_xdci_chan_params - xDCI channel related properties + * + * @client: type of "client" + * @ipa_ep_cfg: IPA EP configuration + * @keep_ipa_awake: when true, IPA will not be clock gated + * @teth_prot: tethering protocol for which the channel is created + * @gevntcount_low_addr: GEVNCOUNT low address for event scratch + * @gevntcount_hi_addr: GEVNCOUNT high address for event scratch + * @dir: channel direction + * @xfer_ring_len: length of transfer ring in bytes (must be integral + * multiple of transfer element size - 16B for xDCI) + * @xfer_ring_base_addr: physical base address of transfer ring. Address must be + * aligned to xfer_ring_len rounded to power of two + * @xfer_scratch: parameters for xDCI channel scratch + * @xfer_ring_base_addr_iova: IO virtual address mapped to xfer_ring_base_addr + * @data_buff_base_len: length of data buffer allocated by USB driver + * @data_buff_base_addr: physical base address for the data buffer (where TRBs + * points) + * @data_buff_base_addr_iova: IO virtual address mapped to data_buff_base_addr + * + */ +struct ipa_usb_xdci_chan_params { + /* IPA EP params */ + enum ipa_client_type client; + struct ipa_ep_cfg ipa_ep_cfg; + bool keep_ipa_awake; + enum ipa_usb_teth_prot teth_prot; + /* event ring params */ + u32 gevntcount_low_addr; + u8 gevntcount_hi_addr; + /* transfer ring params */ + enum gsi_chan_dir dir; + u16 xfer_ring_len; + u64 xfer_ring_base_addr; + struct ipa_usb_xdci_chan_scratch xfer_scratch; + u64 xfer_ring_base_addr_iova; + u32 data_buff_base_len; + u64 data_buff_base_addr; + u64 data_buff_base_addr_iova; +}; + +/** + * ipa_usb_chan_out_params - out parameters for channel request + * + * @clnt_hdl: opaque client handle assigned by IPA to client + * @db_reg_phs_addr_lsb: Physical address of doorbell register where the 32 + * LSBs of the doorbell value should be written + * @db_reg_phs_addr_msb: Physical address of doorbell register where the 32 + * MSBs of the doorbell value should be written + * + */ +struct ipa_req_chan_out_params { + u32 clnt_hdl; + u32 db_reg_phs_addr_lsb; + u32 db_reg_phs_addr_msb; +}; + +#ifdef CONFIG_IPA3 + +/** + * ipa_usb_init_teth_prot - Peripheral should call this function to initialize + * RNDIS/ECM/teth_bridge/DPL, prior to calling ipa_usb_xdci_connect() + * + * @usb_teth_type: tethering protocol type + * @teth_params: pointer to tethering protocol parameters. + * Should be struct ipa_usb_teth_params for RNDIS/ECM, + * or NULL for teth_bridge + * @ipa_usb_notify_cb: will be called to notify USB driver on certain events + * @user_data: cookie used for ipa_usb_notify_cb + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot, + struct ipa_usb_teth_params *teth_params, + int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, + void *), + void *user_data); + +/** + * ipa_usb_xdci_connect - Peripheral should call this function to start IN & + * OUT xDCI channels, and connect RNDIS/ECM/MBIM/RMNET. + * For DPL, only starts IN channel. + * + * @ul_chan_params: parameters for allocating UL xDCI channel. containing + * required info on event and transfer rings, and IPA EP + * configuration + * @ul_out_params: [out] opaque client handle assigned by IPA to client & DB + * registers physical address for UL channel + * @dl_chan_params: parameters for allocating DL xDCI channel. 
containing + * required info on event and transfer rings, and IPA EP + * configuration + * @dl_out_params: [out] opaque client handle assigned by IPA to client & DB + * registers physical address for DL channel + * @connect_params: handles and scratch params of the required channels, + * tethering protocol and the tethering protocol parameters. + * + * Note: Should not be called from atomic context + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params, + struct ipa_usb_xdci_chan_params *dl_chan_params, + struct ipa_req_chan_out_params *ul_out_params, + struct ipa_req_chan_out_params *dl_out_params, + struct ipa_usb_xdci_connect_params *connect_params); + +/** + * ipa_usb_xdci_disconnect - Peripheral should call this function to stop + * IN & OUT xDCI channels + * For DPL, only stops IN channel. + * + * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect() + * for OUT channel + * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect() + * for IN channel + * @teth_prot: tethering protocol + * + * Note: Should not be called from atomic context + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot); + +/** + * ipa_usb_deinit_teth_prot - Peripheral should call this function to deinit + * RNDIS/ECM/MBIM/RMNET + * + * @teth_prot: tethering protocol + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot); + +/** + * ipa_usb_xdci_suspend - Peripheral should call this function to suspend + * IN & OUT or DPL xDCI channels + * + * @ul_clnt_hdl: client handle previously obtained from + * ipa_usb_xdci_connect() for OUT channel + * @dl_clnt_hdl: client handle previously obtained from + * ipa_usb_xdci_connect() for IN channel + * @teth_prot: tethering protocol + * @with_remote_wakeup: Does host support remote wakeup? 
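+ *
+ * Illustrative usage sketch (the handles are hypothetical and assumed to
+ * come from a successful ipa_usb_xdci_connect() call):
+ *
+ *	ret = ipa_usb_xdci_suspend(out_hdl, in_hdl, IPA_USB_RNDIS, true);
+ *
+ * On a later IPA_USB_REMOTE_WAKEUP notification the peripheral would
+ * typically call ipa_usb_xdci_resume() with the same handles.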
+ * + * Note: Should not be called from atomic context + * Note: for DPL, the ul will be ignored as irrelevant + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot, + bool with_remote_wakeup); + +/** + * ipa_usb_xdci_resume - Peripheral should call this function to resume + * IN & OUT or DPL xDCI channels + * + * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect() + * for OUT channel + * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect() + * for IN channel + * @teth_prot: tethering protocol + * + * Note: Should not be called from atomic context + * Note: for DPL, the ul will be ignored as irrelevant + * + * @Return 0 on success, negative on failure + */ +int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot); + +#else /* CONFIG_IPA3 */ + +static inline int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot, + struct ipa_usb_teth_params *teth_params, + int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, + void *), + void *user_data) +{ + return -EPERM; +} + +static inline int ipa_usb_xdci_connect( + struct ipa_usb_xdci_chan_params *ul_chan_params, + struct ipa_usb_xdci_chan_params *dl_chan_params, + struct ipa_req_chan_out_params *ul_out_params, + struct ipa_req_chan_out_params *dl_out_params, + struct ipa_usb_xdci_connect_params *connect_params) +{ + return -EPERM; +} + +static inline int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + return -EPERM; +} + +static inline int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot) +{ + return -EPERM; +} + +static inline int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot, + bool with_remote_wakeup) +{ + return -EPERM; +} + +static inline int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + return -EPERM; +} + + +#endif /* CONFIG_IPA3 */ + +#endif /* _IPA_USB_H_ */ diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h new file mode 100644 index 000000000000..ebca44694898 --- /dev/null +++ b/include/linux/msm_gsi.h @@ -0,0 +1,1294 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef MSM_GSI_H +#define MSM_GSI_H +#include + +enum gsi_ver { + GSI_VER_ERR = 0, + GSI_VER_1_0 = 1, + GSI_VER_1_2 = 2, + GSI_VER_1_3 = 3, + GSI_VER_2_0 = 4, + GSI_VER_MAX, +}; + +enum gsi_status { + GSI_STATUS_SUCCESS = 0, + GSI_STATUS_ERROR = 1, + GSI_STATUS_RING_INSUFFICIENT_SPACE = 2, + GSI_STATUS_RING_EMPTY = 3, + GSI_STATUS_RES_ALLOC_FAILURE = 4, + GSI_STATUS_BAD_STATE = 5, + GSI_STATUS_INVALID_PARAMS = 6, + GSI_STATUS_UNSUPPORTED_OP = 7, + GSI_STATUS_NODEV = 8, + GSI_STATUS_POLL_EMPTY = 9, + GSI_STATUS_EVT_RING_INCOMPATIBLE = 10, + GSI_STATUS_TIMED_OUT = 11, + GSI_STATUS_AGAIN = 12, +}; + +enum gsi_per_evt { + GSI_PER_EVT_GLOB_ERROR, + GSI_PER_EVT_GLOB_GP1, + GSI_PER_EVT_GLOB_GP2, + GSI_PER_EVT_GLOB_GP3, + GSI_PER_EVT_GENERAL_BREAK_POINT, + GSI_PER_EVT_GENERAL_BUS_ERROR, + GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW, + GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW, +}; + +/** + * gsi_per_notify - Peripheral callback info + * + * @user_data: cookie supplied in gsi_register_device + * @evt_id: type of notification + * @err_desc: error related information + * + */ +struct gsi_per_notify { + void *user_data; + enum gsi_per_evt evt_id; + union { + uint16_t err_desc; + } data; +}; + +enum gsi_intr_type { + GSI_INTR_MSI = 0x0, + GSI_INTR_IRQ = 0x1 +}; + + +/** + * gsi_per_props - Peripheral related properties + * + * @gsi: GSI core version + * @ee: EE where this driver and peripheral driver runs + * @intr: control interrupt type + * @intvec: write data for MSI write + * @msi_addr: MSI address + * @irq: IRQ number + * @phys_addr: physical address of GSI block + * @size: register size of GSI block + * @notify_cb: general notification callback + * @req_clk_cb: callback to request peripheral clock + * granted should be set to true if request is completed + * synchronously, false otherwise (peripheral needs + * to call gsi_complete_clk_grant later when request is + * completed) + * if this callback is not provided, then GSI will assume + * peripheral is clocked at all times + * @rel_clk_cb: callback to release peripheral clock + * @user_data: cookie used for notifications + * + * All the callbacks are in interrupt context + * + */ +struct gsi_per_props { + enum gsi_ver ver; + unsigned int ee; + enum gsi_intr_type intr; + uint32_t intvec; + uint64_t msi_addr; + unsigned int irq; + phys_addr_t phys_addr; + unsigned long size; + void (*notify_cb)(struct gsi_per_notify *notify); + void (*req_clk_cb)(void *user_data, bool *granted); + int (*rel_clk_cb)(void *user_data); + void *user_data; +}; + +enum gsi_evt_err { + GSI_EVT_OUT_OF_BUFFERS_ERR = 0x0, + GSI_EVT_OUT_OF_RESOURCES_ERR = 0x1, + GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR = 0x2, + GSI_EVT_EVT_RING_EMPTY_ERR = 0x3, +}; + +/** + * gsi_evt_err_notify - event ring error callback info + * + * @user_data: cookie supplied in gsi_alloc_evt_ring + * @evt_id: type of error + * @err_desc: more info about the error + * + */ +struct gsi_evt_err_notify { + void *user_data; + enum gsi_evt_err evt_id; + uint16_t err_desc; +}; + +enum gsi_evt_chtype { + GSI_EVT_CHTYPE_MHI_EV = 0x0, + GSI_EVT_CHTYPE_XHCI_EV = 0x1, + GSI_EVT_CHTYPE_GPI_EV = 0x2, + GSI_EVT_CHTYPE_XDCI_EV = 0x3 +}; + +enum gsi_evt_ring_elem_size { + GSI_EVT_RING_RE_SIZE_4B = 4, + GSI_EVT_RING_RE_SIZE_16B = 16, +}; + +/** + * gsi_evt_ring_props - Event ring related properties + * + * @intf: interface type (of the associated channel) + * @intr: interrupt type + * @re_size: size of event ring element + * @ring_len: length of ring in bytes (must be integral multiple of + * re_size) + * @ring_base_addr: 
physical base address of ring. Address must be aligned to + * ring_len rounded to power of two + * @ring_base_vaddr: virtual base address of ring (set to NULL when not + * applicable) + * @int_modt: cycles base interrupt moderation (32KHz clock) + * @int_modc: interrupt moderation packet counter + * @intvec: write data for MSI write + * @msi_addr: MSI address + * @rp_update_addr: physical address to which event read pointer should be + * written on every event generation. must be set to 0 when + * no update is desdired + * @exclusive: if true, only one GSI channel can be associated with this + * event ring. if false, the event ring can be shared among + * multiple GSI channels but in that case no polling + * (GSI_CHAN_MODE_POLL) is supported on any of those channels + * @err_cb: error notification callback + * @user_data: cookie used for error notifications + * @evchid_valid: is evchid valid? + * @evchid: the event ID that is being specifically requested (this is + * relevant for MHI where doorbell routing requires ERs to be + * physically contiguous) + */ +struct gsi_evt_ring_props { + enum gsi_evt_chtype intf; + enum gsi_intr_type intr; + enum gsi_evt_ring_elem_size re_size; + uint16_t ring_len; + uint64_t ring_base_addr; + void *ring_base_vaddr; + uint16_t int_modt; + uint8_t int_modc; + uint32_t intvec; + uint64_t msi_addr; + uint64_t rp_update_addr; + bool exclusive; + void (*err_cb)(struct gsi_evt_err_notify *notify); + void *user_data; + bool evchid_valid; + uint8_t evchid; +}; + +enum gsi_chan_mode { + GSI_CHAN_MODE_CALLBACK = 0x0, + GSI_CHAN_MODE_POLL = 0x1, +}; + +enum gsi_chan_prot { + GSI_CHAN_PROT_MHI = 0x0, + GSI_CHAN_PROT_XHCI = 0x1, + GSI_CHAN_PROT_GPI = 0x2, + GSI_CHAN_PROT_XDCI = 0x3 +}; + +enum gsi_chan_dir { + GSI_CHAN_DIR_FROM_GSI = 0x0, + GSI_CHAN_DIR_TO_GSI = 0x1 +}; + +enum gsi_max_prefetch { + GSI_ONE_PREFETCH_SEG = 0x0, + GSI_TWO_PREFETCH_SEG = 0x1 +}; + +enum gsi_chan_evt { + GSI_CHAN_EVT_INVALID = 0x0, + GSI_CHAN_EVT_SUCCESS = 0x1, + GSI_CHAN_EVT_EOT = 0x2, + GSI_CHAN_EVT_OVERFLOW = 0x3, + GSI_CHAN_EVT_EOB = 0x4, + GSI_CHAN_EVT_OOB = 0x5, + GSI_CHAN_EVT_DB_MODE = 0x6, + GSI_CHAN_EVT_UNDEFINED = 0x10, + GSI_CHAN_EVT_RE_ERROR = 0x11, +}; + +/** + * gsi_chan_xfer_notify - Channel callback info + * + * @chan_user_data: cookie supplied in gsi_alloc_channel + * @xfer_user_data: cookie of the gsi_xfer_elem that caused the + * event to be generated + * @evt_id: type of event triggered by the associated TRE + * (corresponding to xfer_user_data) + * @bytes_xfered: number of bytes transferred by the associated TRE + * (corresponding to xfer_user_data) + * + */ +struct gsi_chan_xfer_notify { + void *chan_user_data; + void *xfer_user_data; + enum gsi_chan_evt evt_id; + uint16_t bytes_xfered; +}; + +enum gsi_chan_err { + GSI_CHAN_INVALID_TRE_ERR = 0x0, + GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR = 0x1, + GSI_CHAN_OUT_OF_BUFFERS_ERR = 0x2, + GSI_CHAN_OUT_OF_RESOURCES_ERR = 0x3, + GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR = 0x4, + GSI_CHAN_HWO_1_ERR = 0x5 +}; + +/** + * gsi_chan_err_notify - Channel general callback info + * + * @chan_user_data: cookie supplied in gsi_alloc_channel + * @evt_id: type of error + * @err_desc: more info about the error + * + */ +struct gsi_chan_err_notify { + void *chan_user_data; + enum gsi_chan_err evt_id; + uint16_t err_desc; +}; + +enum gsi_chan_ring_elem_size { + GSI_CHAN_RE_SIZE_4B = 4, + GSI_CHAN_RE_SIZE_16B = 16, + GSI_CHAN_RE_SIZE_32B = 32, +}; + +enum gsi_chan_use_db_eng { + GSI_CHAN_DIRECT_MODE = 0x0, + GSI_CHAN_DB_MODE = 0x1, +}; + +/** + * 
gsi_chan_props - Channel related properties + * + * @prot: interface type + * @dir: channel direction + * @ch_id: virtual channel ID + * @evt_ring_hdl: handle of associated event ring. set to ~0 if no + * event ring associated + * @re_size: size of channel ring element + * @ring_len: length of ring in bytes (must be integral multiple of + * re_size) + * @max_re_expected: maximal number of ring elements expected to be queued. + * used for data path statistics gathering. if 0 provided + * ring_len / re_size will be used. + * @ring_base_addr: physical base address of ring. Address must be aligned to + * ring_len rounded to power of two + * @ring_base_vaddr: virtual base address of ring (set to NULL when not + * applicable) + * @use_db_eng: 0 => direct mode (doorbells are written directly to RE + * engine) + * 1 => DB mode (doorbells are written to DB engine) + * @max_prefetch: limit number of pre-fetch segments for channel + * @low_weight: low channel weight (priority of channel for RE engine + * round robin algorithm); must be >= 1 + * @xfer_cb: transfer notification callback, this callback happens + * on event boundaries + * + * e.g. 1 + * + * out TD with 3 REs + * + * RE1: EOT=0, EOB=0, CHAIN=1; + * RE2: EOT=0, EOB=0, CHAIN=1; + * RE3: EOT=1, EOB=0, CHAIN=0; + * + * the callback will be triggered for RE3 using the + * xfer_user_data of that RE + * + * e.g. 2 + * + * in REs + * + * RE1: EOT=1, EOB=0, CHAIN=0; + * RE2: EOT=1, EOB=0, CHAIN=0; + * RE3: EOT=1, EOB=0, CHAIN=0; + * + * received packet consumes all of RE1, RE2 and part of RE3 + * for EOT condition. there will be three callbacks in below + * order + * + * callback for RE1 using GSI_CHAN_EVT_OVERFLOW + * callback for RE2 using GSI_CHAN_EVT_OVERFLOW + * callback for RE3 using GSI_CHAN_EVT_EOT + * + * @err_cb: error notification callback + * @chan_user_data: cookie used for notifications + * + * All the callbacks are in interrupt context + * + */ +struct gsi_chan_props { + enum gsi_chan_prot prot; + enum gsi_chan_dir dir; + uint8_t ch_id; + unsigned long evt_ring_hdl; + enum gsi_chan_ring_elem_size re_size; + uint16_t ring_len; + uint16_t max_re_expected; + uint64_t ring_base_addr; + void *ring_base_vaddr; + enum gsi_chan_use_db_eng use_db_eng; + enum gsi_max_prefetch max_prefetch; + uint8_t low_weight; + void (*xfer_cb)(struct gsi_chan_xfer_notify *notify); + void (*err_cb)(struct gsi_chan_err_notify *notify); + void *chan_user_data; +}; + +enum gsi_xfer_flag { + GSI_XFER_FLAG_CHAIN = 0x1, + GSI_XFER_FLAG_EOB = 0x100, + GSI_XFER_FLAG_EOT = 0x200, + GSI_XFER_FLAG_BEI = 0x400 +}; + +enum gsi_xfer_elem_type { + GSI_XFER_ELEM_DATA, + GSI_XFER_ELEM_IMME_CMD, + GSI_XFER_ELEM_NOP, +}; + +/** + * gsi_xfer_elem - Metadata about a single transfer + * + * @addr: physical address of buffer + * @len: size of buffer for GSI_XFER_ELEM_DATA: + * for outbound transfers this is the number of bytes to + * transfer. 
+ * for inbound transfers, this is the maximum number of + * bytes the host expects from device in this transfer + * + * immediate command opcode for GSI_XFER_ELEM_IMME_CMD + * @flags: transfer flags, OR of all the applicable flags + * + * GSI_XFER_FLAG_BEI: Block event interrupt + * 1: Event generated by this ring element must not assert + * an interrupt to the host + * 0: Event generated by this ring element must assert an + * interrupt to the host + * + * GSI_XFER_FLAG_EOT: Interrupt on end of transfer + * 1: If an EOT condition is encountered when processing + * this ring element, an event is generated by the device + * with its completion code set to EOT. + * 0: If an EOT condition is encountered for this ring + * element, a completion event is not be generated by the + * device, unless IEOB is 1 + * + * GSI_XFER_FLAG_EOB: Interrupt on end of block + * 1: Device notifies host after processing this ring element + * by sending a completion event + * 0: Completion event is not required after processing this + * ring element + * + * GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring + * elements in a TD + * + * @type: transfer type + * + * GSI_XFER_ELEM_DATA: for all data transfers + * GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands + * GSI_XFER_ELEM_NOP: for event generation only + * + * @xfer_user_data: cookie used in xfer_cb + * + */ +struct gsi_xfer_elem { + uint64_t addr; + uint16_t len; + uint16_t flags; + enum gsi_xfer_elem_type type; + void *xfer_user_data; +}; + +/** + * gsi_gpi_channel_scratch - GPI protocol SW config area of + * channel scratch + * + * @max_outstanding_tre: Used for the prefetch management sequence by the + * sequencer. Defines the maximum number of allowed + * outstanding TREs in IPA/GSI (in Bytes). RE engine + * prefetch will be limited by this configuration. It + * is suggested to configure this value to IPA_IF + * channel TLV queue size times element size. To disable + * the feature in doorbell mode (DB Mode=1). Maximum + * outstanding TREs should be set to 64KB + * (or any value larger or equal to ring length . RLEN) + * @outstanding_threshold: Used for the prefetch management sequence by the + * sequencer. Defines the threshold (in Bytes) as to when + * to update the channel doorbell. Should be smaller than + * Maximum outstanding TREs. value. It is suggested to + * configure this value to 2 * element size. + */ +struct __packed gsi_gpi_channel_scratch { + uint64_t resvd1; + uint32_t resvd2:16; + uint32_t max_outstanding_tre:16; + uint32_t resvd3:16; + uint32_t outstanding_threshold:16; +}; + +/** + * gsi_mhi_channel_scratch - MHI protocol SW config area of + * channel scratch + * + * @mhi_host_wp_addr: Valid only when UL/DL Sync En is asserted. Defines + * address in host from which channel write pointer + * should be read in polling mode + * @assert_bit40: 1: bit #41 in address should be asserted upon + * IPA_IF.ProcessDescriptor routine (for MHI over PCIe + * transfers) + * 0: bit #41 in address should be deasserted upon + * IPA_IF.ProcessDescriptor routine (for non-MHI over + * PCIe transfers) + * @polling_configuration: Uplink channels: Defines timer to poll on MHI + * context. Range: 1 to 31 milliseconds. + * Downlink channel: Defines transfer ring buffer + * availability threshold to poll on MHI context in + * multiple of 8. Range: 0 to 31, meaning 0 to 258 ring + * elements. E.g., value of 2 indicates 16 ring elements. 
+ * Valid only when Burst Mode Enabled is set to 1 + * @burst_mode_enabled: 0: Burst mode is disabled for this channel + * 1: Burst mode is enabled for this channel + * @polling_mode: 0: the channel is not in polling mode, meaning the + * host should ring DBs. + * 1: the channel is in polling mode, meaning the host + * @oob_mod_threshold: Defines OOB moderation threshold. Units are in 8 + * ring elements. + * should not ring DBs until notified of DB mode/OOB mode + * @max_outstanding_tre: Used for the prefetch management sequence by the + * sequencer. Defines the maximum number of allowed + * outstanding TREs in IPA/GSI (in Bytes). RE engine + * prefetch will be limited by this configuration. It + * is suggested to configure this value to IPA_IF + * channel TLV queue size times element size. + * To disable the feature in doorbell mode (DB Mode=1). + * Maximum outstanding TREs should be set to 64KB + * (or any value larger or equal to ring length . RLEN) + * @outstanding_threshold: Used for the prefetch management sequence by the + * sequencer. Defines the threshold (in Bytes) as to when + * to update the channel doorbell. Should be smaller than + * Maximum outstanding TREs. value. It is suggested to + * configure this value to min(TLV_FIFO_SIZE/2,8) * + * element size. + */ +struct __packed gsi_mhi_channel_scratch { + uint64_t mhi_host_wp_addr; + uint32_t rsvd1:1; + uint32_t assert_bit40:1; + uint32_t polling_configuration:5; + uint32_t burst_mode_enabled:1; + uint32_t polling_mode:1; + uint32_t oob_mod_threshold:5; + uint32_t resvd2:2; + uint32_t max_outstanding_tre:16; + uint32_t resvd3:16; + uint32_t outstanding_threshold:16; +}; + +/** + * gsi_xdci_channel_scratch - xDCI protocol SW config area of + * channel scratch + * + * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregationi + * configuration). Must be aligned to Max USB Packet Size + * @xferrscidx: Transfer Resource Index (XferRscIdx). The hardware-assigned + * transfer resource index for the transfer, which was + * returned in response to the Start Transfer command. + * This field is used for "Update Transfer" command + * @last_trb_addr: Address (LSB - based on alignment restrictions) of + * last TRB in queue. Used to identify rollover case + * @depcmd_low_addr: Used to generate "Update Transfer" command + * @max_outstanding_tre: Used for the prefetch management sequence by the + * sequencer. Defines the maximum number of allowed + * outstanding TREs in IPA/GSI (in Bytes). RE engine + * prefetch will be limited by this configuration. It + * is suggested to configure this value to IPA_IF + * channel TLV queue size times element size. + * To disable the feature in doorbell mode (DB Mode=1) + * Maximum outstanding TREs should be set to 64KB + * (or any value larger or equal to ring length . RLEN) + * @depcmd_hi_addr: Used to generate "Update Transfer" command + * @outstanding_threshold: Used for the prefetch management sequence by the + * sequencer. Defines the threshold (in Bytes) as to when + * to update the channel doorbell. Should be smaller than + * Maximum outstanding TREs. value. It is suggested to + * configure this value to 2 * element size. for MBIM the + * suggested configuration is the element size. 
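+ *
+ * Illustrative sketch of programming this area after gsi_alloc_channel(),
+ * following the suggestions above (tlv_size, last_trb_iova, depcmd_pa and
+ * xfer_rsc_idx are hypothetical values owned by the client):
+ *
+ *	union gsi_channel_scratch scr = { { 0 } };
+ *
+ *	scr.xdci.last_trb_addr = last_trb_iova & 0xFFFF;
+ *	scr.xdci.xferrscidx = xfer_rsc_idx;
+ *	scr.xdci.const_buffer_size = 1;
+ *	scr.xdci.depcmd_low_addr = lower_32_bits(depcmd_pa);
+ *	scr.xdci.depcmd_hi_addr = upper_32_bits(depcmd_pa) & 0xFF;
+ *	scr.xdci.max_outstanding_tre = tlv_size * GSI_CHAN_RE_SIZE_16B;
+ *	scr.xdci.outstanding_threshold = 2 * GSI_CHAN_RE_SIZE_16B;
+ *	ret = gsi_write_channel_scratch(chan_hdl, scr);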
+ */ +struct __packed gsi_xdci_channel_scratch { + uint32_t last_trb_addr:16; + uint32_t resvd1:4; + uint32_t xferrscidx:7; + uint32_t const_buffer_size:5; + uint32_t depcmd_low_addr; + uint32_t depcmd_hi_addr:8; + uint32_t resvd2:8; + uint32_t max_outstanding_tre:16; + uint32_t resvd3:16; + uint32_t outstanding_threshold:16; +}; + +/** + * gsi_channel_scratch - channel scratch SW config area + * + */ +union __packed gsi_channel_scratch { + struct __packed gsi_gpi_channel_scratch gpi; + struct __packed gsi_mhi_channel_scratch mhi; + struct __packed gsi_xdci_channel_scratch xdci; + struct __packed { + uint32_t word1; + uint32_t word2; + uint32_t word3; + uint32_t word4; + } data; +}; + +/** + * gsi_mhi_evt_scratch - MHI protocol SW config area of + * event scratch + */ +struct __packed gsi_mhi_evt_scratch { + uint32_t resvd1; + uint32_t resvd2; +}; + +/** + * gsi_xdci_evt_scratch - xDCI protocol SW config area of + * event scratch + * + */ +struct __packed gsi_xdci_evt_scratch { + uint32_t gevntcount_low_addr; + uint32_t gevntcount_hi_addr:8; + uint32_t resvd1:24; +}; + +/** + * gsi_evt_scratch - event scratch SW config area + * + */ +union __packed gsi_evt_scratch { + struct __packed gsi_mhi_evt_scratch mhi; + struct __packed gsi_xdci_evt_scratch xdci; + struct __packed { + uint32_t word1; + uint32_t word2; + } data; +}; + +/** + * gsi_device_scratch - EE scratch config parameters + * + * @mhi_base_chan_idx_valid: is mhi_base_chan_idx valid? + * @mhi_base_chan_idx: base index of IPA MHI channel indexes. + * IPA MHI channel index = GSI channel ID + + * MHI base channel index + * @max_usb_pkt_size_valid: is max_usb_pkt_size valid? + * @max_usb_pkt_size: max USB packet size in bytes (valid values are + * 512 and 1024) + */ +struct gsi_device_scratch { + bool mhi_base_chan_idx_valid; + uint8_t mhi_base_chan_idx; + bool max_usb_pkt_size_valid; + uint16_t max_usb_pkt_size; +}; + +/** + * gsi_chan_info - information about channel occupancy + * + * @wp: channel write pointer (physical address) + * @rp: channel read pointer (physical address) + * @evt_valid: is evt* info valid? + * @evt_wp: event ring write pointer (physical address) + * @evt_rp: event ring read pointer (physical address) + */ +struct gsi_chan_info { + uint64_t wp; + uint64_t rp; + bool evt_valid; + uint64_t evt_wp; + uint64_t evt_rp; +}; + +#ifdef CONFIG_GSI +/** + * gsi_register_device - Peripheral should call this function to + * register itself with GSI before invoking any other APIs + * + * @props: Peripheral properties + * @dev_hdl: Handle populated by GSI, opaque to client + * + * @Return -GSI_STATUS_AGAIN if request should be re-tried later + * other error codes for failure + */ +int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl); + +/** + * gsi_complete_clk_grant - Peripheral should call this function to + * grant the clock resource requested by GSI previously that could not + * be granted synchronously. 
GSI will release the clock resource using + * the rel_clk_cb when appropriate + * + * @dev_hdl: Client handle previously obtained from + * gsi_register_device + * + * @Return gsi_status + */ +int gsi_complete_clk_grant(unsigned long dev_hdl); + +/** + * gsi_write_device_scratch - Peripheral should call this function to + * write to the EE scratch area + * + * @dev_hdl: Client handle previously obtained from + * gsi_register_device + * @val: Value to write + * + * @Return gsi_status + */ +int gsi_write_device_scratch(unsigned long dev_hdl, + struct gsi_device_scratch *val); + +/** + * gsi_deregister_device - Peripheral should call this function to + * de-register itself with GSI + * + * @dev_hdl: Client handle previously obtained from + * gsi_register_device + * @force: When set to true, cleanup is performed even if there + * are in use resources like channels, event rings, etc. + * this would be used after GSI reset to recover from some + * fatal error + * When set to false, there must not exist any allocated + * channels and event rings. + * + * @Return gsi_status + */ +int gsi_deregister_device(unsigned long dev_hdl, bool force); + +/** + * gsi_alloc_evt_ring - Peripheral should call this function to + * allocate an event ring + * + * @props: Event ring properties + * @dev_hdl: Client handle previously obtained from + * gsi_register_device + * @evt_ring_hdl: Handle populated by GSI, opaque to client + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl, + unsigned long *evt_ring_hdl); + +/** + * gsi_write_evt_ring_scratch - Peripheral should call this function to + * write to the scratch area of the event ring context + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * @val: Value to write + * + * @Return gsi_status + */ +int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl, + union __packed gsi_evt_scratch val); + +/** + * gsi_dealloc_evt_ring - Peripheral should call this function to + * de-allocate an event ring. 
There should not exist any active + * channels using this event ring + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl); + +/** + * gsi_query_evt_ring_db_addr - Peripheral should call this function to + * query the physical addresses of the event ring doorbell registers + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * @db_addr_wp_lsb: Physical address of doorbell register where the 32 + * LSBs of the doorbell value should be written + * @db_addr_wp_msb: Physical address of doorbell register where the 32 + * MSBs of the doorbell value should be written + * + * @Return gsi_status + */ +int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl, + uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb); + +/** + * gsi_ring_evt_ring_db - Peripheral should call this function for + * ringing the event ring doorbell with given value + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * @value: The value to be used for ringing the doorbell + * + * @Return gsi_status + */ +int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value); + +/** + * gsi_reset_evt_ring - Peripheral should call this function to + * reset an event ring to recover from error state + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_reset_evt_ring(unsigned long evt_ring_hdl); + +/** + * gsi_get_evt_ring_cfg - This function returns the current config + * of the specified event ring + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * @props: where to copy properties to + * @scr: where to copy scratch info to + * + * @Return gsi_status + */ +int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl, + struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr); + +/** + * gsi_set_evt_ring_cfg - This function applies the supplied config + * to the specified event ring. 
+ * + * exclusive property of the event ring cannot be changed after + * gsi_alloc_evt_ring + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * @props: the properties to apply + * @scr: the scratch info to apply + * + * @Return gsi_status + */ +int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl, + struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr); + +/** + * gsi_alloc_channel - Peripheral should call this function to + * allocate a channel + * + * @props: Channel properties + * @dev_hdl: Client handle previously obtained from + * gsi_register_device + * @chan_hdl: Handle populated by GSI, opaque to client + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl, + unsigned long *chan_hdl); + +/** + * gsi_write_channel_scratch - Peripheral should call this function to + * write to the scratch area of the channel context + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @val: Value to write + * + * @Return gsi_status + */ +int gsi_write_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch val); + +/** + * gsi_start_channel - Peripheral should call this function to + * start a channel i.e put into running state + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_start_channel(unsigned long chan_hdl); + +/** + * gsi_stop_channel - Peripheral should call this function to + * stop a channel. Stop will happen on a packet boundary + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * This function can sleep + * + * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again + * other error codes for failure + */ +int gsi_stop_channel(unsigned long chan_hdl); + +/** + * gsi_reset_channel - Peripheral should call this function to + * reset a channel to recover from error state + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_reset_channel(unsigned long chan_hdl); + +/** + * gsi_dealloc_channel - Peripheral should call this function to + * de-allocate a channel + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * This function can sleep + * + * @Return gsi_status + */ +int gsi_dealloc_channel(unsigned long chan_hdl); + +/** + * gsi_stop_db_channel - Peripheral should call this function to + * stop a channel when all transfer elements till the doorbell + * have been processed + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * This function can sleep + * + * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again + * other error codes for failure + */ +int gsi_stop_db_channel(unsigned long chan_hdl); + +/** + * gsi_query_channel_db_addr - Peripheral should call this function to + * query the physical addresses of the channel doorbell registers + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @db_addr_wp_lsb: Physical address of doorbell register where the 32 + * LSBs of the doorbell value should be written + * @db_addr_wp_msb: Physical address of doorbell register where the 32 + * MSBs of the doorbell value should be written + * + * @Return gsi_status + */ +int gsi_query_channel_db_addr(unsigned long chan_hdl, + uint32_t *db_addr_wp_lsb, uint32_t 
*db_addr_wp_msb); + +/** + * gsi_query_channel_info - Peripheral can call this function to query the + * channel and associated event ring (if any) status. + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @info: Where to read the values into + * + * @Return gsi_status + */ +int gsi_query_channel_info(unsigned long chan_hdl, + struct gsi_chan_info *info); + +/** + * gsi_is_channel_empty - Peripheral can call this function to query if + * the channel is empty. This is only applicable to GPI. "Empty" means + * GSI has consumed all descriptors for a TO_GSI channel and SW has + * processed all completed descriptors for a FROM_GSI channel. + * + * @chan_hdl: Client handle previously obtained from gsi_alloc_channel + * @is_empty: set by GSI based on channel emptiness + * + * @Return gsi_status + */ +int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty); + +/** + * gsi_get_channel_cfg - This function returns the current config + * of the specified channel + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @props: where to copy properties to + * @scr: where to copy scratch info to + * + * @Return gsi_status + */ +int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props, + union gsi_channel_scratch *scr); + +/** + * gsi_set_channel_cfg - This function applies the supplied config + * to the specified channel + * + * ch_id and evt_ring_hdl of the channel cannot be changed after + * gsi_alloc_channel + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @props: the properties to apply + * @scr: the scratch info to apply + * + * @Return gsi_status + */ +int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props, + union gsi_channel_scratch *scr); + +/** + * gsi_poll_channel - Peripheral should call this function to query for + * completed transfer descriptors. + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @notify: Information about the completed transfer if any + * + * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers + * completed) + */ +int gsi_poll_channel(unsigned long chan_hdl, + struct gsi_chan_xfer_notify *notify); + +/** + * gsi_config_channel_mode - Peripheral should call this function + * to configure the channel mode. + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @mode: Mode to move the channel into + * + * @Return gsi_status + */ +int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode); + +/** + * gsi_queue_xfer - Peripheral should call this function + * to queue transfers on the given channel + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @num_xfers: Number of transfer in the array @ xfer + * @xfer: Array of num_xfers transfer descriptors + * @ring_db: If true, tell HW about these queued xfers + * If false, do not notify HW at this time + * + * @Return gsi_status + */ +int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers, + struct gsi_xfer_elem *xfer, bool ring_db); + +/** + * gsi_start_xfer - Peripheral should call this function to + * inform HW about queued xfers + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * + * @Return gsi_status + */ +int gsi_start_xfer(unsigned long chan_hdl); + +/** + * gsi_configure_regs - Peripheral should call this function + * to configure the GSI registers before/after the FW is + * loaded but before it is enabled. 
+ * + * @gsi_base_addr: Base address of GSI register space + * @gsi_size: Mapping size of the GSI register space + * @per_base_addr: Base address of the peripheral using GSI + * + * @Return gsi_status + */ +int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size, + phys_addr_t per_base_addr); + +/** + * gsi_enable_fw - Peripheral should call this function + * to enable the GSI FW after the FW has been loaded to the SRAM. + * + * @gsi_base_addr: Base address of GSI register space + * @gsi_size: Mapping size of the GSI register space + * @ver: GSI core version + + * @Return gsi_status + */ +int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver); + +/** + * gsi_get_inst_ram_offset_and_size - Peripheral should call this function + * to get instruction RAM base address offset and size. Peripheral typically + * uses this info to load GSI FW into the IRAM. + * + * @base_offset:[OUT] - IRAM base offset address + * @size: [OUT] - IRAM size + + * @Return none + */ +void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset, + unsigned long *size); + +/** + * gsi_halt_channel_ee - Peripheral should call this function + * to stop other EE's channel. This is usually used in SSR clean + * + * @chan_idx: Virtual channel index + * @ee: EE + * @code: [out] response code for operation + + * @Return gsi_status + */ +int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code); + +/* + * Here is a typical sequence of calls + * + * gsi_register_device + * + * gsi_write_device_scratch (if the protocol needs this) + * + * gsi_alloc_evt_ring (for as many event rings as needed) + * gsi_write_evt_ring_scratch + * + * gsi_alloc_channel (for as many channels as needed; channels can have + * no event ring, an exclusive event ring or a shared event ring) + * gsi_write_channel_scratch + * gsi_start_channel + * gsi_queue_xfer/gsi_start_xfer + * gsi_config_channel_mode/gsi_poll_channel (if clients wants to poll on + * xfer completions) + * gsi_stop_db_channel/gsi_stop_channel + * + * gsi_dealloc_channel + * + * gsi_dealloc_evt_ring + * + * gsi_deregister_device + * + */ +#else +static inline int gsi_register_device(struct gsi_per_props *props, + unsigned long *dev_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_complete_clk_grant(unsigned long dev_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_write_device_scratch(unsigned long dev_hdl, + struct gsi_device_scratch *val) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_deregister_device(unsigned long dev_hdl, bool force) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, + unsigned long dev_hdl, + unsigned long *evt_ring_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl, + union __packed gsi_evt_scratch val) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl, + uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, + uint64_t value) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_reset_evt_ring(unsigned long evt_ring_hdl) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_alloc_channel(struct 
gsi_chan_props *props,
+		unsigned long dev_hdl,
+		unsigned long *chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_write_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch val)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_start_channel(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_stop_channel(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_reset_channel(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_dealloc_channel(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_stop_db_channel(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_query_channel_db_addr(unsigned long chan_hdl,
+		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_query_channel_info(unsigned long chan_hdl,
+		struct gsi_chan_info *info)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_poll_channel(unsigned long chan_hdl,
+		struct gsi_chan_xfer_notify *notify)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_config_channel_mode(unsigned long chan_hdl,
+	enum gsi_chan_mode mode)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
+		struct gsi_xfer_elem *xfer, bool ring_db)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_start_xfer(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_get_channel_cfg(unsigned long chan_hdl,
+		struct gsi_chan_props *props,
+		union gsi_channel_scratch *scr)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_set_channel_cfg(unsigned long chan_hdl,
+		struct gsi_chan_props *props,
+		union gsi_channel_scratch *scr)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
+	struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
+	struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
+		phys_addr_t per_base_addr)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size,
+		enum gsi_ver ver)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
+		unsigned long *size)
+{
+}
+
+static inline int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee,
+	int *code)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+#endif
+#endif
diff --git a/include/linux/rndis_ipa.h b/include/linux/rndis_ipa.h
new file mode 100644
index 000000000000..24c77d568212
--- /dev/null
+++ b/include/linux/rndis_ipa.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RNDIS_IPA_H_ +#define _RNDIS_IPA_H_ + +#include + +/* + * @priv: private data given upon ipa_connect + * @evt: event enum, should be IPA_WRITE_DONE + * @data: for tx path the data field is the sent socket buffer. + */ +typedef void (*ipa_callback)(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data); + +/* + * struct ipa_usb_init_params - parameters for driver initialization API + * + * @device_ready_notify: callback supplied by USB core driver + * This callback shall be called by the Netdev once the device + * is ready to receive data from tethered PC. + * @ipa_rx_notify: The network driver will set this callback (out parameter). + * this callback shall be supplied for ipa_connect upon pipe + * connection (USB->IPA), once IPA driver receive data packets + * from USB pipe destined for Apps this callback will be called. + * @ipa_tx_notify: The network driver will set this callback (out parameter). + * this callback shall be supplied for ipa_connect upon pipe + * connection (IPA->USB), once IPA driver send packets destined + * for USB, IPA BAM will notify for Tx-complete. + * @host_ethaddr: host Ethernet address in network order + * @device_ethaddr: device Ethernet address in network order + * @private: The network driver will set this pointer (out parameter). + * This pointer will hold the network device for later interaction + * with between USB driver and the network driver. + * @skip_ep_cfg: boolean field that determines if Apps-processor + * should or should not configure this end-point. + */ +struct ipa_usb_init_params { + void (*device_ready_notify)(void); + ipa_callback ipa_rx_notify; + ipa_callback ipa_tx_notify; + u8 host_ethaddr[ETH_ALEN]; + u8 device_ethaddr[ETH_ALEN]; + void *private; + bool skip_ep_cfg; +}; + +#ifdef CONFIG_RNDIS_IPA + +int rndis_ipa_init(struct ipa_usb_init_params *params); + +int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl, + u32 ipa_to_usb_hdl, + u32 max_xfer_size_bytes_to_dev, + u32 max_packet_number_to_dev, + u32 max_xfer_size_bytes_to_host, + void *private); + +int rndis_ipa_pipe_disconnect_notify(void *private); + +void rndis_ipa_cleanup(void *private); + +#else /* CONFIG_RNDIS_IPA*/ + +static inline int rndis_ipa_init(struct ipa_usb_init_params *params) +{ + return -ENOMEM; +} + +static inline int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl, + u32 ipa_to_usb_hdl, + u32 max_xfer_size_bytes_to_dev, + u32 max_packet_number_to_dev, + u32 max_xfer_size_bytes_to_host, + void *private) +{ + return -ENOMEM; +} + +static inline int rndis_ipa_pipe_disconnect_notify(void *private) +{ + return -ENOMEM; +} + +static inline void rndis_ipa_cleanup(void *private) +{ + +} +#endif /* CONFIG_RNDIS_IPA */ + +#endif /* _RNDIS_IPA_H_ */ diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h new file mode 100644 index 000000000000..77d86b8a0018 --- /dev/null +++ b/include/uapi/linux/ipa_qmi_service_v01.h @@ -0,0 +1,1705 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * This header file defines the types and structures that were defined in + * ipa. It contains the constant values defined, enums, structures, + * messages, and service message IDs (in that order) Structures that were + * defined in the IDL as messages contain mandatory elements, optional + * elements, a combination of mandatory and optional elements (mandatory + * always come before optionals in the structure), or nothing (null message) + + * An optional element in a message is preceded by a uint8_t value that must be + * set to true if the element is going to be included. When decoding a received + * message, the uint8_t values will be set to true or false by the decode + * routine, and should be checked before accessing the values that they + * correspond to. + + * Variable sized arrays are defined as static sized arrays with an unsigned + * integer (32 bit) preceding it that must be set to the number of elements + * in the array that are valid. For Example: + + * uint32_t test_opaque_len; + * uint8_t test_opaque[16]; + + * If only 4 elements are added to test_opaque[] then test_opaque_len must be + * set to 4 before sending the message. When decoding, the _len value is set + * by the decode routine and should be checked so that the correct number of + * elements in the array will be accessed. + */ +#ifndef IPA_QMI_SERVICE_V01_H +#define IPA_QMI_SERVICE_V01_H + +#define QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01 2 +#define QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01 2 +#define QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01 2 +#define QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01 2 +#define QMI_IPA_MAX_FILTERS_V01 64 +#define QMI_IPA_MAX_FILTERS_EX_V01 128 +#define QMI_IPA_MAX_PIPES_V01 20 +#define QMI_IPA_MAX_APN_V01 8 + +#define IPA_INT_MAX ((int)(~0U>>1)) +#define IPA_INT_MIN (-IPA_INT_MAX - 1) + +/* IPA definition as msm_qmi_interface.h */ + +enum ipa_qmi_result_type_v01 { + /* To force a 32 bit signed enum. Do not change or use*/ + IPA_QMI_RESULT_TYPE_MIN_ENUM_VAL_V01 = IPA_INT_MIN, + IPA_QMI_RESULT_SUCCESS_V01 = 0, + IPA_QMI_RESULT_FAILURE_V01 = 1, + IPA_QMI_RESULT_TYPE_MAX_ENUM_VAL_V01 = IPA_INT_MAX, +}; + +enum ipa_qmi_error_type_v01 { + /* To force a 32 bit signed enum. Do not change or use*/ + IPA_QMI_ERROR_TYPE_MIN_ENUM_VAL_V01 = IPA_INT_MIN, + IPA_QMI_ERR_NONE_V01 = 0x0000, + IPA_QMI_ERR_MALFORMED_MSG_V01 = 0x0001, + IPA_QMI_ERR_NO_MEMORY_V01 = 0x0002, + IPA_QMI_ERR_INTERNAL_V01 = 0x0003, + IPA_QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 = 0x0005, + IPA_QMI_ERR_INVALID_ID_V01 = 0x0029, + IPA_QMI_ERR_ENCODING_V01 = 0x003A, + IPA_QMI_ERR_INCOMPATIBLE_STATE_V01 = 0x005A, + IPA_QMI_ERR_NOT_SUPPORTED_V01 = 0x005E, + IPA_QMI_ERROR_TYPE_MAX_ENUM_VAL_V01 = IPA_INT_MAX, +}; + +struct ipa_qmi_response_type_v01 { + enum ipa_qmi_result_type_v01 result; + enum ipa_qmi_error_type_v01 error; +}; + +enum ipa_platform_type_enum_v01 { + IPA_PLATFORM_TYPE_ENUM_MIN_ENUM_VAL_V01 = + -2147483647, /* To force a 32 bit signed enum. 
Do not change or use */ + QMI_IPA_PLATFORM_TYPE_INVALID_V01 = 0, + /* Invalid platform identifier */ + QMI_IPA_PLATFORM_TYPE_TN_V01 = 1, + /* Platform identifier - Data card device */ + QMI_IPA_PLATFORM_TYPE_LE_V01 = 2, + /* Platform identifier - Data router device */ + QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01 = 3, + /* Platform identifier - MSM device with Android HLOS */ + QMI_IPA_PLATFORM_TYPE_MSM_WINDOWS_V01 = 4, + /* Platform identifier - MSM device with Windows HLOS */ + QMI_IPA_PLATFORM_TYPE_MSM_QNX_V01 = 5, + /* Platform identifier - MSM device with QNX HLOS */ + IPA_PLATFORM_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647 + /* To force a 32 bit signed enum. Do not change or use */ +}; + +struct ipa_hdr_tbl_info_type_v01 { + uint32_t modem_offset_start; + /* Offset from the start of IPA Shared memory from which + * modem driver may insert header table entries. + */ + uint32_t modem_offset_end; + /* Offset from the start of IPA shared mem beyond which modem + * driver shall not insert header table entries. The space + * available for the modem driver shall include the + * modem_offset_start and modem_offset_end. + */ +}; /* Type */ + +struct ipa_route_tbl_info_type_v01 { + uint32_t route_tbl_start_addr; + /* Identifies the start of the routing table. Denotes the offset + * from the start of the IPA Shared Mem + */ + + uint32_t num_indices; + /* Number of indices (starting from 0) that is being allocated to + * the modem. The number indicated here is also included in the + * allocation. The value of num_indices shall not exceed 31 + * (5 bits used to specify the routing table index), unless there + * is a change in the hardware. + */ +}; /* Type */ + +struct ipa_modem_mem_info_type_v01 { + + uint32_t block_start_addr; + /* Identifies the start of the memory block allocated for the + * modem. Denotes the offset from the start of the IPA Shared Mem + */ + + uint32_t size; + /* Size of the block allocated for the modem driver */ +}; /* Type */ + +struct ipa_hdr_proc_ctx_tbl_info_type_v01 { + + uint32_t modem_offset_start; + /* Offset from the start of IPA shared memory from which the modem + * driver may insert header processing context table entries. + */ + + uint32_t modem_offset_end; + /* Offset from the start of IPA shared memory beyond which the modem + * driver may not insert header proc table entries. The space + * available for the modem driver includes modem_offset_start and + * modem_offset_end. + */ +}; /* Type */ + +struct ipa_zip_tbl_info_type_v01 { + + uint32_t modem_offset_start; + /* Offset from the start of IPA shared memory from which the modem + * driver may insert compression/decompression command entries. + */ + + uint32_t modem_offset_end; + /* Offset from the start of IPA shared memory beyond which the modem + * driver may not insert compression/decompression command entries. + * The space available for the modem driver includes + * modem_offset_start and modem_offset_end. + */ +}; /* Type */ + +/** + * Request Message; Requests the modem IPA driver + * to perform initialization + */ +struct ipa_init_modem_driver_req_msg_v01 { + + /* Optional */ + /* Platform info */ + uint8_t platform_type_valid; + /* Must be set to true if platform_type is being passed */ + enum ipa_platform_type_enum_v01 platform_type; + /* Provides information about the platform (ex. 
TN/MN/LE/MSM,etc) */ + + /* Optional */ + /* Header table info */ + uint8_t hdr_tbl_info_valid; + /* Must be set to true if hdr_tbl_info is being passed */ + struct ipa_hdr_tbl_info_type_v01 hdr_tbl_info; + /* Provides information about the header table */ + + /* Optional */ + /* IPV4 Routing table info */ + uint8_t v4_route_tbl_info_valid; + /* Must be set to true if v4_route_tbl_info is being passed */ + struct ipa_route_tbl_info_type_v01 v4_route_tbl_info; + /* Provides information about the IPV4 routing table */ + + /* Optional */ + /* IPV6 Routing table info */ + uint8_t v6_route_tbl_info_valid; + /* Must be set to true if v6_route_tbl_info is being passed */ + struct ipa_route_tbl_info_type_v01 v6_route_tbl_info; + /* Provides information about the IPV6 routing table */ + + /* Optional */ + /* IPV4 Filter table start address */ + uint8_t v4_filter_tbl_start_addr_valid; + /* Must be set to true if v4_filter_tbl_start_addr is being passed */ + uint32_t v4_filter_tbl_start_addr; + /* Provides information about the starting address of IPV4 filter + * table in IPAv2 or non-hashable IPv4 filter table in IPAv3. + * Denotes the offset from the start of the IPA Shared Mem + */ + + /* Optional */ + /* IPV6 Filter table start address */ + uint8_t v6_filter_tbl_start_addr_valid; + /* Must be set to true if v6_filter_tbl_start_addr is being passed */ + uint32_t v6_filter_tbl_start_addr; + /* Provides information about the starting address of IPV6 filter + * table in IPAv2 or non-hashable IPv6 filter table in IPAv3. + * Denotes the offset from the start of the IPA Shared Mem + */ + + /* Optional */ + /* Modem memory block */ + uint8_t modem_mem_info_valid; + /* Must be set to true if modem_mem_info is being passed */ + struct ipa_modem_mem_info_type_v01 modem_mem_info; + /* Provides information about the start address and the size of + * the memory block that is being allocated to the modem driver. + * Denotes the physical address + */ + + /* Optional */ + /* Destination end point for control commands from modem */ + uint8_t ctrl_comm_dest_end_pt_valid; + /* Must be set to true if ctrl_comm_dest_end_pt is being passed */ + uint32_t ctrl_comm_dest_end_pt; + /* Provides information about the destination end point on the + * application processor to which the modem driver can send + * control commands. The value of this parameter cannot exceed + * 19 since IPA only supports 20 end points. + */ + + /* Optional */ + /* Modem Bootup Information */ + uint8_t is_ssr_bootup_valid; + /* Must be set to true if is_ssr_bootup is being passed */ + uint8_t is_ssr_bootup; + /* Specifies whether the modem is booting up after a modem only + * sub-system restart or not. This will let the modem driver + * know that it doesn't have to reinitialize some of the HW + * blocks because IPA has not been reset since the previous + * initialization. + */ + + /* Optional */ + /* Header Processing Context Table Information */ + uint8_t hdr_proc_ctx_tbl_info_valid; + /* Must be set to true if hdr_proc_ctx_tbl_info is being passed */ + struct ipa_hdr_proc_ctx_tbl_info_type_v01 hdr_proc_ctx_tbl_info; + /* Provides information about the header processing context table. + */ + + /* Optional */ + /* Compression Decompression Table Information */ + uint8_t zip_tbl_info_valid; + /* Must be set to true if zip_tbl_info is being passed */ + struct ipa_zip_tbl_info_type_v01 zip_tbl_info; + /* Provides information about the zip table. 
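+ *
+ * Illustrative only (not part of the IDL): every optional member above is
+ * paired with a _valid flag, so a sender fills this request roughly as
+ * follows (the offset values here are made up):
+ *
+ *   struct ipa_init_modem_driver_req_msg_v01 req;
+ *
+ *   memset(&req, 0, sizeof(req));
+ *   req.platform_type_valid = 1;
+ *   req.platform_type = QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01;
+ *   req.hdr_tbl_info_valid = 1;
+ *   req.hdr_tbl_info.modem_offset_start = 0x100;
+ *   req.hdr_tbl_info.modem_offset_end = 0x1FF;
+ *
+ * Members whose _valid flag is left at 0 are simply not included in the
+ * encoded message.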
+ */ + + /* Optional */ + /* IPv4 Hashable Routing Table Information */ + /** Must be set to true if v4_hash_route_tbl_info is being passed */ + uint8_t v4_hash_route_tbl_info_valid; + struct ipa_route_tbl_info_type_v01 v4_hash_route_tbl_info; + + /* Optional */ + /* IPv6 Hashable Routing Table Information */ + /** Must be set to true if v6_hash_route_tbl_info is being passed */ + uint8_t v6_hash_route_tbl_info_valid; + struct ipa_route_tbl_info_type_v01 v6_hash_route_tbl_info; + + /* + * Optional + * IPv4 Hashable Filter Table Start Address + * Must be set to true if v4_hash_filter_tbl_start_addr + * is being passed + */ + uint8_t v4_hash_filter_tbl_start_addr_valid; + uint32_t v4_hash_filter_tbl_start_addr; + /* Identifies the starting address of the IPv4 hashable filter + * table in IPAv3 onwards. Denotes the offset from the start of + * the IPA shared memory. + */ + + /* Optional + * IPv6 Hashable Filter Table Start Address + * Must be set to true if v6_hash_filter_tbl_start_addr + * is being passed + */ + uint8_t v6_hash_filter_tbl_start_addr_valid; + uint32_t v6_hash_filter_tbl_start_addr; + /* Identifies the starting address of the IPv6 hashable filter + * table in IPAv3 onwards. Denotes the offset from the start of + * the IPA shared memory. + */ +}; /* Message */ + +/* Response Message; Requests the modem IPA driver about initialization */ +struct ipa_init_modem_driver_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* Standard response type.*/ + + /* Optional */ + /* Destination end point for control commands from master driver */ + uint8_t ctrl_comm_dest_end_pt_valid; + /* Must be set to true if ctrl_comm_dest_ep is being passed */ + uint32_t ctrl_comm_dest_end_pt; + /* Provides information about the destination end point on the + * modem processor to which the master driver can send control + * commands. The value of this parameter cannot exceed 19 since + * IPA only supports 20 end points. This field is looked at only + * if the result in TLV RESULT_CODE is QMI_RESULT_SUCCESS + */ + + /* Optional */ + /* Default end point */ + uint8_t default_end_pt_valid; + /* Must be set to true if default_end_pt is being passed */ + uint32_t default_end_pt; + /* Provides information about the default end point. The master + * driver may or may not set the register in the hardware with + * this value. The value of this parameter cannot exceed 19 + * since IPA only supports 20 end points. This field is looked + * at only if the result in TLV RESULT_CODE is QMI_RESULT_SUCCESS + */ + + /* Optional */ + /* Modem Driver Initialization Pending */ + uint8_t modem_driver_init_pending_valid; + /* Must be set to true if modem_driver_init_pending is being passed */ + uint8_t modem_driver_init_pending; + /* + * Identifies if second level message handshake is needed + * between drivers to indicate when IPA HWP loading is completed. + * If this is set by modem driver, AP driver will need to wait + * for a INIT_MODEM_DRIVER_CMPLT message before communicating with + * IPA HWP. + */ +}; /* Message */ + +/* + * Request Message; Request from Modem IPA driver to indicate + * modem driver init completion + */ +struct ipa_init_modem_driver_cmplt_req_msg_v01 { + /* Mandatory */ + /* Modem Driver init complete status; */ + uint8_t status; + /* + * Specifies whether the modem driver initialization is complete + * including the micro controller image loading. 
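+ *
+ * Illustrative note (a sketch, not normative): this message ties in with
+ * the modem_driver_init_pending TLV in the response above. When the modem
+ * set that flag in its QMI_IPA_INIT_MODEM_DRIVER_RESP_V01, the AP driver
+ * waits for this message before communicating with the IPA HWP, e.g.:
+ *
+ *   struct ipa_init_modem_driver_cmplt_req_msg_v01 cmplt = {
+ *           .status = 1,
+ *   };
+ *
+ * where a nonzero status here is taken to mean that initialization,
+ * including the uC image load, completed.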
+ */ +}; /* Message */ + +/* + * Response Message; Request from Modem IPA driver to indicate + * modem driver init completion + */ +struct ipa_init_modem_driver_cmplt_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /**< Standard response type.*/ +}; /* Message */ + +/* Request Message; This is the message that is exchanged between the + * control point and the service in order to register for indications. + */ +struct ipa_indication_reg_req_msg_v01 { + /* Optional */ + /* Master driver initialization completion */ + uint8_t master_driver_init_complete_valid; + /* Must be set to true if master_driver_init_complete is being passed */ + uint8_t master_driver_init_complete; + /* If set to TRUE, this field indicates that the client is + * interested in getting indications about the completion + * of the initialization sequence of the master driver. + * Setting this field in the request message makes sense + * only when the QMI_IPA_INDICATION_REGISTER_REQ is being + * originated from the modem driver + */ + + /* Optional */ + /* Data Usage Quota Reached */ + uint8_t data_usage_quota_reached_valid; + /* Must be set to true if data_usage_quota_reached is being passed */ + uint8_t data_usage_quota_reached; + /* If set to TRUE, this field indicates that the client wants to + * receive indications about reaching the data usage quota that + * previously set via QMI_IPA_SET_DATA_USAGE_QUOTA. Setting this field + * in the request message makes sense only when the + * QMI_IPA_INDICATION_REGISTER_REQ is being originated from the Master + * driver + */ +}; /* Message */ + + +/* Response Message; This is the message that is exchanged between the + * control point and the service in order to register for indications. + */ +struct ipa_indication_reg_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /**< Standard response type.*/ +}; /* Message */ + + +/* Indication Message; Indication sent to the Modem IPA driver from + * master IPA driver about initialization being complete. + */ +struct ipa_master_driver_init_complt_ind_msg_v01 { + /* Mandatory */ + /* Master driver initialization completion status */ + struct ipa_qmi_response_type_v01 master_driver_init_status; + /* Indicates the status of initialization. If everything went + * as expected, this field is set to SUCCESS. ERROR is set + * otherwise. Extended error info may be used to convey + * additional information about the error + */ +}; /* Message */ + +struct ipa_ipfltr_range_eq_16_type_v01 { + uint8_t offset; + /* Specifies the offset from the IHL (Internet Header length) */ + + uint16_t range_low; + /* Specifies the lower bound of the range */ + + uint16_t range_high; + /* Specifies the upper bound of the range */ +}; /* Type */ + +struct ipa_ipfltr_mask_eq_32_type_v01 { + uint8_t offset; + /* Specifies the offset either from IHL or from the start of + * the IP packet. This depends on the equation that this structure + * is used in. + */ + + uint32_t mask; + /* Specifies the mask that has to be used in the comparison. + * The field is ANDed with the mask and compared against the value. + */ + + uint32_t value; + /* Specifies the 32 bit value that used in the comparison. */ +}; /* Type */ + +struct ipa_ipfltr_eq_16_type_v01 { + uint8_t offset; + /* Specifies the offset into the packet */ + + uint16_t value; + /* Specifies the 16 bit value that should be used in the comparison. 
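+ *
+ * Illustrative only: an exact 16-bit match on the value 0x1234 located two
+ * bytes into the region this equation applies to could be expressed as
+ *
+ *   struct ipa_ipfltr_eq_16_type_v01 eq = { .offset = 2, .value = 0x1234 };
+ *
+ * (offset and value above are made-up numbers).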
*/ +}; /* Type */ + +struct ipa_ipfltr_eq_32_type_v01 { + uint8_t offset; + /* Specifies the offset into the packet */ + + uint32_t value; + /* Specifies the 32 bit value that should be used in the comparison. */ +}; /* Type */ + +struct ipa_ipfltr_mask_eq_128_type_v01 { + uint8_t offset; + /* Specifies the offset into the packet */ + + uint8_t mask[16]; + /* Specifies the mask that has to be used in the comparison. + * The field is ANDed with the mask and compared against the value. + */ + + uint8_t value[16]; + /* Specifies the 128 bit value that should be used in the comparison. */ +}; /* Type */ + + +struct ipa_filter_rule_type_v01 { + uint16_t rule_eq_bitmap; + /* 16-bit Bitmask to indicate how many eqs are valid in this rule */ + + uint8_t tos_eq_present; + /* Specifies if a type of service check rule is present */ + + uint8_t tos_eq; + /* The value to check against the type of service (ipv4) field */ + + uint8_t protocol_eq_present; + /* Specifies if a protocol check rule is present */ + + uint8_t protocol_eq; + /* The value to check against the protocol field */ + + uint8_t num_ihl_offset_range_16; + /* The number of 16 bit range check rules at the location + * determined by IP header length plus a given offset offset + * in this rule. See the definition of the ipa_filter_range_eq_16 + * for better understanding. The value of this field cannot exceed + * IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS which is set as 2 + */ + + struct ipa_ipfltr_range_eq_16_type_v01 + ihl_offset_range_16[QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01]; + /* Array of the registered IP header length offset 16 bit range + * check rules. + */ + + uint8_t num_offset_meq_32; + /* The number of 32 bit masked comparison rules present + * in this rule + */ + + struct ipa_ipfltr_mask_eq_32_type_v01 + offset_meq_32[QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01]; + /* An array of all the possible 32bit masked comparison rules + * in this rule + */ + + uint8_t tc_eq_present; + /* Specifies if the traffic class rule is present in this rule */ + + uint8_t tc_eq; + /* The value against which the IPV4 traffic class field has to + * be checked + */ + + uint8_t flow_eq_present; + /* Specifies if the "flow equals" rule is present in this rule */ + + uint32_t flow_eq; + /* The value against which the IPV6 flow field has to be checked */ + + uint8_t ihl_offset_eq_16_present; + /* Specifies if there is a 16 bit comparison required at the + * location in the packet determined by "Intenet Header length + * + specified offset" + */ + + struct ipa_ipfltr_eq_16_type_v01 ihl_offset_eq_16; + /* The 16 bit comparison equation */ + + uint8_t ihl_offset_eq_32_present; + /* Specifies if there is a 32 bit comparison required at the + * location in the packet determined by "Intenet Header length + * + specified offset" + */ + + struct ipa_ipfltr_eq_32_type_v01 ihl_offset_eq_32; + /* The 32 bit comparison equation */ + + uint8_t num_ihl_offset_meq_32; + /* The number of 32 bit masked comparison equations in this + * rule. The location of the packet to be compared is + * determined by the IP Header length + the give offset + */ + + struct ipa_ipfltr_mask_eq_32_type_v01 + ihl_offset_meq_32[QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01]; + /* Array of 32 bit masked comparison equations. + */ + + uint8_t num_offset_meq_128; + /* The number of 128 bit comparison equations in this rule */ + + struct ipa_ipfltr_mask_eq_128_type_v01 + offset_meq_128[QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01]; + /* Array of 128 bit comparison equations. 
The location in the + * packet is determined by the specified offset + */ + + uint8_t metadata_meq32_present; + /* Boolean indicating if the 32 bit masked comparison equation + * is present or not. Comparison is done against the metadata + * in IPA. Metadata can either be extracted from the packet + * header or from the "metadata" register. + */ + + struct ipa_ipfltr_mask_eq_32_type_v01 + metadata_meq32; + /* The metadata 32 bit masked comparison equation */ + + uint8_t ipv4_frag_eq_present; + /* Specifies if the IPv4 Fragment equation is present in this rule */ +}; /* Type */ + + +enum ipa_ip_type_enum_v01 { + IPA_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + /* To force a 32 bit signed enum. Do not change or use*/ + QMI_IPA_IP_TYPE_INVALID_V01 = 0, + /* Invalid IP type identifier */ + QMI_IPA_IP_TYPE_V4_V01 = 1, + /* IP V4 type */ + QMI_IPA_IP_TYPE_V6_V01 = 2, + /* IP V6 type */ + QMI_IPA_IP_TYPE_V4V6_V01 = 3, + /* Applies to both IP types */ + IPA_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647 + /* To force a 32 bit signed enum. Do not change or use*/ +}; + + +enum ipa_filter_action_enum_v01 { + IPA_FILTER_ACTION_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + /* To force a 32 bit signed enum. Do not change or use */ + QMI_IPA_FILTER_ACTION_INVALID_V01 = 0, + /* Invalid action on filter hit */ + QMI_IPA_FILTER_ACTION_SRC_NAT_V01 = 1, + /* Pass packet to NAT block for Source NAT */ + QMI_IPA_FILTER_ACTION_DST_NAT_V01 = 2, + /* Pass packet to NAT block for Destination NAT */ + QMI_IPA_FILTER_ACTION_ROUTING_V01 = 3, + /* Pass packet to Routing block */ + QMI_IPA_FILTER_ACTION_EXCEPTION_V01 = 4, + /* Treat packet as exception and send to exception pipe */ + IPA_FILTER_ACTION_ENUM_MAX_ENUM_VAL_V01 = 2147483647 + /* To force a 32 bit signed enum. Do not change or use*/ +}; + +struct ipa_filter_spec_type_v01 { + uint32_t filter_spec_identifier; + /* This field is used to identify a filter spec in the list + * of filter specs being sent from the client. This field + * is applicable only in the filter install request and response. + */ + + enum ipa_ip_type_enum_v01 ip_type; + /* This field identifies the IP type for which this rule is + * applicable. The driver needs to identify the filter table + * (V6 or V4) and this field is essential for that + */ + + struct ipa_filter_rule_type_v01 filter_rule; + /* This field specifies the rules in the filter spec. These rules + * are the ones that are matched against fields in the packet. + */ + + enum ipa_filter_action_enum_v01 filter_action; + /* This field specifies the action to be taken when a filter match + * occurs. The remote side should install this information into the + * hardware along with the filter equations. + */ + + uint8_t is_routing_table_index_valid; + /* Specifies whether the routing table index is present or not. + * If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this + * parameter need not be provided. + */ + + uint32_t route_table_index; + /* This is the index in the routing table that should be used + * to route the packets if the filter rule is hit + */ + + uint8_t is_mux_id_valid; + /* Specifies whether the mux_id is valid */ + + uint32_t mux_id; + /* This field identifies the QMAP MUX ID. As a part of QMAP + * protocol, several data calls may be multiplexed over the + * same physical transport channel. This identifier is used to + * identify one such data call. The maximum value for this + * identifier is 255. 
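+ *
+ * A minimal sketch of one filter spec (all values below are made up, and
+ * filter_rule is left zero-initialized for brevity):
+ *
+ *   struct ipa_filter_spec_type_v01 spec = {
+ *           .filter_spec_identifier = 1,
+ *           .ip_type = QMI_IPA_IP_TYPE_V4_V01,
+ *           .filter_action = QMI_IPA_FILTER_ACTION_ROUTING_V01,
+ *           .is_routing_table_index_valid = 1,
+ *           .route_table_index = 3,
+ *           .is_mux_id_valid = 1,
+ *           .mux_id = 1,
+ *   };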
+ */ +}; /* Type */ + +struct ipa_filter_spec_ex_type_v01 { + enum ipa_ip_type_enum_v01 ip_type; + /* This field identifies the IP type for which this rule is + * applicable. The driver needs to identify the filter table + * (V6 or V4) and this field is essential for that + */ + + struct ipa_filter_rule_type_v01 filter_rule; + /* This field specifies the rules in the filter spec. These rules + * are the ones that are matched against fields in the packet. + */ + + enum ipa_filter_action_enum_v01 filter_action; + /* This field specifies the action to be taken when a filter match + * occurs. The remote side should install this information into the + * hardware along with the filter equations. + */ + + uint8_t is_routing_table_index_valid; + /* Specifies whether the routing table index is present or not. + * If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this + * parameter need not be provided. + */ + + uint32_t route_table_index; + /* This is the index in the routing table that should be used + * to route the packets if the filter rule is hit + */ + + uint8_t is_mux_id_valid; + /* Specifies whether the mux_id is valid */ + + uint32_t mux_id; + /* This field identifies the QMAP MUX ID. As a part of QMAP + * protocol, several data calls may be multiplexed over the + * same physical transport channel. This identifier is used to + * identify one such data call. The maximum value for this + * identifier is 255. + */ + + uint32_t rule_id; + /* Rule Id of the given filter. The Rule Id is populated in the rule + * header when installing the rule in IPA. + */ + + uint8_t is_rule_hashable; + /** Specifies whether the given rule is hashable. + */ +}; /* Type */ + + +/* Request Message; This is the message that is exchanged between the + * control point and the service in order to request the installation + * of filtering rules in the hardware block by the remote side. + */ +struct ipa_install_fltr_rule_req_msg_v01 { + /* Optional + * IP type that this rule applies to + * Filter specification to be installed in the hardware + */ + uint8_t filter_spec_list_valid; + /* Must be set to true if filter_spec_list is being passed */ + uint32_t filter_spec_list_len; + /* Must be set to # of elements in filter_spec_list */ + struct ipa_filter_spec_type_v01 + filter_spec_list[QMI_IPA_MAX_FILTERS_V01]; + /* This structure defines the list of filters that have + * to be installed in the hardware. The driver installing + * these rules shall do so in the same order as specified + * in this list. + */ + + /* Optional */ + /* Pipe index to intall rule */ + uint8_t source_pipe_index_valid; + /* Must be set to true if source_pipe_index is being passed */ + uint32_t source_pipe_index; + /* This is the source pipe on which the filter rule is to be + * installed. The requestor may always not know the pipe + * indices. If not specified, the receiver shall install + * this rule on all the pipes that it controls through + * which data may be fed into IPA. 
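+ *
+ * Illustrative request using the spec sketched above (local variable names
+ * are not part of this API):
+ *
+ *   struct ipa_install_fltr_rule_req_msg_v01 req;
+ *
+ *   memset(&req, 0, sizeof(req));
+ *   req.filter_spec_list_valid = 1;
+ *   req.filter_spec_list_len = 1;
+ *   req.filter_spec_list[0] = spec;
+ *   req.num_ipv4_filters_valid = 1;
+ *   req.num_ipv4_filters = 1;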
+ */ + + /* Optional */ + /* Total number of IPv4 filters in the filter spec list */ + uint8_t num_ipv4_filters_valid; + /* Must be set to true if num_ipv4_filters is being passed */ + uint32_t num_ipv4_filters; + /* Number of IPv4 rules included in filter spec list */ + + /* Optional */ + /* Total number of IPv6 filters in the filter spec list */ + uint8_t num_ipv6_filters_valid; + /* Must be set to true if num_ipv6_filters is being passed */ + uint32_t num_ipv6_filters; + /* Number of IPv6 rules included in filter spec list */ + + /* Optional */ + /* List of XLAT filter indices in the filter spec list */ + uint8_t xlat_filter_indices_list_valid; + /* Must be set to true if xlat_filter_indices_list + * is being passed + */ + uint32_t xlat_filter_indices_list_len; + /* Must be set to # of elements in xlat_filter_indices_list */ + uint32_t xlat_filter_indices_list[QMI_IPA_MAX_FILTERS_V01]; + /* List of XLAT filter indices. Filter rules at specified indices + * will need to be modified by the receiver if the PDN is XLAT + * before installing them on the associated IPA consumer pipe. + */ + + /* Optional */ + /* Extended Filter Specification */ + uint8_t filter_spec_ex_list_valid; + /* Must be set to true if filter_spec_ex_list is being passed */ + uint32_t filter_spec_ex_list_len; + /* Must be set to # of elements in filter_spec_ex_list */ + struct ipa_filter_spec_ex_type_v01 + filter_spec_ex_list[QMI_IPA_MAX_FILTERS_V01]; + /* + * List of filter specifications of filters that must be installed in + * the IPAv3.x hardware. + * The driver installing these rules must do so in the same + * order as specified in this list. + */ +}; /* Message */ + +struct ipa_filter_rule_identifier_to_handle_map_v01 { + uint32_t filter_spec_identifier; + /* This field is used to identify a filter spec in the list of + * filter specs being sent from the client. This field is + * applicable only in the filter install request and response. + */ + uint32_t filter_handle; + /* This field is used to identify a rule in any subsequent message. + * This is a value that is provided by the server to the control + * point + */ +}; /* Type */ + +/* Response Message; This is the message that is exchanged between the + * control point and the service in order to request the + * installation of filtering rules in the hardware block by + * the remote side. + */ +struct ipa_install_fltr_rule_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* Standard response type. + * Standard response type. Contains the following data members: + * - qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE + * - qmi_error_type -- Error code. Possible error code values are + * described in the error codes section of each message definition. + */ + + /* Optional */ + /* Filter Handle List */ + uint8_t filter_handle_list_valid; + /* Must be set to true if filter_handle_list is being passed */ + uint32_t filter_handle_list_len; + /* Must be set to # of elements in filter_handle_list */ + struct ipa_filter_rule_identifier_to_handle_map_v01 + filter_handle_list[QMI_IPA_MAX_FILTERS_V01]; + /* + * List of handles returned to the control point. Each handle is + * mapped to the rule identifier that was specified in the + * request message. Any further reference to the rule is done + * using the filter handle. 
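+ *
+ * Illustrative consumption of the handle list (save_handle() is a
+ * hypothetical caller-side helper, not part of this API):
+ *
+ *   for (i = 0; i < resp.filter_handle_list_len; i++)
+ *           save_handle(resp.filter_handle_list[i].filter_spec_identifier,
+ *                       resp.filter_handle_list[i].filter_handle);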
+ */ + + /* Optional */ + /* Rule id List */ + uint8_t rule_id_valid; + /* Must be set to true if rule_id is being passed */ + uint32_t rule_id_len; + /* Must be set to # of elements in rule_id */ + uint32_t rule_id[QMI_IPA_MAX_FILTERS_V01]; + /* + * List of rule ids returned to the control point. + * Any further reference to the rule is done using the + * filter rule id specified in this list. + */ +}; /* Message */ + +struct ipa_filter_handle_to_index_map_v01 { + uint32_t filter_handle; + /* This is a handle that was given to the remote client that + * requested the rule addition. + */ + uint32_t filter_index; + /* This index denotes the location in a filter table, where the + * filter rule has been installed. The maximum value of this + * field is 64. + */ +}; /* Type */ + +/* Request Message; This is the message that is exchanged between the + * control point and the service in order to notify the remote driver + * of the installation of the filter rule supplied earlier by the + * remote driver. + */ +struct ipa_fltr_installed_notif_req_msg_v01 { + /* Mandatory */ + /* Pipe index */ + uint32_t source_pipe_index; + /* This is the source pipe on which the filter rule has been + * installed or was attempted to be installed + */ + + /* Mandatory */ + /* Installation Status */ + enum ipa_qmi_result_type_v01 install_status; + /* This is the status of installation. If this indicates + * SUCCESS, other optional fields carry additional + * information + */ + + /* Mandatory */ + /* List of Filter Indices */ + uint32_t filter_index_list_len; + /* Must be set to # of elements in filter_index_list */ + struct ipa_filter_handle_to_index_map_v01 + filter_index_list[QMI_IPA_MAX_FILTERS_V01]; + /* + * Provides the list of filter indices and the corresponding + * filter handle. If the installation_status indicates a + * failure, the filter indices must be set to a reserve + * index (255). + */ + + /* Optional */ + /* Embedded pipe index */ + uint8_t embedded_pipe_index_valid; + /* Must be set to true if embedded_pipe_index is being passed */ + uint32_t embedded_pipe_index; + /* This index denotes the embedded pipe number on which a call to + * the same PDN has been made. If this field is set, it denotes + * that this is a use case where PDN sharing is happening. The + * embedded pipe is used to send data from the embedded client + * in the device + */ + + /* Optional */ + /* Retain Header Configuration */ + uint8_t retain_header_valid; + /* Must be set to true if retain_header is being passed */ + uint8_t retain_header; + /* This field indicates if the driver installing the rule has + * turned on the "retain header" bit. If this is true, the + * header that is removed by IPA is reinserted after the + * packet processing is completed. + */ + + /* Optional */ + /* Embedded call Mux Id */ + uint8_t embedded_call_mux_id_valid; + /**< Must be set to true if embedded_call_mux_id is being passed */ + uint32_t embedded_call_mux_id; + /* This identifies one of the many calls that have been originated + * on the embedded pipe. This is how we identify the PDN gateway + * to which traffic from the source pipe has to flow. 
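+ *
+ * Illustrative notification for a successful install (the pipe and index
+ * values are made up; on failure the indices would instead carry the
+ * reserved value 255 as described above):
+ *
+ *   notif.source_pipe_index = 5;
+ *   notif.install_status = IPA_QMI_RESULT_SUCCESS_V01;
+ *   notif.filter_index_list_len = 1;
+ *   notif.filter_index_list[0].filter_handle = handle;
+ *   notif.filter_index_list[0].filter_index = 10;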
+ */ + + /* Optional */ + /* Total number of IPv4 filters in the filter index list */ + uint8_t num_ipv4_filters_valid; + /* Must be set to true if num_ipv4_filters is being passed */ + uint32_t num_ipv4_filters; + /* Number of IPv4 rules included in filter index list */ + + /* Optional */ + /* Total number of IPv6 filters in the filter index list */ + uint8_t num_ipv6_filters_valid; + /* Must be set to true if num_ipv6_filters is being passed */ + uint32_t num_ipv6_filters; + /* Number of IPv6 rules included in filter index list */ + + /* Optional */ + /* Start index on IPv4 filters installed on source pipe */ + uint8_t start_ipv4_filter_idx_valid; + /* Must be set to true if start_ipv4_filter_idx is being passed */ + uint32_t start_ipv4_filter_idx; + /* Start index of IPv4 rules in filter index list */ + + /* Optional */ + /* Start index on IPv6 filters installed on source pipe */ + uint8_t start_ipv6_filter_idx_valid; + /* Must be set to true if start_ipv6_filter_idx is being passed */ + uint32_t start_ipv6_filter_idx; + /* Start index of IPv6 rules in filter index list */ + + /* Optional */ + /* List of Rule Ids */ + uint8_t rule_id_valid; + /* Must be set to true if rule_id is being passed */ + uint32_t rule_id_len; + /* Must be set to # of elements in rule_id */ + uint32_t rule_id[QMI_IPA_MAX_FILTERS_V01]; + /* + * Provides the list of Rule Ids of rules added in IPA on the given + * source pipe index. If the install_status TLV indicates a + * failure, the Rule Ids in this list must be set to a reserved + * index (255). + */ +}; /* Message */ + +/* Response Message; This is the message that is exchanged between the + * control point and the service in order to notify the remote driver + * of the installation of the filter rule supplied earlier by the + * remote driver. + */ +struct ipa_fltr_installed_notif_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* Standard response type */ +}; /* Message */ + +/* Request Message; Notifies the remote driver of the need to clear the data + * path to prevent the IPA from being blocked at the head of the processing + * pipeline + */ +struct ipa_enable_force_clear_datapath_req_msg_v01 { + /* Mandatory */ + /* Pipe Mask */ + uint32_t source_pipe_bitmask; + /* Set of consumer (source) pipes that must be clear of + * active data transfers. + */ + + /* Mandatory */ + /* Request ID */ + uint32_t request_id; + /* Identifies the ID of the request that is sent to the server + * The same request ID is used in the message to remove the force_clear + * request. 
The server is expected to keep track of the request ID and + * the source_pipe_bitmask so that it can revert as needed + */ + + /* Optional */ + /* Source Throttle State */ + uint8_t throttle_source_valid; + /* Must be set to true if throttle_source is being passed */ + uint8_t throttle_source; + /* Specifies whether the server is to throttle the data from + * these consumer (source) pipes after clearing the exisiting + * data present in the IPA that were pulled from these pipes + * The server is expected to put all the source pipes in the + * source_pipe_bitmask in the same state + */ +}; /* Message */ + +/* Response Message; Notifies the remote driver of the need to clear the + * data path to prevent the IPA from being blocked at the head of the + * processing pipeline + */ +struct ipa_enable_force_clear_datapath_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* Standard response type */ +}; /* Message */ + +/* Request Message; Notifies the remote driver that the forceful clearing + * of the data path can be lifted + */ +struct ipa_disable_force_clear_datapath_req_msg_v01 { + /* Mandatory */ + /* Request ID */ + uint32_t request_id; + /* Identifies the request that was sent to the server to + * forcibly clear the data path. This request simply undoes + * the operation done in that request + */ +}; /* Message */ + +/* Response Message; Notifies the remote driver that the forceful clearing + * of the data path can be lifted + */ +struct ipa_disable_force_clear_datapath_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* Standard response type */ +}; /* Message */ + +enum ipa_peripheral_speed_enum_v01 { + IPA_PERIPHERAL_SPEED_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + /* To force a 32 bit signed enum. Do not change or use */ + QMI_IPA_PER_USB_FS_V01 = 1, + /* Full-speed USB connection */ + QMI_IPA_PER_USB_HS_V01 = 2, + /* High-speed USB connection */ + QMI_IPA_PER_USB_SS_V01 = 3, + /* Super-speed USB connection */ + QMI_IPA_PER_WLAN_V01 = 4, + /* WLAN connection */ + IPA_PERIPHERAL_SPEED_ENUM_MAX_ENUM_VAL_V01 = 2147483647 + /* To force a 32 bit signed enum. Do not change or use*/ +}; + +enum ipa_pipe_mode_enum_v01 { + IPA_PIPE_MODE_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + /* To force a 32 bit signed enum. Do not change or use */ + QMI_IPA_PIPE_MODE_HW_V01 = 1, + /* Pipe is connected with a hardware block */ + QMI_IPA_PIPE_MODE_SW_V01 = 2, + /* Pipe is controlled by the software */ + IPA_PIPE_MODE_ENUM_MAX_ENUM_VAL_V01 = 2147483647 + /* To force a 32 bit signed enum. Do not change or use */ +}; + +enum ipa_peripheral_type_enum_v01 { + IPA_PERIPHERAL_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + /* To force a 32 bit signed enum. Do not change or use */ + QMI_IPA_PERIPHERAL_USB_V01 = 1, + /* Specifies a USB peripheral */ + QMI_IPA_PERIPHERAL_HSIC_V01 = 2, + /* Specifies an HSIC peripheral */ + QMI_IPA_PERIPHERAL_PCIE_V01 = 3, + /* Specifies a PCIe peripheral */ + IPA_PERIPHERAL_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647 + /* To force a 32 bit signed enum. Do not change or use */ +}; + +struct ipa_config_req_msg_v01 { + /* Optional */ + /* Peripheral Type */ + uint8_t peripheral_type_valid; + /* Must be set to true if peripheral_type is being passed */ + enum ipa_peripheral_type_enum_v01 peripheral_type; + /* Informs the remote driver about the perhipheral for + * which this configuration information is relevant. 
Values: + * - QMI_IPA_PERIPHERAL_USB (1) -- Specifies a USB peripheral + * - QMI_IPA_PERIPHERAL_HSIC(2) -- Specifies an HSIC peripheral + * - QMI_IPA_PERIPHERAL_PCIE(3) -- Specifies a PCIe peripheral + */ + + /* Optional */ + /* HW Deaggregation Support */ + uint8_t hw_deaggr_supported_valid; + /* Must be set to true if hw_deaggr_supported is being passed */ + uint8_t hw_deaggr_supported; + /* Informs the remote driver whether the local IPA driver + * allows de-aggregation to be performed in the hardware + */ + + /* Optional */ + /* Maximum Aggregation Frame Size */ + uint8_t max_aggr_frame_size_valid; + /* Must be set to true if max_aggr_frame_size is being passed */ + uint32_t max_aggr_frame_size; + /* Specifies the maximum size of the aggregated frame that + * the remote driver can expect from this execution environment + * - Valid range: 128 bytes to 32768 bytes + */ + + /* Optional */ + /* IPA Ingress Pipe Mode */ + uint8_t ipa_ingress_pipe_mode_valid; + /* Must be set to true if ipa_ingress_pipe_mode is being passed */ + + enum ipa_pipe_mode_enum_v01 ipa_ingress_pipe_mode; + /* Indicates to the remote driver if the ingress pipe into the + * IPA is in direct connection with another hardware block or + * if the producer of data to this ingress pipe is a software + * module. Values: + * -QMI_IPA_PIPE_MODE_HW(1) --Pipe is connected with hardware block + * -QMI_IPA_PIPE_MODE_SW(2) --Pipe is controlled by the software + */ + + /* Optional */ + /* Peripheral Speed Info */ + uint8_t peripheral_speed_info_valid; + /* Must be set to true if peripheral_speed_info is being passed */ + + enum ipa_peripheral_speed_enum_v01 peripheral_speed_info; + /* Indicates the speed that the peripheral connected to the IPA supports + * Values: + * - QMI_IPA_PER_USB_FS (1) -- Full-speed USB connection + * - QMI_IPA_PER_USB_HS (2) -- High-speed USB connection + * - QMI_IPA_PER_USB_SS (3) -- Super-speed USB connection + * - QMI_IPA_PER_WLAN (4) -- WLAN connection + */ + + /* Optional */ + /* Downlink Accumulation Time limit */ + uint8_t dl_accumulation_time_limit_valid; + /* Must be set to true if dl_accumulation_time_limit is being passed */ + uint32_t dl_accumulation_time_limit; + /* Informs the remote driver about the time for which data + * is accumulated in the downlink direction before it is pushed into the + * IPA (downlink is with respect to the WWAN air interface) + * - Units: milliseconds + * - Maximum value: 255 + */ + + /* Optional */ + /* Downlink Accumulation Packet limit */ + uint8_t dl_accumulation_pkt_limit_valid; + /* Must be set to true if dl_accumulation_pkt_limit is being passed */ + uint32_t dl_accumulation_pkt_limit; + /* Informs the remote driver about the number of packets + * that are to be accumulated in the downlink direction before it is + * pushed into the IPA - Maximum value: 1023 + */ + + /* Optional */ + /* Downlink Accumulation Byte Limit */ + uint8_t dl_accumulation_byte_limit_valid; + /* Must be set to true if dl_accumulation_byte_limit is being passed */ + uint32_t dl_accumulation_byte_limit; + /* Inform the remote driver about the number of bytes + * that are to be accumulated in the downlink direction before it + * is pushed into the IPA - Maximum value: TBD + */ + + /* Optional */ + /* Uplink Accumulation Time Limit */ + uint8_t ul_accumulation_time_limit_valid; + /* Must be set to true if ul_accumulation_time_limit is being passed */ + uint32_t ul_accumulation_time_limit; + /* Inform thes remote driver about the time for which data + * is to be accumulated in the uplink 
direction before it is pushed into + * the IPA (downlink is with respect to the WWAN air interface). + * - Units: milliseconds + * - Maximum value: 255 + */ + + /* Optional */ + /* HW Control Flags */ + uint8_t hw_control_flags_valid; + /* Must be set to true if hw_control_flags is being passed */ + uint32_t hw_control_flags; + /* Informs the remote driver about the hardware control flags: + * - Bit 0: IPA_HW_FLAG_HALT_SYSTEM_ON_NON_TERMINAL_FAILURE -- + * Indicates to the hardware that it must not continue with + * any subsequent operation even if the failure is not terminal + * - Bit 1: IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR -- + * Indicates to the hardware that it is not required to report + * channel errors to the host. + * - Bit 2: IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP -- + * Indicates to the hardware that it is not required to generate + * wake-up events to the host. + * - Bit 4: IPA_HW_FLAG_WORK_OVER_DDR -- + * Indicates to the hardware that it is accessing addresses in + * the DDR and not over PCIe + * - Bit 5: IPA_HW_FLAG_INTERRUPT_MODE_CTRL_FLAG -- + * Indicates whether the device must + * raise an event to let the host know that it is going into an + * interrupt mode (no longer polling for data/buffer availability) + */ + + /* Optional */ + /* Uplink MSI Event Threshold */ + uint8_t ul_msi_event_threshold_valid; + /* Must be set to true if ul_msi_event_threshold is being passed */ + uint32_t ul_msi_event_threshold; + /* Informs the remote driver about the threshold that will + * cause an interrupt (MSI) to be fired to the host. This ensures + * that the remote driver does not accumulate an excesive number of + * events before firing an interrupt. + * This threshold is applicable for data moved in the UL direction. + * - Maximum value: 65535 + */ + + /* Optional */ + /* Downlink MSI Event Threshold */ + uint8_t dl_msi_event_threshold_valid; + /* Must be set to true if dl_msi_event_threshold is being passed */ + uint32_t dl_msi_event_threshold; + /* Informs the remote driver about the threshold that will + * cause an interrupt (MSI) to be fired to the host. This ensures + * that the remote driver does not accumulate an excesive number of + * events before firing an interrupt + * This threshold is applicable for data that is moved in the + * DL direction - Maximum value: 65535 + */ + + /* Optional */ + /* Uplink Fifo Size */ + uint8_t ul_fifo_size_valid; + /* Must be set to true if ul_fifo_size is being passed */ + uint32_t ul_fifo_size; + /* + * Informs the remote driver about the total Uplink xDCI + * buffer size that holds the complete aggregated frame + * or BAM data fifo size of the peripheral channel/pipe(in Bytes). + * This deprecates the max_aggr_frame_size field. This TLV + * deprecates max_aggr_frame_size TLV from version 1.9 onwards + * and the max_aggr_frame_size TLV will be ignored in the presence + * of this TLV. + */ + + /* Optional */ + /* Downlink Fifo Size */ + uint8_t dl_fifo_size_valid; + /* Must be set to true if dl_fifo_size is being passed */ + uint32_t dl_fifo_size; + /* + * Informs the remote driver about the total Downlink xDCI buffering + * capacity or BAM data fifo size of the peripheral channel/pipe. + * (In Bytes). dl_fifo_size = n * dl_buf_size. This deprecates the + * max_aggr_frame_size field. If this value is set + * max_aggr_frame_size is ignored. 
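+ *
+ * Illustrative configuration for a super-speed USB peripheral (the FIFO
+ * size is an example number only):
+ *
+ *   struct ipa_config_req_msg_v01 cfg;
+ *
+ *   memset(&cfg, 0, sizeof(cfg));
+ *   cfg.peripheral_type_valid = 1;
+ *   cfg.peripheral_type = QMI_IPA_PERIPHERAL_USB_V01;
+ *   cfg.peripheral_speed_info_valid = 1;
+ *   cfg.peripheral_speed_info = QMI_IPA_PER_USB_SS_V01;
+ *   cfg.dl_fifo_size_valid = 1;
+ *   cfg.dl_fifo_size = 16384;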
+ */ + + /* Optional */ + /* Downlink Buffer Size */ + uint8_t dl_buf_size_valid; + /* Must be set to true if dl_buf_size is being passed */ + uint32_t dl_buf_size; + /* Informs the remote driver about the single xDCI buffer size. + * This is applicable only in GSI mode(in Bytes).\n + */ +}; /* Message */ + +/* Response Message; Notifies the remote driver of the configuration + * information + */ +struct ipa_config_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /**< Standard response type.*/ +}; /* Message */ + +enum ipa_stats_type_enum_v01 { + IPA_STATS_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + /* To force a 32 bit signed enum. Do not change or use */ + QMI_IPA_STATS_TYPE_INVALID_V01 = 0, + /* Invalid stats type identifier */ + QMI_IPA_STATS_TYPE_PIPE_V01 = 1, + /* Pipe stats type */ + QMI_IPA_STATS_TYPE_FILTER_RULES_V01 = 2, + /* Filter rule stats type */ + IPA_STATS_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647 + /* To force a 32 bit signed enum. Do not change or use */ +}; + +struct ipa_pipe_stats_info_type_v01 { + uint32_t pipe_index; + /* Pipe index for statistics to be retrieved. */ + + uint64_t num_ipv4_packets; + /* Accumulated number of IPv4 packets over this pipe. */ + + uint64_t num_ipv4_bytes; + /* Accumulated number of IPv4 bytes over this pipe. */ + + uint64_t num_ipv6_packets; + /* Accumulated number of IPv6 packets over this pipe. */ + + uint64_t num_ipv6_bytes; + /* Accumulated number of IPv6 bytes over this pipe. */ +}; + +struct ipa_stats_type_filter_rule_v01 { + uint32_t filter_rule_index; + /* Filter rule index for statistics to be retrieved. */ + + uint64_t num_packets; + /* Accumulated number of packets over this filter rule. */ +}; + +/* Request Message; Retrieve the data statistics collected on modem + * IPA driver. + */ +struct ipa_get_data_stats_req_msg_v01 { + /* Mandatory */ + /* Stats Type */ + enum ipa_stats_type_enum_v01 ipa_stats_type; + /* Indicates the type of statistics to be retrieved. */ + + /* Optional */ + /* Reset Statistics */ + uint8_t reset_stats_valid; + /* Must be set to true if reset_stats is being passed */ + uint8_t reset_stats; + /* Option to reset the specific type of data statistics + * currently collected. + */ +}; /* Message */ + +/* Response Message; Retrieve the data statistics collected + * on modem IPA driver. + */ +struct ipa_get_data_stats_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* Standard response type. */ + + /* Optional */ + /* Stats Type */ + uint8_t ipa_stats_type_valid; + /* Must be set to true if ipa_stats_type is passed */ + enum ipa_stats_type_enum_v01 ipa_stats_type; + /* Indicates the type of statistics that are retrieved. */ + + /* Optional */ + /* Uplink Source Pipe Statistics List */ + uint8_t ul_src_pipe_stats_list_valid; + /* Must be set to true if ul_src_pipe_stats_list is being passed */ + uint32_t ul_src_pipe_stats_list_len; + /* Must be set to # of elements in ul_src_pipe_stats_list */ + struct ipa_pipe_stats_info_type_v01 + ul_src_pipe_stats_list[QMI_IPA_MAX_PIPES_V01]; + /* List of all Uplink pipe statistics that are retrieved. 
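+ *
+ * Illustrative aggregation over the returned list (total_ul_bytes is a
+ * caller-side variable, not part of this API):
+ *
+ *   for (i = 0; i < resp.ul_src_pipe_stats_list_len; i++) {
+ *           const struct ipa_pipe_stats_info_type_v01 *s =
+ *                   &resp.ul_src_pipe_stats_list[i];
+ *
+ *           total_ul_bytes += s->num_ipv4_bytes + s->num_ipv6_bytes;
+ *   }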
*/ + + /* Optional */ + /* Downlink Destination Pipe Statistics List */ + uint8_t dl_dst_pipe_stats_list_valid; + /* Must be set to true if dl_dst_pipe_stats_list is being passed */ + uint32_t dl_dst_pipe_stats_list_len; + /* Must be set to # of elements in dl_dst_pipe_stats_list */ + struct ipa_pipe_stats_info_type_v01 + dl_dst_pipe_stats_list[QMI_IPA_MAX_PIPES_V01]; + /* List of all Downlink pipe statistics that are retrieved. */ + + /* Optional */ + /* Downlink Filter Rule Stats List */ + uint8_t dl_filter_rule_stats_list_valid; + /* Must be set to true if dl_filter_rule_stats_list is being passed */ + uint32_t dl_filter_rule_stats_list_len; + /* Must be set to # of elements in dl_filter_rule_stats_list */ + struct ipa_stats_type_filter_rule_v01 + dl_filter_rule_stats_list[QMI_IPA_MAX_FILTERS_V01]; + /* List of all Downlink filter rule statistics retrieved. */ +}; /* Message */ + +struct ipa_apn_data_stats_info_type_v01 { + uint32_t mux_id; + /* Indicates the MUX ID associated with the APN for which the data + * usage statistics is queried + */ + + uint64_t num_ul_packets; + /* Accumulated number of uplink packets corresponding to + * this Mux ID + */ + + uint64_t num_ul_bytes; + /* Accumulated number of uplink bytes corresponding to + * this Mux ID + */ + + uint64_t num_dl_packets; + /* Accumulated number of downlink packets corresponding + * to this Mux ID + */ + + uint64_t num_dl_bytes; + /* Accumulated number of downlink bytes corresponding to + * this Mux ID + */ +}; /* Type */ + +/* Request Message; Retrieve the APN data statistics collected from modem */ +struct ipa_get_apn_data_stats_req_msg_v01 { + /* Optional */ + /* Mux ID List */ + uint8_t mux_id_list_valid; + /* Must be set to true if mux_id_list is being passed */ + uint32_t mux_id_list_len; + /* Must be set to # of elements in mux_id_list */ + uint32_t mux_id_list[QMI_IPA_MAX_APN_V01]; + /* The list of MUX IDs associated with APNs for which the data usage + * statistics is being retrieved + */ +}; /* Message */ + +/* Response Message; Retrieve the APN data statistics collected from modem */ +struct ipa_get_apn_data_stats_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* Standard response type.*/ + + /* Optional */ + /* APN Data Statistics List */ + uint8_t apn_data_stats_list_valid; + /* Must be set to true if apn_data_stats_list is being passed */ + uint32_t apn_data_stats_list_len; + /* Must be set to # of elements in apn_data_stats_list */ + struct ipa_apn_data_stats_info_type_v01 + apn_data_stats_list[QMI_IPA_MAX_APN_V01]; + /* List of APN data retrieved as per request on mux_id. + * For now, only one APN monitoring is supported on modem driver. + * Making this as list for expandability to support more APNs in future. + */ +}; /* Message */ + +struct ipa_data_usage_quota_info_type_v01 { + uint32_t mux_id; + /* Indicates the MUX ID associated with the APN for which the data usage + * quota needs to be set + */ + + uint64_t num_Mbytes; + /* Number of Mega-bytes of quota value to be set on this APN associated + * with this Mux ID. 
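+ *
+ * Illustrative quota entry (the 500 MB figure is a made-up example); the
+ * entry would then be placed in apn_quota_list[0] of the set-quota request
+ * below, with apn_quota_list_valid = 1 and apn_quota_list_len = 1:
+ *
+ *   struct ipa_data_usage_quota_info_type_v01 quota = {
+ *           .mux_id = 1,
+ *           .num_Mbytes = 500,
+ *   };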
+ */ +}; /* Type */ + +/* Request Message; Master driver sets a data usage quota value on + * modem driver + */ +struct ipa_set_data_usage_quota_req_msg_v01 { + /* Optional */ + /* APN Quota List */ + uint8_t apn_quota_list_valid; + /* Must be set to true if apn_quota_list is being passed */ + uint32_t apn_quota_list_len; + /* Must be set to # of elements in apn_quota_list */ + struct ipa_data_usage_quota_info_type_v01 + apn_quota_list[QMI_IPA_MAX_APN_V01]; + /* The list of APNs on which a data usage quota to be set on modem + * driver. For now, only one APN monitoring is supported on modem + * driver. Making this as list for expandability to support more + * APNs in future. + */ +}; /* Message */ + +/* Response Message; Master driver sets a data usage on modem driver. */ +struct ipa_set_data_usage_quota_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* Standard response type.*/ +}; /* Message */ + +/* Indication Message; Modem driver sends this indication to master + * driver when the data usage quota is reached + */ +struct ipa_data_usage_quota_reached_ind_msg_v01 { + /* Mandatory */ + /* APN Quota List */ + struct ipa_data_usage_quota_info_type_v01 apn; + /* This message indicates which APN has the previously set quota + * reached. For now, only one APN monitoring is supported on modem + * driver. + */ +}; /* Message */ + +/* Request Message; Master driver request modem driver to terminate + * the current data usage quota monitoring session. + */ +struct ipa_stop_data_usage_quota_req_msg_v01 { + /* This element is a placeholder to prevent the declaration of + * an empty struct. DO NOT USE THIS FIELD UNDER ANY CIRCUMSTANCE + */ + char __placeholder; +}; /* Message */ + +/* Response Message; Master driver request modem driver to terminate + * the current quota monitoring session. + */ +struct ipa_stop_data_usage_quota_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /**< Standard response type.*/ +}; /* Message */ + +/* Request Message; Request from Modem IPA driver to set DPL peripheral pipe */ +struct ipa_install_fltr_rule_req_ex_msg_v01 { + + /* Optional */ + /* Extended Filter Specification */ + uint8_t filter_spec_ex_list_valid; + uint32_t filter_spec_ex_list_len; + struct ipa_filter_spec_ex_type_v01 + filter_spec_ex_list[QMI_IPA_MAX_FILTERS_EX_V01]; + /* List of filter specifications of filters that must be installed in + * the IPAv3.x hardware. + * The driver installing these rules must do so in the same order as + * specified in this list. + */ + + /* Optional */ + /* Pipe Index to Install Rule */ + uint8_t source_pipe_index_valid; + uint32_t source_pipe_index; + /* Pipe index to install the filter rule. + * The requester may not always know the pipe indices. If not specified, + * the receiver must install this rule on all pipes that it controls, + * through which data may be fed into the IPA. + */ + + /* Optional */ + /* Total Number of IPv4 Filters in the Filter Spec List */ + uint8_t num_ipv4_filters_valid; + uint32_t num_ipv4_filters; + /* Number of IPv4 rules included in the filter specification list. */ + + /* Optional */ + /* Total Number of IPv6 Filters in the Filter Spec List */ + uint8_t num_ipv6_filters_valid; + uint32_t num_ipv6_filters; + /* Number of IPv6 rules included in the filter specification list. 
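+ *
+ * Illustrative note: this _EX_ variant sizes its arrays with
+ * QMI_IPA_MAX_FILTERS_EX_V01 (128 entries) and its response returns rule
+ * IDs rather than identifier-to-handle mappings; population otherwise
+ * follows the same _valid / _len pattern shown for the non-EX request,
+ * e.g. (values made up):
+ *
+ *   req_ex.filter_spec_ex_list_valid = 1;
+ *   req_ex.filter_spec_ex_list_len = 1;
+ *   req_ex.filter_spec_ex_list[0].is_rule_hashable = 1;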
*/ + + /* Optional */ + /* List of XLAT Filter Indices in the Filter Spec List */ + uint8_t xlat_filter_indices_list_valid; + uint32_t xlat_filter_indices_list_len; + uint32_t xlat_filter_indices_list[QMI_IPA_MAX_FILTERS_EX_V01]; + /* List of XLAT filter indices. + * Filter rules at specified indices must be modified by the + * receiver if the PDN is XLAT before installing them on the associated + * IPA consumer pipe. + */ +}; /* Message */ + +/* Response Message; Requests installation of filtering rules in the hardware + * block on the remote side. + */ +struct ipa_install_fltr_rule_resp_ex_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* Standard response type. + * Standard response type. Contains the following data members: + * - qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE + * - qmi_error_type -- Error code. Possible error code values are + * described in the error codes + * section of each message + * definition. + */ + + /* Optional */ + /* Rule ID List */ + uint8_t rule_id_valid; + uint32_t rule_id_len; + uint32_t rule_id[QMI_IPA_MAX_FILTERS_EX_V01]; + /* List of rule IDs returned to the control point. + * Any further reference to the rule is done using the filter rule ID + * specified in this list. + */ +}; /* Message */ + +/*Service Message Definition*/ +#define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020 +#define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020 +#define QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 0x0021 +#define QMI_IPA_INIT_MODEM_DRIVER_RESP_V01 0x0021 +#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01 0x0022 +#define QMI_IPA_INSTALL_FILTER_RULE_REQ_V01 0x0023 +#define QMI_IPA_INSTALL_FILTER_RULE_RESP_V01 0x0023 +#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01 0x0024 +#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01 0x0024 +#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01 0x0025 +#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01 0x0025 +#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01 0x0026 +#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01 0x0026 +#define QMI_IPA_CONFIG_REQ_V01 0x0027 +#define QMI_IPA_CONFIG_RESP_V01 0x0027 +#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_REQ_V01 0x0028 +#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_RESP_V01 0x0028 +#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_REQ_V01 0x0029 +#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_RESP_V01 0x0029 +#define QMI_IPA_GET_DATA_STATS_REQ_V01 0x0030 +#define QMI_IPA_GET_DATA_STATS_RESP_V01 0x0030 +#define QMI_IPA_GET_APN_DATA_STATS_REQ_V01 0x0031 +#define QMI_IPA_GET_APN_DATA_STATS_RESP_V01 0x0031 +#define QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01 0x0032 +#define QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 0x0032 +#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01 0x0033 +#define QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01 0x0034 +#define QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 0x0034 +#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01 0x0035 +#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 0x0035 +#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01 0x0037 +#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01 0x0037 + +/* add for max length*/ +#define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 134 +#define QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01 25 +#define QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01 8 +#define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7 +#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 22369 +#define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783 +#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 834 +#define 
QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7 +#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7 +#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 15 + + +#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01 18 +#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01 7 +#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01 7 +#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01 7 + + +#define QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01 102 +#define QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01 7 +#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_REQ_MAX_MSG_LEN_V01 18 +#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_RESP_MAX_MSG_LEN_V01 7 +#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_REQ_MAX_MSG_LEN_V01 7 +#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_RESP_MAX_MSG_LEN_V01 7 +#define QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01 11 +#define QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01 2234 +#define QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01 36 +#define QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01 299 +#define QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01 100 +#define QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01 7 +#define QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01 0 +#define QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01 7 + +#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01 4 +#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01 7 + +#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 22685 +#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01 523 + +/* Service Object Accessor */ + +#endif/* IPA_QMI_SERVICE_V01_H */ diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h new file mode 100644 index 000000000000..6cc0cbb3ee62 --- /dev/null +++ b/include/uapi/linux/msm_ipa.h @@ -0,0 +1,1953 @@ +#ifndef _UAPI_MSM_IPA_H_ +#define _UAPI_MSM_IPA_H_ + +#ifndef __KERNEL__ +#include +#include +#include +#endif +#include +#include +#include + +/** + * unique magic number of the IPA device + */ +#define IPA_IOC_MAGIC 0xCF + +/** + * name of the default routing tables for v4 and v6 + */ +#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt" + +/** + * the commands supported by IPA driver + */ +#define IPA_IOCTL_ADD_HDR 0 +#define IPA_IOCTL_DEL_HDR 1 +#define IPA_IOCTL_ADD_RT_RULE 2 +#define IPA_IOCTL_DEL_RT_RULE 3 +#define IPA_IOCTL_ADD_FLT_RULE 4 +#define IPA_IOCTL_DEL_FLT_RULE 5 +#define IPA_IOCTL_COMMIT_HDR 6 +#define IPA_IOCTL_RESET_HDR 7 +#define IPA_IOCTL_COMMIT_RT 8 +#define IPA_IOCTL_RESET_RT 9 +#define IPA_IOCTL_COMMIT_FLT 10 +#define IPA_IOCTL_RESET_FLT 11 +#define IPA_IOCTL_DUMP 12 +#define IPA_IOCTL_GET_RT_TBL 13 +#define IPA_IOCTL_PUT_RT_TBL 14 +#define IPA_IOCTL_COPY_HDR 15 +#define IPA_IOCTL_QUERY_INTF 16 +#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17 +#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18 +#define IPA_IOCTL_GET_HDR 19 +#define IPA_IOCTL_PUT_HDR 20 +#define IPA_IOCTL_SET_FLT 21 +#define IPA_IOCTL_ALLOC_NAT_MEM 22 +#define IPA_IOCTL_V4_INIT_NAT 23 +#define IPA_IOCTL_NAT_DMA 24 +#define IPA_IOCTL_V4_DEL_NAT 26 +#define IPA_IOCTL_PULL_MSG 27 +#define IPA_IOCTL_GET_NAT_OFFSET 28 +#define IPA_IOCTL_RM_ADD_DEPENDENCY 29 +#define IPA_IOCTL_RM_DEL_DEPENDENCY 30 +#define IPA_IOCTL_GENERATE_FLT_EQ 31 +#define IPA_IOCTL_QUERY_INTF_EXT_PROPS 32 +#define IPA_IOCTL_QUERY_EP_MAPPING 33 +#define IPA_IOCTL_QUERY_RT_TBL_INDEX 34 +#define IPA_IOCTL_WRITE_QMAPID 35 +#define IPA_IOCTL_MDFY_FLT_RULE 36 +#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD 37 +#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL 38 
+#define IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED 39 +#define IPA_IOCTL_ADD_HDR_PROC_CTX 40 +#define IPA_IOCTL_DEL_HDR_PROC_CTX 41 +#define IPA_IOCTL_MDFY_RT_RULE 42 +#define IPA_IOCTL_ADD_RT_RULE_AFTER 43 +#define IPA_IOCTL_ADD_FLT_RULE_AFTER 44 +#define IPA_IOCTL_GET_HW_VERSION 45 +#define IPA_IOCTL_ADD_RT_RULE_EXT 46 +#define IPA_IOCTL_ADD_VLAN_IFACE 47 +#define IPA_IOCTL_DEL_VLAN_IFACE 48 +#define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING 49 +#define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING 50 +#define IPA_IOCTL_NAT_MODIFY_PDN 51 +#define IPA_IOCTL_MAX 52 + +/** + * max size of the header to be inserted + */ +#define IPA_HDR_MAX_SIZE 64 + +/** + * max size of the name of the resource (routing table, header) + */ +#define IPA_RESOURCE_NAME_MAX 32 + +/** + * max number of interface properties + */ +#define IPA_NUM_PROPS_MAX 35 + +/** + * size of the mac address + */ +#define IPA_MAC_ADDR_SIZE 6 + +/** + * max number of mbim streams + */ +#define IPA_MBIM_MAX_STREAM_NUM 8 + +/** + * size of the ipv6 address + */ +#define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4 + +/** + * the attributes of the rule (routing or filtering) + */ +#define IPA_FLT_TOS (1ul << 0) +#define IPA_FLT_PROTOCOL (1ul << 1) +#define IPA_FLT_SRC_ADDR (1ul << 2) +#define IPA_FLT_DST_ADDR (1ul << 3) +#define IPA_FLT_SRC_PORT_RANGE (1ul << 4) +#define IPA_FLT_DST_PORT_RANGE (1ul << 5) +#define IPA_FLT_TYPE (1ul << 6) +#define IPA_FLT_CODE (1ul << 7) +#define IPA_FLT_SPI (1ul << 8) +#define IPA_FLT_SRC_PORT (1ul << 9) +#define IPA_FLT_DST_PORT (1ul << 10) +#define IPA_FLT_TC (1ul << 11) +#define IPA_FLT_FLOW_LABEL (1ul << 12) +#define IPA_FLT_NEXT_HDR (1ul << 13) +#define IPA_FLT_META_DATA (1ul << 14) +#define IPA_FLT_FRAGMENT (1ul << 15) +#define IPA_FLT_TOS_MASKED (1ul << 16) +#define IPA_FLT_MAC_SRC_ADDR_ETHER_II (1ul << 17) +#define IPA_FLT_MAC_DST_ADDR_ETHER_II (1ul << 18) +#define IPA_FLT_MAC_SRC_ADDR_802_3 (1ul << 19) +#define IPA_FLT_MAC_DST_ADDR_802_3 (1ul << 20) +#define IPA_FLT_MAC_ETHER_TYPE (1ul << 21) +#define IPA_FLT_MAC_DST_ADDR_L2TP (1ul << 22) + +/** + * maximal number of NAT PDNs in the PDN config table + */ +#define IPA_MAX_PDN_NUM 5 + +/** + * enum ipa_client_type - names for the various IPA "clients" + * these are from the perspective of the clients, for e.g. + * HSIC1_PROD means HSIC client is the producer and IPA is the + * consumer. + * PROD clients are always even, and CONS clients are always odd. 
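+ *
+ * Illustration of the even/odd convention, using values from the list
+ * below: IPA_CLIENT_USB_PROD is 18 and is immediately followed by
+ * IPA_CLIENT_USB_CONS at 19, so a producer value c always satisfies
+ * (c % 2) == 0 under this convention.
+ *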
+ * Add new clients in the end of the list and update IPA_CLIENT_MAX + */ +enum ipa_client_type { + IPA_CLIENT_HSIC1_PROD = 0, + IPA_CLIENT_HSIC1_CONS = 1, + + IPA_CLIENT_HSIC2_PROD = 2, + IPA_CLIENT_HSIC2_CONS = 3, + + IPA_CLIENT_HSIC3_PROD = 4, + IPA_CLIENT_HSIC3_CONS = 5, + + IPA_CLIENT_HSIC4_PROD = 6, + IPA_CLIENT_HSIC4_CONS = 7, + + IPA_CLIENT_HSIC5_PROD = 8, + IPA_CLIENT_HSIC5_CONS = 9, + + IPA_CLIENT_WLAN1_PROD = 10, + IPA_CLIENT_WLAN1_CONS = 11, + + IPA_CLIENT_A5_WLAN_AMPDU_PROD = 12, + IPA_CLIENT_WLAN2_CONS = 13, + + /* RESERVERD PROD = 14, */ + IPA_CLIENT_WLAN3_CONS = 15, + + /* RESERVERD PROD = 16, */ + IPA_CLIENT_WLAN4_CONS = 17, + + IPA_CLIENT_USB_PROD = 18, + IPA_CLIENT_USB_CONS = 19, + + IPA_CLIENT_USB2_PROD = 20, + IPA_CLIENT_USB2_CONS = 21, + + IPA_CLIENT_USB3_PROD = 22, + IPA_CLIENT_USB3_CONS = 23, + + IPA_CLIENT_USB4_PROD = 24, + IPA_CLIENT_USB4_CONS = 25, + + IPA_CLIENT_UC_USB_PROD = 26, + IPA_CLIENT_USB_DPL_CONS = 27, + + IPA_CLIENT_A2_EMBEDDED_PROD = 28, + IPA_CLIENT_A2_EMBEDDED_CONS = 29, + + IPA_CLIENT_A2_TETHERED_PROD = 30, + IPA_CLIENT_A2_TETHERED_CONS = 31, + + IPA_CLIENT_APPS_LAN_PROD = 32, + IPA_CLIENT_APPS_LAN_CONS = 33, + + IPA_CLIENT_APPS_WAN_PROD = 34, + IPA_CLIENT_APPS_LAN_WAN_PROD = IPA_CLIENT_APPS_WAN_PROD, + IPA_CLIENT_APPS_WAN_CONS = 35, + + IPA_CLIENT_APPS_CMD_PROD = 36, + IPA_CLIENT_A5_LAN_WAN_CONS = 37, + + IPA_CLIENT_ODU_PROD = 38, + IPA_CLIENT_ODU_EMB_CONS = 39, + + /* RESERVERD PROD = 40, */ + IPA_CLIENT_ODU_TETH_CONS = 41, + + IPA_CLIENT_MHI_PROD = 42, + IPA_CLIENT_MHI_CONS = 43, + + IPA_CLIENT_MEMCPY_DMA_SYNC_PROD = 44, + IPA_CLIENT_MEMCPY_DMA_SYNC_CONS = 45, + + IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD = 46, + IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS = 47, + + IPA_CLIENT_ETHERNET_PROD = 48, + IPA_CLIENT_ETHERNET_CONS = 49, + + IPA_CLIENT_Q6_LAN_PROD = 50, + IPA_CLIENT_Q6_LAN_CONS = 51, + + IPA_CLIENT_Q6_WAN_PROD = 52, + IPA_CLIENT_Q6_WAN_CONS = 53, + + IPA_CLIENT_Q6_CMD_PROD = 54, + IPA_CLIENT_Q6_DUN_CONS = 55, + + IPA_CLIENT_Q6_DECOMP_PROD = 56, + IPA_CLIENT_Q6_DECOMP_CONS = 57, + + IPA_CLIENT_Q6_DECOMP2_PROD = 58, + IPA_CLIENT_Q6_DECOMP2_CONS = 59, + + /* RESERVERD PROD = 60, */ + IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS = 61, + + IPA_CLIENT_TEST_PROD = 62, + IPA_CLIENT_TEST_CONS = 63, + + IPA_CLIENT_TEST1_PROD = 64, + IPA_CLIENT_TEST1_CONS = 65, + + IPA_CLIENT_TEST2_PROD = 66, + IPA_CLIENT_TEST2_CONS = 67, + + IPA_CLIENT_TEST3_PROD = 68, + IPA_CLIENT_TEST3_CONS = 69, + + IPA_CLIENT_TEST4_PROD = 70, + IPA_CLIENT_TEST4_CONS = 71, + + /* RESERVERD PROD = 72, */ + IPA_CLIENT_DUMMY_CONS = 73 +}; + +#define IPA_CLIENT_MAX (IPA_CLIENT_DUMMY_CONS + 1) + +#define IPA_CLIENT_IS_APPS_CONS(client) \ + ((client) == IPA_CLIENT_APPS_LAN_CONS || \ + (client) == IPA_CLIENT_APPS_WAN_CONS) + +#define IPA_CLIENT_IS_USB_CONS(client) \ + ((client) == IPA_CLIENT_USB_CONS || \ + (client) == IPA_CLIENT_USB2_CONS || \ + (client) == IPA_CLIENT_USB3_CONS || \ + (client) == IPA_CLIENT_USB_DPL_CONS || \ + (client) == IPA_CLIENT_USB4_CONS) + +#define IPA_CLIENT_IS_WLAN_CONS(client) \ + ((client) == IPA_CLIENT_WLAN1_CONS || \ + (client) == IPA_CLIENT_WLAN2_CONS || \ + (client) == IPA_CLIENT_WLAN3_CONS || \ + (client) == IPA_CLIENT_WLAN4_CONS) + +#define IPA_CLIENT_IS_ODU_CONS(client) \ + ((client) == IPA_CLIENT_ODU_EMB_CONS || \ + (client) == IPA_CLIENT_ODU_TETH_CONS) + +#define IPA_CLIENT_IS_Q6_CONS(client) \ + ((client) == IPA_CLIENT_Q6_LAN_CONS || \ + (client) == IPA_CLIENT_Q6_WAN_CONS || \ + (client) == IPA_CLIENT_Q6_DUN_CONS || \ + (client) == IPA_CLIENT_Q6_DECOMP_CONS || \ + (client) == 
IPA_CLIENT_Q6_DECOMP2_CONS || \ + (client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS) + +#define IPA_CLIENT_IS_Q6_PROD(client) \ + ((client) == IPA_CLIENT_Q6_LAN_PROD || \ + (client) == IPA_CLIENT_Q6_WAN_PROD || \ + (client) == IPA_CLIENT_Q6_CMD_PROD || \ + (client) == IPA_CLIENT_Q6_DECOMP_PROD || \ + (client) == IPA_CLIENT_Q6_DECOMP2_PROD) + +#define IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client) \ + ((client) == IPA_CLIENT_Q6_LAN_CONS || \ + (client) == IPA_CLIENT_Q6_WAN_CONS || \ + (client) == IPA_CLIENT_Q6_DUN_CONS || \ + (client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS) + +#define IPA_CLIENT_IS_Q6_ZIP_CONS(client) \ + ((client) == IPA_CLIENT_Q6_DECOMP_CONS || \ + (client) == IPA_CLIENT_Q6_DECOMP2_CONS) + +#define IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client) \ + ((client) == IPA_CLIENT_Q6_LAN_PROD || \ + (client) == IPA_CLIENT_Q6_WAN_PROD || \ + (client) == IPA_CLIENT_Q6_CMD_PROD) + +#define IPA_CLIENT_IS_Q6_ZIP_PROD(client) \ + ((client) == IPA_CLIENT_Q6_DECOMP_PROD || \ + (client) == IPA_CLIENT_Q6_DECOMP2_PROD) + +#define IPA_CLIENT_IS_MEMCPY_DMA_CONS(client) \ + ((client) == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS || \ + (client) == IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) + +#define IPA_CLIENT_IS_MEMCPY_DMA_PROD(client) \ + ((client) == IPA_CLIENT_MEMCPY_DMA_SYNC_PROD || \ + (client) == IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD) + +#define IPA_CLIENT_IS_MHI_CONS(client) \ + ((client) == IPA_CLIENT_MHI_CONS) + +#define IPA_CLIENT_IS_MHI(client) \ + ((client) == IPA_CLIENT_MHI_CONS || \ + (client) == IPA_CLIENT_MHI_PROD) + +#define IPA_CLIENT_IS_TEST_PROD(client) \ + ((client) == IPA_CLIENT_TEST_PROD || \ + (client) == IPA_CLIENT_TEST1_PROD || \ + (client) == IPA_CLIENT_TEST2_PROD || \ + (client) == IPA_CLIENT_TEST3_PROD || \ + (client) == IPA_CLIENT_TEST4_PROD) + +#define IPA_CLIENT_IS_TEST_CONS(client) \ + ((client) == IPA_CLIENT_TEST_CONS || \ + (client) == IPA_CLIENT_TEST1_CONS || \ + (client) == IPA_CLIENT_TEST2_CONS || \ + (client) == IPA_CLIENT_TEST3_CONS || \ + (client) == IPA_CLIENT_TEST4_CONS) + +#define IPA_CLIENT_IS_TEST(client) \ + (IPA_CLIENT_IS_TEST_PROD(client) || IPA_CLIENT_IS_TEST_CONS(client)) + +/** + * enum ipa_ip_type - Address family: IPv4 or IPv6 + */ +enum ipa_ip_type { + IPA_IP_v4, + IPA_IP_v6, + IPA_IP_MAX +}; + +/** + * enum ipa_rule_type - Type of routing or filtering rule + * Hashable: Rule will be located at the hashable tables + * Non_Hashable: Rule will be located at the non-hashable tables + */ +enum ipa_rule_type { + IPA_RULE_HASHABLE, + IPA_RULE_NON_HASHABLE, +}; +#define IPA_RULE_TYPE_MAX (IPA_RULE_NON_HASHABLE + 1) + +/** + * enum ipa_flt_action - action field of filtering rule + * + * Pass to routing: 5'd0 + * Pass to source NAT: 5'd1 + * Pass to destination NAT: 5'd2 + * Pass to default output pipe (e.g., Apps or Modem): 5'd3 + */ +enum ipa_flt_action { + IPA_PASS_TO_ROUTING, + IPA_PASS_TO_SRC_NAT, + IPA_PASS_TO_DST_NAT, + IPA_PASS_TO_EXCEPTION +}; + +/** + * enum ipa_wlan_event - Events for wlan client + * + * wlan client connect: New wlan client connected + * wlan client disconnect: wlan client disconnected + * wlan client power save: wlan client moved to power save + * wlan client normal: wlan client moved out of power save + * sw routing enable: ipa routing is disabled + * sw routing disable: ipa routing is enabled + * wlan ap connect: wlan AP(access point) is up + * wlan ap disconnect: wlan AP(access point) is down + * wlan sta connect: wlan STA(station) is up + * wlan sta disconnect: wlan STA(station) is down + * wlan client connect ex: new wlan client connected + * wlan scc switch: wlan 
interfaces in scc mode + * wlan mcc switch: wlan interfaces in mcc mode + * wlan wdi enable: wdi data path completed + * wlan wdi disable: wdi data path teardown + */ +enum ipa_wlan_event { + WLAN_CLIENT_CONNECT, + WLAN_CLIENT_DISCONNECT, + WLAN_CLIENT_POWER_SAVE_MODE, + WLAN_CLIENT_NORMAL_MODE, + SW_ROUTING_ENABLE, + SW_ROUTING_DISABLE, + WLAN_AP_CONNECT, + WLAN_AP_DISCONNECT, + WLAN_STA_CONNECT, + WLAN_STA_DISCONNECT, + WLAN_CLIENT_CONNECT_EX, + WLAN_SWITCH_TO_SCC, + WLAN_SWITCH_TO_MCC, + WLAN_WDI_ENABLE, + WLAN_WDI_DISABLE, + IPA_WLAN_EVENT_MAX +}; + +/** + * enum ipa_wan_event - Events for wan client + * + * wan default route add/del + * wan embms connect: New wan embms interface connected + */ +enum ipa_wan_event { + WAN_UPSTREAM_ROUTE_ADD = IPA_WLAN_EVENT_MAX, + WAN_UPSTREAM_ROUTE_DEL, + WAN_EMBMS_CONNECT, + WAN_XLAT_CONNECT, + IPA_WAN_EVENT_MAX +}; + +enum ipa_ecm_event { + ECM_CONNECT = IPA_WAN_EVENT_MAX, + ECM_DISCONNECT, + IPA_ECM_EVENT_MAX, +}; + +enum ipa_tethering_stats_event { + IPA_TETHERING_STATS_UPDATE_STATS = IPA_ECM_EVENT_MAX, + IPA_TETHERING_STATS_UPDATE_NETWORK_STATS, + IPA_TETHERING_STATS_EVENT_MAX, +}; + +enum ipa_quota_event { + IPA_QUOTA_REACH = IPA_TETHERING_STATS_EVENT_MAX, + IPA_QUOTA_EVENT_MAX, +}; + +enum ipa_ssr_event { + IPA_SSR_BEFORE_SHUTDOWN = IPA_QUOTA_EVENT_MAX, + IPA_SSR_AFTER_POWERUP, + IPA_SSR_EVENT_MAX +}; + +enum ipa_vlan_l2tp_event { + ADD_VLAN_IFACE = IPA_SSR_EVENT_MAX, + DEL_VLAN_IFACE, + ADD_L2TP_VLAN_MAPPING, + DEL_L2TP_VLAN_MAPPING, + IPA_VLAN_L2TP_EVENT_MAX, +}; + +#define IPA_EVENT_MAX_NUM (IPA_VLAN_L2TP_EVENT_MAX) +#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM) + +/** + * enum ipa_rm_resource_name - IPA RM clients identification names + * + * PROD resources are always even, and CONS resources are always odd. 
+ * Add new clients in the end of the list and update IPA_RM_RESOURCE_MAX + */ +enum ipa_rm_resource_name { + IPA_RM_RESOURCE_Q6_PROD = 0, + IPA_RM_RESOURCE_Q6_CONS = 1, + + IPA_RM_RESOURCE_USB_PROD = 2, + IPA_RM_RESOURCE_USB_CONS = 3, + + IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD = 4, + IPA_RM_RESOURCE_USB_DPL_CONS = 5, + + IPA_RM_RESOURCE_HSIC_PROD = 6, + IPA_RM_RESOURCE_HSIC_CONS = 7, + + IPA_RM_RESOURCE_STD_ECM_PROD = 8, + IPA_RM_RESOURCE_APPS_CONS = 9, + + IPA_RM_RESOURCE_RNDIS_PROD = 10, + /* RESERVED CONS = 11, */ + + IPA_RM_RESOURCE_WWAN_0_PROD = 12, + /* RESERVED CONS = 13, */ + + IPA_RM_RESOURCE_WLAN_PROD = 14, + IPA_RM_RESOURCE_WLAN_CONS = 15, + + IPA_RM_RESOURCE_ODU_ADAPT_PROD = 16, + IPA_RM_RESOURCE_ODU_ADAPT_CONS = 17, + + IPA_RM_RESOURCE_MHI_PROD = 18, + IPA_RM_RESOURCE_MHI_CONS = 19, + + IPA_RM_RESOURCE_ETHERNET_PROD = 20, + IPA_RM_RESOURCE_ETHERNET_CONS = 21, +}; +#define IPA_RM_RESOURCE_MAX (IPA_RM_RESOURCE_ETHERNET_CONS + 1) + +/** + * enum ipa_hw_type - IPA hardware version type + * @IPA_HW_None: IPA hardware version not defined + * @IPA_HW_v1_0: IPA hardware version 1.0 + * @IPA_HW_v1_1: IPA hardware version 1.1 + * @IPA_HW_v2_0: IPA hardware version 2.0 + * @IPA_HW_v2_1: IPA hardware version 2.1 + * @IPA_HW_v2_5: IPA hardware version 2.5 + * @IPA_HW_v2_6: IPA hardware version 2.6 + * @IPA_HW_v2_6L: IPA hardware version 2.6L + * @IPA_HW_v3_0: IPA hardware version 3.0 + * @IPA_HW_v3_1: IPA hardware version 3.1 + * @IPA_HW_v3_5: IPA hardware version 3.5 + * @IPA_HW_v3_5_1: IPA hardware version 3.5.1 + * @IPA_HW_v4_0: IPA hardware version 4.0 + */ +enum ipa_hw_type { + IPA_HW_None = 0, + IPA_HW_v1_0 = 1, + IPA_HW_v1_1 = 2, + IPA_HW_v2_0 = 3, + IPA_HW_v2_1 = 4, + IPA_HW_v2_5 = 5, + IPA_HW_v2_6 = IPA_HW_v2_5, + IPA_HW_v2_6L = 6, + IPA_HW_v3_0 = 10, + IPA_HW_v3_1 = 11, + IPA_HW_v3_5 = 12, + IPA_HW_v3_5_1 = 13, + IPA_HW_v4_0 = 14, +}; +#define IPA_HW_MAX (IPA_HW_v4_0 + 1) + +#define IPA_HW_v4_0 IPA_HW_v4_0 + +/** + * struct ipa_rule_attrib - attributes of a routing/filtering + * rule, all in LE + * @attrib_mask: what attributes are valid + * @src_port_lo: low port of src port range + * @src_port_hi: high port of src port range + * @dst_port_lo: low port of dst port range + * @dst_port_hi: high port of dst port range + * @type: ICMP/IGMP type + * @code: ICMP/IGMP code + * @spi: IPSec SPI + * @src_port: exact src port + * @dst_port: exact dst port + * @meta_data: meta-data val + * @meta_data_mask: meta-data mask + * @u.v4.tos: type of service + * @u.v4.protocol: protocol + * @u.v4.src_addr: src address value + * @u.v4.src_addr_mask: src address mask + * @u.v4.dst_addr: dst address value + * @u.v4.dst_addr_mask: dst address mask + * @u.v6.tc: traffic class + * @u.v6.flow_label: flow label + * @u.v6.next_hdr: next header + * @u.v6.src_addr: src address val + * @u.v6.src_addr_mask: src address mask + * @u.v6.dst_addr: dst address val + * @u.v6.dst_addr_mask: dst address mask + */ +struct ipa_rule_attrib { + uint32_t attrib_mask; + uint16_t src_port_lo; + uint16_t src_port_hi; + uint16_t dst_port_lo; + uint16_t dst_port_hi; + uint8_t type; + uint8_t code; + uint8_t tos_value; + uint8_t tos_mask; + uint32_t spi; + uint16_t src_port; + uint16_t dst_port; + uint32_t meta_data; + uint32_t meta_data_mask; + uint8_t src_mac_addr[ETH_ALEN]; + uint8_t src_mac_addr_mask[ETH_ALEN]; + uint8_t dst_mac_addr[ETH_ALEN]; + uint8_t dst_mac_addr_mask[ETH_ALEN]; + uint16_t ether_type; + union { + struct { + uint8_t tos; + uint8_t protocol; + uint32_t src_addr; + uint32_t src_addr_mask; + uint32_t dst_addr; 
+ uint32_t dst_addr_mask; + } v4; + struct { + uint8_t tc; + uint32_t flow_label; + uint8_t next_hdr; + uint32_t src_addr[4]; + uint32_t src_addr_mask[4]; + uint32_t dst_addr[4]; + uint32_t dst_addr_mask[4]; + } v6; + } u; +}; + +/*! @brief The maximum number of Mask Equal 32 Eqns */ +#define IPA_IPFLTR_NUM_MEQ_32_EQNS 2 + +/*! @brief The maximum number of IHL offset Mask Equal 32 Eqns */ +#define IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS 2 + +/*! @brief The maximum number of Mask Equal 128 Eqns */ +#define IPA_IPFLTR_NUM_MEQ_128_EQNS 2 + +/*! @brief The maximum number of IHL offset Range Check 16 Eqns */ +#define IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS 2 + +/*! @brief Offset and 16 bit comparison equation */ +struct ipa_ipfltr_eq_16 { + int8_t offset; + uint16_t value; +}; + +/*! @brief Offset and 32 bit comparison equation */ +struct ipa_ipfltr_eq_32 { + int8_t offset; + uint32_t value; +}; + +/*! @brief Offset and 128 bit masked comparison equation */ +struct ipa_ipfltr_mask_eq_128 { + int8_t offset; + uint8_t mask[16]; + uint8_t value[16]; +}; + +/*! @brief Offset and 32 bit masked comparison equation */ +struct ipa_ipfltr_mask_eq_32 { + int8_t offset; + uint32_t mask; + uint32_t value; +}; + +/*! @brief Equation for identifying a range. Ranges are inclusive */ +struct ipa_ipfltr_range_eq_16 { + int8_t offset; + uint16_t range_low; + uint16_t range_high; +}; + +/*! @brief Rule equations which are set according to DS filter installation */ +struct ipa_ipfltri_rule_eq { + /*! 16-bit Bitmask to indicate how many eqs are valid in this rule */ + uint16_t rule_eq_bitmap; + /*! Specifies if a type of service check rule is present */ + uint8_t tos_eq_present; + /*! The value to check against the type of service (ipv4) field */ + uint8_t tos_eq; + /*! Specifies if a protocol check rule is present */ + uint8_t protocol_eq_present; + /*! The value to check against the protocol (ipv6) field */ + uint8_t protocol_eq; + /*! The number of ip header length offset 16 bit range check + * rules in this rule + */ + uint8_t num_ihl_offset_range_16; + /*! An array of the registered ip header length offset 16 bit + * range check rules + */ + struct ipa_ipfltr_range_eq_16 + ihl_offset_range_16[IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS]; + /*! The number of mask equal 32 rules present in this rule */ + uint8_t num_offset_meq_32; + /*! An array of all the possible mask equal 32 rules in this rule */ + struct ipa_ipfltr_mask_eq_32 + offset_meq_32[IPA_IPFLTR_NUM_MEQ_32_EQNS]; + /*! Specifies if the traffic class rule is present in this rule */ + uint8_t tc_eq_present; + /*! The value to check the traffic class (ipv4) field against */ + uint8_t tc_eq; + /*! Specifies if the flow equals rule is present in this rule */ + uint8_t fl_eq_present; + /*! The value to check the flow (ipv6) field against */ + uint32_t fl_eq; + /*! The number of ip header length offset 16 bit equations in this + * rule + */ + uint8_t ihl_offset_eq_16_present; + /*! The ip header length offset 16 bit equation */ + struct ipa_ipfltr_eq_16 ihl_offset_eq_16; + /*! The number of ip header length offset 32 bit equations in this + * rule + */ + uint8_t ihl_offset_eq_32_present; + /*! The ip header length offset 32 bit equation */ + struct ipa_ipfltr_eq_32 ihl_offset_eq_32; + /*! The number of ip header length offset 32 bit mask equations in + * this rule + */ + uint8_t num_ihl_offset_meq_32; + /*! The ip header length offset 32 bit mask equation */ + struct ipa_ipfltr_mask_eq_32 + ihl_offset_meq_32[IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS]; + /*! 
The number of ip header length offset 128 bit equations in this + * rule + */ + uint8_t num_offset_meq_128; + /*! The ip header length offset 128 bit equation */ + struct ipa_ipfltr_mask_eq_128 + offset_meq_128[IPA_IPFLTR_NUM_MEQ_128_EQNS]; + /*! The metadata 32 bit masked comparison equation present or not */ + /* Metadata based rules are added internally by IPA driver */ + uint8_t metadata_meq32_present; + /*! The metadata 32 bit masked comparison equation */ + struct ipa_ipfltr_mask_eq_32 metadata_meq32; + /*! Specifies if the Fragment equation is present in this rule */ + uint8_t ipv4_frag_eq_present; +}; + +/** + * struct ipa_flt_rule - attributes of a filtering rule + * @retain_hdr: bool switch to instruct IPA core to add back to the packet + * the header removed as part of header removal + * @to_uc: bool switch to pass packet to micro-controller + * @action: action field + * @rt_tbl_hdl: handle of table from "get" + * @attrib: attributes of the rule + * @eq_attrib: attributes of the rule in equation form (valid when + * eq_attrib_type is true) + * @rt_tbl_idx: index of RT table referred to by filter rule (valid when + * eq_attrib_type is true and non-exception action) + * @eq_attrib_type: true if equation level form used to specify attributes + * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit, + * IPA will use the rule and will not look for other rules that may have + * higher priority + * @hashable: bool switch. is this rule hashable or not? + * ipa uses hashable rules to cache their hit results to be used in + * consecutive packets + * @rule_id: rule_id to be assigned to the filter rule. In case client specifies + * rule_id as 0 the driver will assign a new rule_id + * @set_metadata: bool switch. should metadata replacement at the NAT block + * take place? + * @pdn_idx: if action is "pass to source\destination NAT" then a comparison + * against the PDN index in the matching PDN entry will take place as an + * additional condition for NAT hit. 
+ */ +struct ipa_flt_rule { + uint8_t retain_hdr; + uint8_t to_uc; + enum ipa_flt_action action; + uint32_t rt_tbl_hdl; + struct ipa_rule_attrib attrib; + struct ipa_ipfltri_rule_eq eq_attrib; + uint32_t rt_tbl_idx; + uint8_t eq_attrib_type; + uint8_t max_prio; + uint8_t hashable; + uint16_t rule_id; + uint8_t set_metadata; + uint8_t pdn_idx; +}; + +/** + * enum ipa_hdr_l2_type - L2 header type + * IPA_HDR_L2_NONE: L2 header which isn't Ethernet II and isn't 802_3 + * IPA_HDR_L2_ETHERNET_II: L2 header of type Ethernet II + * IPA_HDR_L2_802_3: L2 header of type 802_3 + */ +enum ipa_hdr_l2_type { + IPA_HDR_L2_NONE, + IPA_HDR_L2_ETHERNET_II, + IPA_HDR_L2_802_3, +}; +#define IPA_HDR_L2_MAX (IPA_HDR_L2_802_3 + 1) + +/** + * enum ipa_hdr_l2_type - Processing context type + * IPA_HDR_PROC_NONE: No processing context + * IPA_HDR_PROC_ETHII_TO_ETHII: Process Ethernet II to Ethernet II + * IPA_HDR_PROC_ETHII_TO_802_3: Process Ethernet II to 802_3 + * IPA_HDR_PROC_802_3_TO_ETHII: Process 802_3 to Ethernet II + * IPA_HDR_PROC_802_3_TO_802_3: Process 802_3 to 802_3 + */ +enum ipa_hdr_proc_type { + IPA_HDR_PROC_NONE, + IPA_HDR_PROC_ETHII_TO_ETHII, + IPA_HDR_PROC_ETHII_TO_802_3, + IPA_HDR_PROC_802_3_TO_ETHII, + IPA_HDR_PROC_802_3_TO_802_3, + IPA_HDR_PROC_L2TP_HEADER_ADD, + IPA_HDR_PROC_L2TP_HEADER_REMOVE +}; +#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_L2TP_HEADER_REMOVE + 1) + +/** + * struct ipa_rt_rule - attributes of a routing rule + * @dst: dst "client" + * @hdr_hdl: handle to the dynamic header + it is not an index or an offset + * @hdr_proc_ctx_hdl: handle to header processing context. if it is provided + hdr_hdl shall be 0 + * @attrib: attributes of the rule + * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit, + * IPA will use the rule and will not look for other rules that may have + * higher priority + * @hashable: bool switch. is this rule hashable or not? + * ipa uses hashable rules to cache their hit results to be used in + * consecutive packets + * @retain_hdr: bool switch to instruct IPA core to add back to the packet + * the header removed as part of header removal + */ +struct ipa_rt_rule { + enum ipa_client_type dst; + uint32_t hdr_hdl; + uint32_t hdr_proc_ctx_hdl; + struct ipa_rule_attrib attrib; + uint8_t max_prio; + uint8_t hashable; + uint8_t retain_hdr; +}; + +/** + * struct ipa_hdr_add - header descriptor includes in and out + * parameters + * @name: name of the header + * @hdr: actual header to be inserted + * @hdr_len: size of above header + * @type: l2 header type + * @is_partial: header not fully specified + * @hdr_hdl: out parameter, handle to header, valid when status is 0 + * @status: out parameter, status of header add operation, + * 0 for success, + * -1 for failure + * @is_eth2_ofst_valid: is eth2_ofst field valid? + * @eth2_ofst: offset to start of Ethernet-II/802.3 header + */ +struct ipa_hdr_add { + char name[IPA_RESOURCE_NAME_MAX]; + uint8_t hdr[IPA_HDR_MAX_SIZE]; + uint8_t hdr_len; + enum ipa_hdr_l2_type type; + uint8_t is_partial; + uint32_t hdr_hdl; + int status; + uint8_t is_eth2_ofst_valid; + uint16_t eth2_ofst; +}; + +/** + * struct ipa_ioc_add_hdr - header addition parameters (support + * multiple headers and commit) + * @commit: should headers be written to IPA HW also? 
+ * @num_hdrs: num of headers that follow + * @ipa_hdr_add hdr: all headers need to go here back to + * back, no pointers + */ +struct ipa_ioc_add_hdr { + uint8_t commit; + uint8_t num_hdrs; + struct ipa_hdr_add hdr[0]; +}; + +/** + * struct ipa_l2tp_header_add_procparams - + * @eth_hdr_retained: Specifies if Ethernet header is retained or not + * @input_ip_version: Specifies if Input header is IPV4(0) or IPV6(1) + * @output_ip_version: Specifies if template header is IPV4(0) or IPV6(1) + */ +struct ipa_l2tp_header_add_procparams { + uint32_t eth_hdr_retained:1; + uint32_t input_ip_version:1; + uint32_t output_ip_version:1; + uint32_t reserved:29; +}; + +/** + * struct ipa_l2tp_header_remove_procparams - + * @hdr_len_remove: Specifies how much of the header needs to + be removed in bytes + * @eth_hdr_retained: Specifies if Ethernet header is retained or not + */ +struct ipa_l2tp_header_remove_procparams { + uint32_t hdr_len_remove:8; + uint32_t eth_hdr_retained:1; + uint32_t reserved:23; +}; + +/** + * union ipa_l2tp_hdr_proc_ctx_params - + * @hdr_add_param: parameters for header add + * @hdr_remove_param: parameters for header remove + */ +union ipa_l2tp_hdr_proc_ctx_params { + struct ipa_l2tp_header_add_procparams hdr_add_param; + struct ipa_l2tp_header_remove_procparams hdr_remove_param; +}; +/** + * struct ipa_hdr_proc_ctx_add - processing context descriptor includes + * in and out parameters + * @type: processing context type + * @hdr_hdl: in parameter, handle to header + * @l2tp_params: l2tp parameters + * @proc_ctx_hdl: out parameter, handle to proc_ctx, valid when status is 0 + * @status: out parameter, status of header add operation, + * 0 for success, + * -1 for failure + */ +struct ipa_hdr_proc_ctx_add { + enum ipa_hdr_proc_type type; + uint32_t hdr_hdl; + uint32_t proc_ctx_hdl; + int status; + union ipa_l2tp_hdr_proc_ctx_params l2tp_params; +}; + +#define IPA_L2TP_HDR_PROC_SUPPORT + +/** + * struct ipa_ioc_add_hdr - processing context addition parameters (support + * multiple processing context and commit) + * @commit: should processing context be written to IPA HW also? + * @num_proc_ctxs: num of processing context that follow + * @proc_ctx: all processing context need to go here back to + * back, no pointers + */ +struct ipa_ioc_add_hdr_proc_ctx { + uint8_t commit; + uint8_t num_proc_ctxs; + struct ipa_hdr_proc_ctx_add proc_ctx[0]; +}; + +/** + * struct ipa_ioc_copy_hdr - retrieve a copy of the specified + * header - caller can then derive the complete header + * @name: name of the header resource + * @hdr: out parameter, contents of specified header, + * valid only when ioctl return val is non-negative + * @hdr_len: out parameter, size of above header + * valid only when ioctl return val is non-negative + * @type: l2 header type + * valid only when ioctl return val is non-negative + * @is_partial: out parameter, indicates whether specified header is partial + * valid only when ioctl return val is non-negative + * @is_eth2_ofst_valid: is eth2_ofst field valid? 
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header + */ +struct ipa_ioc_copy_hdr { + char name[IPA_RESOURCE_NAME_MAX]; + uint8_t hdr[IPA_HDR_MAX_SIZE]; + uint8_t hdr_len; + enum ipa_hdr_l2_type type; + uint8_t is_partial; + uint8_t is_eth2_ofst_valid; + uint16_t eth2_ofst; +}; + +/** + * struct ipa_ioc_get_hdr - header entry lookup parameters, if lookup was + * successful caller must call put to release the reference count when done + * @name: name of the header resource + * @hdl: out parameter, handle of header entry + * valid only when ioctl return val is non-negative + */ +struct ipa_ioc_get_hdr { + char name[IPA_RESOURCE_NAME_MAX]; + uint32_t hdl; +}; + +/** + * struct ipa_hdr_del - header descriptor includes in and out + * parameters + * + * @hdl: handle returned from header add operation + * @status: out parameter, status of header remove operation, + * 0 for success, + * -1 for failure + */ +struct ipa_hdr_del { + uint32_t hdl; + int status; +}; + +/** + * struct ipa_ioc_del_hdr - header deletion parameters (support + * multiple headers and commit) + * @commit: should headers be removed from IPA HW also? + * @num_hdls: num of headers being removed + * @ipa_hdr_del hdl: all handles need to go here back to back, no pointers + */ +struct ipa_ioc_del_hdr { + uint8_t commit; + uint8_t num_hdls; + struct ipa_hdr_del hdl[0]; +}; + +/** + * struct ipa_hdr_proc_ctx_del - processing context descriptor includes + * in and out parameters + * @hdl: handle returned from processing context add operation + * @status: out parameter, status of header remove operation, + * 0 for success, + * -1 for failure + */ +struct ipa_hdr_proc_ctx_del { + uint32_t hdl; + int status; +}; + +/** + * ipa_ioc_del_hdr_proc_ctx - processing context deletion parameters (support + * multiple headers and commit) + * @commit: should processing contexts be removed from IPA HW also? + * @num_hdls: num of processing contexts being removed + * @ipa_hdr_proc_ctx_del hdl: all handles need to go here back to back, + * no pointers + */ +struct ipa_ioc_del_hdr_proc_ctx { + uint8_t commit; + uint8_t num_hdls; + struct ipa_hdr_proc_ctx_del hdl[0]; +}; + +/** + * struct ipa_rt_rule_add - routing rule descriptor includes in + * and out parameters + * @rule: actual rule to be added + * @at_rear: add at back of routing table, it is NOT possible to add rules at + * the rear of the "default" routing tables + * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0 + * @status: output parameter, status of routing rule add operation, + * 0 for success, + * -1 for failure + */ +struct ipa_rt_rule_add { + struct ipa_rt_rule rule; + uint8_t at_rear; + uint32_t rt_rule_hdl; + int status; +}; + +/** + * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports + * multiple rules and commit); + * + * all rules MUST be added to same table + * @commit: should rules be written to IPA HW also? + * @ip: IP family of rule + * @rt_tbl_name: name of routing table resource + * @num_rules: number of routing rules that follow + * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers + */ +struct ipa_ioc_add_rt_rule { + uint8_t commit; + enum ipa_ip_type ip; + char rt_tbl_name[IPA_RESOURCE_NAME_MAX]; + uint8_t num_rules; + struct ipa_rt_rule_add rules[0]; +}; + +/** + * struct ipa_ioc_add_rt_rule_after - routing rule addition after a specific + * rule parameters(supports multiple rules and commit); + * + * all rules MUST be added to same table + * @commit: should rules be written to IPA HW also? 
+ * @ip: IP family of rule + * @rt_tbl_name: name of routing table resource + * @num_rules: number of routing rules that follow + * @add_after_hdl: the rules will be added after this specific rule + * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers + * at_rear field will be ignored when using this IOCTL + */ +struct ipa_ioc_add_rt_rule_after { + uint8_t commit; + enum ipa_ip_type ip; + char rt_tbl_name[IPA_RESOURCE_NAME_MAX]; + uint8_t num_rules; + uint32_t add_after_hdl; + struct ipa_rt_rule_add rules[0]; +}; + +/** + * struct ipa_rt_rule_mdfy - routing rule descriptor includes + * in and out parameters + * @rule: actual rule to be added + * @rt_rule_hdl: handle to rule which supposed to modify + * @status: output parameter, status of routing rule modify operation, + * 0 for success, + * -1 for failure + * + */ +struct ipa_rt_rule_mdfy { + struct ipa_rt_rule rule; + uint32_t rt_rule_hdl; + int status; +}; + +/** + * struct ipa_ioc_mdfy_rt_rule - routing rule modify parameters (supports + * multiple rules and commit) + * @commit: should rules be written to IPA HW also? + * @ip: IP family of rule + * @num_rules: number of routing rules that follow + * @rules: all rules need to go back to back here, no pointers + */ +struct ipa_ioc_mdfy_rt_rule { + uint8_t commit; + enum ipa_ip_type ip; + uint8_t num_rules; + struct ipa_rt_rule_mdfy rules[0]; +}; + +/** + * struct ipa_rt_rule_del - routing rule descriptor includes in + * and out parameters + * @hdl: handle returned from route rule add operation + * @status: output parameter, status of route rule delete operation, + * 0 for success, + * -1 for failure + */ +struct ipa_rt_rule_del { + uint32_t hdl; + int status; +}; + +/** + * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports + * multiple headers and commit) + * @commit: should rules be removed from IPA HW also? + * @ip: IP family of rules + * @num_hdls: num of rules being removed + * @ipa_rt_rule_del hdl: all handles need to go back to back here, no pointers + */ +struct ipa_ioc_del_rt_rule { + uint8_t commit; + enum ipa_ip_type ip; + uint8_t num_hdls; + struct ipa_rt_rule_del hdl[0]; +}; + +/** + * struct ipa_ioc_get_rt_tbl_indx - routing table index lookup parameters + * @ip: IP family of table + * @name: name of routing table resource + * @index: output parameter, routing table index, valid only when ioctl + * return val is non-negative + */ +struct ipa_ioc_get_rt_tbl_indx { + enum ipa_ip_type ip; + char name[IPA_RESOURCE_NAME_MAX]; + uint32_t idx; +}; + +/** + * struct ipa_flt_rule_add - filtering rule descriptor includes + * in and out parameters + * @rule: actual rule to be added + * @at_rear: add at back of filtering table? + * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0 + * @status: output parameter, status of filtering rule add operation, + * 0 for success, + * -1 for failure + * + */ +struct ipa_flt_rule_add { + struct ipa_flt_rule rule; + uint8_t at_rear; + uint32_t flt_rule_hdl; + int status; +}; + +/** + * struct ipa_ioc_add_flt_rule - filtering rule addition parameters (supports + * multiple rules and commit) + * all rules MUST be added to same table + * @commit: should rules be written to IPA HW also? + * @ip: IP family of rule + * @ep: which "clients" pipe does this rule apply to? 
+ * valid only when global is 0 + * @global: does this apply to global filter table of specific IP family + * @num_rules: number of filtering rules that follow + * @rules: all rules need to go back to back here, no pointers + */ +struct ipa_ioc_add_flt_rule { + uint8_t commit; + enum ipa_ip_type ip; + enum ipa_client_type ep; + uint8_t global; + uint8_t num_rules; + struct ipa_flt_rule_add rules[0]; +}; + +/** + * struct ipa_ioc_add_flt_rule_after - filtering rule addition after specific + * rule parameters (supports multiple rules and commit) + * all rules MUST be added to same table + * @commit: should rules be written to IPA HW also? + * @ip: IP family of rule + * @ep: which "clients" pipe does this rule apply to? + * @num_rules: number of filtering rules that follow + * @add_after_hdl: rules will be added after the rule with this handle + * @rules: all rules need to go back to back here, no pointers. at rear field + * is ignored when using this IOCTL + */ +struct ipa_ioc_add_flt_rule_after { + uint8_t commit; + enum ipa_ip_type ip; + enum ipa_client_type ep; + uint8_t num_rules; + uint32_t add_after_hdl; + struct ipa_flt_rule_add rules[0]; +}; + +/** + * struct ipa_flt_rule_mdfy - filtering rule descriptor includes + * in and out parameters + * @rule: actual rule to be added + * @flt_rule_hdl: handle to rule + * @status: output parameter, status of filtering rule modify operation, + * 0 for success, + * -1 for failure + * + */ +struct ipa_flt_rule_mdfy { + struct ipa_flt_rule rule; + uint32_t rule_hdl; + int status; +}; + +/** + * struct ipa_ioc_mdfy_flt_rule - filtering rule modify parameters (supports + * multiple rules and commit) + * @commit: should rules be written to IPA HW also? + * @ip: IP family of rule + * @num_rules: number of filtering rules that follow + * @rules: all rules need to go back to back here, no pointers + */ +struct ipa_ioc_mdfy_flt_rule { + uint8_t commit; + enum ipa_ip_type ip; + uint8_t num_rules; + struct ipa_flt_rule_mdfy rules[0]; +}; + +/** + * struct ipa_flt_rule_del - filtering rule descriptor includes + * in and out parameters + * + * @hdl: handle returned from filtering rule add operation + * @status: output parameter, status of filtering rule delete operation, + * 0 for success, + * -1 for failure + */ +struct ipa_flt_rule_del { + uint32_t hdl; + int status; +}; + +/** + * struct ipa_ioc_del_flt_rule - filtering rule deletion parameters (supports + * multiple headers and commit) + * @commit: should rules be removed from IPA HW also? 
+ * @ip: IP family of rules + * @num_hdls: num of rules being removed + * @hdl: all handles need to go back to back here, no pointers + */ +struct ipa_ioc_del_flt_rule { + uint8_t commit; + enum ipa_ip_type ip; + uint8_t num_hdls; + struct ipa_flt_rule_del hdl[0]; +}; + +/** + * struct ipa_ioc_get_rt_tbl - routing table lookup parameters, if lookup was + * successful caller must call put to release the reference + * count when done + * @ip: IP family of table + * @name: name of routing table resource + * @htl: output parameter, handle of routing table, valid only when ioctl + * return val is non-negative + */ +struct ipa_ioc_get_rt_tbl { + enum ipa_ip_type ip; + char name[IPA_RESOURCE_NAME_MAX]; + uint32_t hdl; +}; + +/** + * struct ipa_ioc_query_intf - used to lookup number of tx and + * rx properties of interface + * @name: name of interface + * @num_tx_props: output parameter, number of tx properties + * valid only when ioctl return val is non-negative + * @num_rx_props: output parameter, number of rx properties + * valid only when ioctl return val is non-negative + * @num_ext_props: output parameter, number of ext properties + * valid only when ioctl return val is non-negative + * @excp_pipe: exception packets of this interface should be + * routed to this pipe + */ +struct ipa_ioc_query_intf { + char name[IPA_RESOURCE_NAME_MAX]; + uint32_t num_tx_props; + uint32_t num_rx_props; + uint32_t num_ext_props; + enum ipa_client_type excp_pipe; +}; + +/** + * struct ipa_ioc_tx_intf_prop - interface tx property + * @ip: IP family of routing rule + * @attrib: routing rule + * @dst_pipe: routing output pipe + * @alt_dst_pipe: alternate routing output pipe + * @hdr_name: name of associated header if any, empty string when no header + * @hdr_l2_type: type of associated header if any, use NONE when no header + */ +struct ipa_ioc_tx_intf_prop { + enum ipa_ip_type ip; + struct ipa_rule_attrib attrib; + enum ipa_client_type dst_pipe; + enum ipa_client_type alt_dst_pipe; + char hdr_name[IPA_RESOURCE_NAME_MAX]; + enum ipa_hdr_l2_type hdr_l2_type; +}; + +/** + * struct ipa_ioc_query_intf_tx_props - interface tx propertie + * @name: name of interface + * @num_tx_props: number of TX properties + * @tx[0]: output parameter, the tx properties go here back to back + */ +struct ipa_ioc_query_intf_tx_props { + char name[IPA_RESOURCE_NAME_MAX]; + uint32_t num_tx_props; + struct ipa_ioc_tx_intf_prop tx[0]; +}; + +/** + * struct ipa_ioc_ext_intf_prop - interface extended property + * @ip: IP family of routing rule + * @eq_attrib: attributes of the rule in equation form + * @action: action field + * @rt_tbl_idx: index of RT table referred to by filter rule + * @mux_id: MUX_ID + * @filter_hdl: handle of filter (as specified by provider of filter rule) + * @is_xlat_rule: it is xlat flt rule or not + */ +struct ipa_ioc_ext_intf_prop { + enum ipa_ip_type ip; + struct ipa_ipfltri_rule_eq eq_attrib; + enum ipa_flt_action action; + uint32_t rt_tbl_idx; + uint8_t mux_id; + uint32_t filter_hdl; + uint8_t is_xlat_rule; + uint32_t rule_id; + uint8_t is_rule_hashable; +}; + +/** + * struct ipa_ioc_query_intf_ext_props - interface ext propertie + * @name: name of interface + * @num_ext_props: number of EXT properties + * @ext[0]: output parameter, the ext properties go here back to back + */ +struct ipa_ioc_query_intf_ext_props { + char name[IPA_RESOURCE_NAME_MAX]; + uint32_t num_ext_props; + struct ipa_ioc_ext_intf_prop ext[0]; +}; + +/** + * struct ipa_ioc_rx_intf_prop - interface rx property + * @ip: IP family of filtering 
rule + * @attrib: filtering rule + * @src_pipe: input pipe + * @hdr_l2_type: type of associated header if any, use NONE when no header + */ +struct ipa_ioc_rx_intf_prop { + enum ipa_ip_type ip; + struct ipa_rule_attrib attrib; + enum ipa_client_type src_pipe; + enum ipa_hdr_l2_type hdr_l2_type; +}; + +/** + * struct ipa_ioc_query_intf_rx_props - interface rx propertie + * @name: name of interface + * @num_rx_props: number of RX properties + * @rx: output parameter, the rx properties go here back to back + */ +struct ipa_ioc_query_intf_rx_props { + char name[IPA_RESOURCE_NAME_MAX]; + uint32_t num_rx_props; + struct ipa_ioc_rx_intf_prop rx[0]; +}; + +/** + * struct ipa_ioc_nat_alloc_mem - nat table memory allocation + * properties + * @dev_name: input parameter, the name of table + * @size: input parameter, size of table in bytes + * @offset: output parameter, offset into page in case of system memory + */ +struct ipa_ioc_nat_alloc_mem { + char dev_name[IPA_RESOURCE_NAME_MAX]; + size_t size; + off_t offset; +}; + +/** + * struct ipa_ioc_v4_nat_init - nat table initialization + * parameters + * @tbl_index: input parameter, index of the table + * @ipv4_rules_offset: input parameter, ipv4 rules address offset + * @expn_rules_offset: input parameter, ipv4 expansion rules address offset + * @index_offset: input parameter, index rules offset + * @index_expn_offset: input parameter, index expansion rules offset + * @table_entries: input parameter, ipv4 rules table size in entries + * @expn_table_entries: input parameter, ipv4 expansion rules table size + * @ip_addr: input parameter, public ip address + */ +struct ipa_ioc_v4_nat_init { + uint8_t tbl_index; + uint32_t ipv4_rules_offset; + uint32_t expn_rules_offset; + + uint32_t index_offset; + uint32_t index_expn_offset; + + uint16_t table_entries; + uint16_t expn_table_entries; + uint32_t ip_addr; +}; + +/** + * struct ipa_ioc_v4_nat_del - nat table delete parameter + * @table_index: input parameter, index of the table + * @public_ip_addr: input parameter, public ip address + */ +struct ipa_ioc_v4_nat_del { + uint8_t table_index; + uint32_t public_ip_addr; +}; + +/** + * struct ipa_ioc_nat_dma_one - nat dma command parameter + * @table_index: input parameter, index of the table + * @base_addr: type of table, from which the base address of the table + * can be inferred + * @offset: destination offset within the NAT table + * @data: data to be written. 
+ */ +struct ipa_ioc_nat_dma_one { + uint8_t table_index; + uint8_t base_addr; + + uint32_t offset; + uint16_t data; + +}; + +/** + * struct ipa_ioc_nat_dma_cmd - To hold multiple nat dma commands + * @entries: number of dma commands in use + * @dma: data pointer to the dma commands + */ +struct ipa_ioc_nat_dma_cmd { + uint8_t entries; + struct ipa_ioc_nat_dma_one dma[0]; + +}; + +/** + * struct ipa_ioc_nat_pdn_entry - PDN entry modification data + * @pdn_index: index of the entry in the PDN config table to be changed + * @public_ip: PDN's public ip + * @src_metadata: PDN's source NAT metadata for metadata replacement + * @dst_metadata: PDN's destination NAT metadata for metadata replacement + */ +struct ipa_ioc_nat_pdn_entry { + uint8_t pdn_index; + uint32_t public_ip; + uint32_t src_metadata; + uint32_t dst_metadata; +}; + +/** + * struct ipa_ioc_vlan_iface_info - add vlan interface + * @name: interface name + * @vlan_id: VLAN ID + */ +struct ipa_ioc_vlan_iface_info { + char name[IPA_RESOURCE_NAME_MAX]; + uint8_t vlan_id; +}; + +/** + * struct ipa_ioc_l2tp_vlan_mapping_info - l2tp->vlan mapping info + * @iptype: l2tp tunnel IP type + * @l2tp_iface_name: l2tp interface name + * @l2tp_session_id: l2tp session id + * @vlan_iface_name: vlan interface name + */ +struct ipa_ioc_l2tp_vlan_mapping_info { + enum ipa_ip_type iptype; + char l2tp_iface_name[IPA_RESOURCE_NAME_MAX]; + uint8_t l2tp_session_id; + char vlan_iface_name[IPA_RESOURCE_NAME_MAX]; +}; + +/** + * struct ipa_msg_meta - Format of the message meta-data. + * @msg_type: the type of the message + * @rsvd: reserved bits for future use. + * @msg_len: the length of the message in bytes + * + * For push model: + * Client in user-space should issue a read on the device (/dev/ipa) with a + * sufficiently large buffer in a continuous loop, call will block when there is + * no message to read. Upon return, client can read the ipa_msg_meta from start + * of buffer to find out type and length of message + * size of buffer supplied >= (size of largest message + size of metadata) + * + * For pull model: + * Client in user-space can also issue a pull msg IOCTL to device (/dev/ipa) + * with a payload containing space for the ipa_msg_meta and the message specific + * payload length. 
+ * size of buffer supplied == (len of specific message + size of metadata) + */ +struct ipa_msg_meta { + uint8_t msg_type; + uint8_t rsvd; + uint16_t msg_len; +}; + +/** + * struct ipa_wlan_msg - To hold information about wlan client + * @name: name of the wlan interface + * @mac_addr: mac address of wlan client + * + * wlan drivers need to pass name of wlan iface and mac address of + * wlan client along with ipa_wlan_event, whenever a wlan client is + * connected/disconnected/moved to power save/come out of power save + */ +struct ipa_wlan_msg { + char name[IPA_RESOURCE_NAME_MAX]; + uint8_t mac_addr[IPA_MAC_ADDR_SIZE]; +}; + +/** + * enum ipa_wlan_hdr_attrib_type - attribute type + * in wlan client header + * + * WLAN_HDR_ATTRIB_MAC_ADDR: attrib type mac address + * WLAN_HDR_ATTRIB_STA_ID: attrib type station id + */ +enum ipa_wlan_hdr_attrib_type { + WLAN_HDR_ATTRIB_MAC_ADDR, + WLAN_HDR_ATTRIB_STA_ID +}; + +/** + * struct ipa_wlan_hdr_attrib_val - header attribute value + * @attrib_type: type of attribute + * @offset: offset of attribute within header + * @u.mac_addr: mac address + * @u.sta_id: station id + */ +struct ipa_wlan_hdr_attrib_val { + enum ipa_wlan_hdr_attrib_type attrib_type; + uint8_t offset; + union { + uint8_t mac_addr[IPA_MAC_ADDR_SIZE]; + uint8_t sta_id; + } u; +}; + +/** + * struct ipa_wlan_msg_ex - To hold information about wlan client + * @name: name of the wlan interface + * @num_of_attribs: number of attributes + * @attrib_val: holds attribute values + * + * wlan drivers need to pass name of wlan iface and mac address + * of wlan client or station id along with ipa_wlan_event, + * whenever a wlan client is connected/disconnected/moved to + * power save/come out of power save + */ +struct ipa_wlan_msg_ex { + char name[IPA_RESOURCE_NAME_MAX]; + uint8_t num_of_attribs; + struct ipa_wlan_hdr_attrib_val attribs[0]; +}; + +struct ipa_ecm_msg { + char name[IPA_RESOURCE_NAME_MAX]; + int ifindex; +}; + +/** + * struct ipa_wan_msg - To hold information about wan client + * @name: name of the wan interface + * + * CnE need to pass the name of default wan iface when connected/disconnected. + * CNE need to pass the gw info in wlan AP+STA mode. + * netmgr need to pass the name of wan eMBMS iface when connected. 
+ */ +struct ipa_wan_msg { + char upstream_ifname[IPA_RESOURCE_NAME_MAX]; + char tethered_ifname[IPA_RESOURCE_NAME_MAX]; + enum ipa_ip_type ip; + uint32_t ipv4_addr_gw; + uint32_t ipv6_addr_gw[IPA_WAN_MSG_IPv6_ADDR_GW_LEN]; +}; + +/** + * struct ipa_ioc_rm_dependency - parameters for add/delete dependency + * @resource_name: name of dependent resource + * @depends_on_name: name of its dependency + */ +struct ipa_ioc_rm_dependency { + enum ipa_rm_resource_name resource_name; + enum ipa_rm_resource_name depends_on_name; +}; + +struct ipa_ioc_generate_flt_eq { + enum ipa_ip_type ip; + struct ipa_rule_attrib attrib; + struct ipa_ipfltri_rule_eq eq_attrib; +}; + +/** + * struct ipa_ioc_write_qmapid - to write mux id to endpoint meta register + * @mux_id: mux id of wan + */ +struct ipa_ioc_write_qmapid { + enum ipa_client_type client; + uint8_t qmap_id; +}; + +enum ipacm_client_enum { + IPACM_CLIENT_USB = 1, + IPACM_CLIENT_WLAN, + IPACM_CLIENT_MAX +}; +/** + * actual IOCTLs supported by IPA driver + */ +#define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR, \ + struct ipa_ioc_add_hdr *) +#define IPA_IOC_DEL_HDR _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR, \ + struct ipa_ioc_del_hdr *) +#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_RT_RULE, \ + struct ipa_ioc_add_rt_rule *) +#define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_RT_RULE_AFTER, \ + struct ipa_ioc_add_rt_rule_after *) +#define IPA_IOC_DEL_RT_RULE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_RT_RULE, \ + struct ipa_ioc_del_rt_rule *) +#define IPA_IOC_ADD_FLT_RULE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_FLT_RULE, \ + struct ipa_ioc_add_flt_rule *) +#define IPA_IOC_ADD_FLT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_FLT_RULE_AFTER, \ + struct ipa_ioc_add_flt_rule_after *) +#define IPA_IOC_DEL_FLT_RULE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_FLT_RULE, \ + struct ipa_ioc_del_flt_rule *) +#define IPA_IOC_COMMIT_HDR _IO(IPA_IOC_MAGIC,\ + IPA_IOCTL_COMMIT_HDR) +#define IPA_IOC_RESET_HDR _IO(IPA_IOC_MAGIC,\ + IPA_IOCTL_RESET_HDR) +#define IPA_IOC_COMMIT_RT _IOW(IPA_IOC_MAGIC, \ + IPA_IOCTL_COMMIT_RT, \ + enum ipa_ip_type) +#define IPA_IOC_RESET_RT _IOW(IPA_IOC_MAGIC, \ + IPA_IOCTL_RESET_RT, \ + enum ipa_ip_type) +#define IPA_IOC_COMMIT_FLT _IOW(IPA_IOC_MAGIC, \ + IPA_IOCTL_COMMIT_FLT, \ + enum ipa_ip_type) +#define IPA_IOC_RESET_FLT _IOW(IPA_IOC_MAGIC, \ + IPA_IOCTL_RESET_FLT, \ + enum ipa_ip_type) +#define IPA_IOC_DUMP _IO(IPA_IOC_MAGIC, \ + IPA_IOCTL_DUMP) +#define IPA_IOC_GET_RT_TBL _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_RT_TBL, \ + struct ipa_ioc_get_rt_tbl *) +#define IPA_IOC_PUT_RT_TBL _IOW(IPA_IOC_MAGIC, \ + IPA_IOCTL_PUT_RT_TBL, \ + uint32_t) +#define IPA_IOC_COPY_HDR _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_COPY_HDR, \ + struct ipa_ioc_copy_hdr *) +#define IPA_IOC_QUERY_INTF _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF, \ + struct ipa_ioc_query_intf *) +#define IPA_IOC_QUERY_INTF_TX_PROPS _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_TX_PROPS, \ + struct ipa_ioc_query_intf_tx_props *) +#define IPA_IOC_QUERY_INTF_RX_PROPS _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_RX_PROPS, \ + struct ipa_ioc_query_intf_rx_props *) +#define IPA_IOC_QUERY_INTF_EXT_PROPS _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_EXT_PROPS, \ + struct ipa_ioc_query_intf_ext_props *) +#define IPA_IOC_GET_HDR _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_HDR, \ + struct ipa_ioc_get_hdr *) +#define IPA_IOC_PUT_HDR _IOW(IPA_IOC_MAGIC, \ + IPA_IOCTL_PUT_HDR, \ + uint32_t) +#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \ + 
IPA_IOCTL_ALLOC_NAT_MEM, \ + struct ipa_ioc_nat_alloc_mem *) +#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_INIT_NAT, \ + struct ipa_ioc_v4_nat_init *) +#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NAT_DMA, \ + struct ipa_ioc_nat_dma_cmd *) +#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_DEL_NAT, \ + struct ipa_ioc_v4_nat_del *) +#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_NAT_OFFSET, \ + uint32_t *) +#define IPA_IOC_NAT_MODIFY_PDN _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NAT_MODIFY_PDN, \ + struct ipa_ioc_nat_pdn_entry *) +#define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \ + IPA_IOCTL_SET_FLT, \ + uint32_t) +#define IPA_IOC_PULL_MSG _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_PULL_MSG, \ + struct ipa_msg_meta *) +#define IPA_IOC_RM_ADD_DEPENDENCY _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_RM_ADD_DEPENDENCY, \ + struct ipa_ioc_rm_dependency *) +#define IPA_IOC_RM_DEL_DEPENDENCY _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_RM_DEL_DEPENDENCY, \ + struct ipa_ioc_rm_dependency *) +#define IPA_IOC_GENERATE_FLT_EQ _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GENERATE_FLT_EQ, \ + struct ipa_ioc_generate_flt_eq *) +#define IPA_IOC_QUERY_EP_MAPPING _IOR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_EP_MAPPING, \ + uint32_t) +#define IPA_IOC_QUERY_RT_TBL_INDEX _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_RT_TBL_INDEX, \ + struct ipa_ioc_get_rt_tbl_indx *) +#define IPA_IOC_WRITE_QMAPID _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_WRITE_QMAPID, \ + struct ipa_ioc_write_qmapid *) +#define IPA_IOC_MDFY_FLT_RULE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_FLT_RULE, \ + struct ipa_ioc_mdfy_flt_rule *) +#define IPA_IOC_MDFY_RT_RULE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_RT_RULE, \ + struct ipa_ioc_mdfy_rt_rule *) + +#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \ + struct ipa_wan_msg *) + +#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \ + struct ipa_wan_msg *) +#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \ + struct ipa_wan_msg *) +#define IPA_IOC_ADD_HDR_PROC_CTX _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR_PROC_CTX, \ + struct ipa_ioc_add_hdr_proc_ctx *) +#define IPA_IOC_DEL_HDR_PROC_CTX _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR_PROC_CTX, \ + struct ipa_ioc_del_hdr_proc_ctx *) + +#define IPA_IOC_GET_HW_VERSION _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_HW_VERSION, \ + enum ipa_hw_type *) + +#define IPA_IOC_ADD_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_VLAN_IFACE, \ + struct ipa_ioc_vlan_iface_info *) + +#define IPA_IOC_DEL_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_VLAN_IFACE, \ + struct ipa_ioc_vlan_iface_info *) + +#define IPA_IOC_ADD_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_L2TP_VLAN_MAPPING, \ + struct ipa_ioc_l2tp_vlan_mapping_info *) + +#define IPA_IOC_DEL_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_L2TP_VLAN_MAPPING, \ + struct ipa_ioc_l2tp_vlan_mapping_info *) +/* + * unique magic number of the Tethering bridge ioctls + */ +#define TETH_BRIDGE_IOC_MAGIC 0xCE + +/* + * Ioctls supported by Tethering bridge driver + */ +#define TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE 0 +#define TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS 1 +#define TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS 2 +#define TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES 3 +#define TETH_BRIDGE_IOCTL_MAX 4 + + +/** + * enum teth_link_protocol_type - link protocol (IP / Ethernet) + */ +enum teth_link_protocol_type { + TETH_LINK_PROTOCOL_IP, + 
TETH_LINK_PROTOCOL_ETHERNET, + TETH_LINK_PROTOCOL_MAX, +}; + +/** + * enum teth_aggr_protocol_type - Aggregation protocol (MBIM / TLP) + */ +enum teth_aggr_protocol_type { + TETH_AGGR_PROTOCOL_NONE, + TETH_AGGR_PROTOCOL_MBIM, + TETH_AGGR_PROTOCOL_TLP, + TETH_AGGR_PROTOCOL_MAX, +}; + +/** + * struct teth_aggr_params_link - Aggregation parameters for uplink/downlink + * @aggr_prot: Aggregation protocol (MBIM / TLP) + * @max_transfer_size_byte: Maximal size of aggregated packet in bytes. + * Default value is 16*1024. + * @max_datagrams: Maximal number of IP packets in an aggregated + * packet. Default value is 16 + */ +struct teth_aggr_params_link { + enum teth_aggr_protocol_type aggr_prot; + uint32_t max_transfer_size_byte; + uint32_t max_datagrams; +}; + + +/** + * struct teth_aggr_params - Aggregation parmeters + * @ul: Uplink parameters + * @dl: Downlink parmaeters + */ +struct teth_aggr_params { + struct teth_aggr_params_link ul; + struct teth_aggr_params_link dl; +}; + +/** + * struct teth_aggr_capabilities - Aggregation capabilities + * @num_protocols: Number of protocols described in the array + * @prot_caps[]: Array of aggregation capabilities per protocol + */ +struct teth_aggr_capabilities { + uint16_t num_protocols; + struct teth_aggr_params_link prot_caps[0]; +}; + +/** + * struct teth_ioc_set_bridge_mode + * @link_protocol: link protocol (IP / Ethernet) + * @lcid: logical channel number + */ +struct teth_ioc_set_bridge_mode { + enum teth_link_protocol_type link_protocol; + uint16_t lcid; +}; + +/** + * struct teth_ioc_set_aggr_params + * @aggr_params: Aggregation parmeters + * @lcid: logical channel number + */ +struct teth_ioc_aggr_params { + struct teth_aggr_params aggr_params; + uint16_t lcid; +}; + + +#define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \ + TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \ + struct teth_ioc_set_bridge_mode *) +#define TETH_BRIDGE_IOC_SET_AGGR_PARAMS _IOW(TETH_BRIDGE_IOC_MAGIC, \ + TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS, \ + struct teth_ioc_aggr_params *) +#define TETH_BRIDGE_IOC_GET_AGGR_PARAMS _IOR(TETH_BRIDGE_IOC_MAGIC, \ + TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS, \ + struct teth_ioc_aggr_params *) +#define TETH_BRIDGE_IOC_GET_AGGR_CAPABILITIES _IOWR(TETH_BRIDGE_IOC_MAGIC, \ + TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES, \ + struct teth_aggr_capabilities *) + +/* + * unique magic number of the ODU bridge ioctls + */ +#define ODU_BRIDGE_IOC_MAGIC 0xCD + +/* + * Ioctls supported by ODU bridge driver + */ +#define ODU_BRIDGE_IOCTL_SET_MODE 0 +#define ODU_BRIDGE_IOCTL_SET_LLV6_ADDR 1 +#define ODU_BRIDGE_IOCTL_MAX 2 + +/** + * enum odu_bridge_mode - bridge mode + * (ROUTER MODE / BRIDGE MODE) + */ +enum odu_bridge_mode { + ODU_BRIDGE_MODE_ROUTER, + ODU_BRIDGE_MODE_BRIDGE, + ODU_BRIDGE_MODE_MAX, +}; + +#define ODU_BRIDGE_IOC_SET_MODE _IOW(ODU_BRIDGE_IOC_MAGIC, \ + ODU_BRIDGE_IOCTL_SET_MODE, \ + enum odu_bridge_mode) + +#define ODU_BRIDGE_IOC_SET_LLV6_ADDR _IOW(ODU_BRIDGE_IOC_MAGIC, \ + ODU_BRIDGE_IOCTL_SET_LLV6_ADDR, \ + struct in6_addr *) + +#endif /* _UAPI_MSM_IPA_H_ */ diff --git a/include/uapi/linux/rmnet_ipa_fd_ioctl.h b/include/uapi/linux/rmnet_ipa_fd_ioctl.h new file mode 100644 index 000000000000..f04ac495a5c0 --- /dev/null +++ b/include/uapi/linux/rmnet_ipa_fd_ioctl.h @@ -0,0 +1,173 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RMNET_IPA_FD_IOCTL_H +#define _RMNET_IPA_FD_IOCTL_H + +#include +#include +#include + +/** + * unique magic number of the IPA_WAN device + */ +#define WAN_IOC_MAGIC 0x69 + +#define WAN_IOCTL_ADD_FLT_RULE 0 +#define WAN_IOCTL_ADD_FLT_INDEX 1 +#define WAN_IOCTL_VOTE_FOR_BW_MBPS 2 +#define WAN_IOCTL_POLL_TETHERING_STATS 3 +#define WAN_IOCTL_SET_DATA_QUOTA 4 +#define WAN_IOCTL_SET_TETHER_CLIENT_PIPE 5 +#define WAN_IOCTL_QUERY_TETHER_STATS 6 +#define WAN_IOCTL_RESET_TETHER_STATS 7 +#define WAN_IOCTL_QUERY_DL_FILTER_STATS 8 +#define WAN_IOCTL_ADD_FLT_RULE_EX 9 +#define WAN_IOCTL_QUERY_TETHER_STATS_ALL 10 + +/* User space may not have this defined. */ +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif + +/** + * struct wan_ioctl_poll_tethering_stats - structure used for + * WAN_IOCTL_POLL_TETHERING_STATS IOCTL. + * + * @polling_interval_secs: Polling interval in seconds. + * @reset_stats: Indicate whether to reset the stats (use 1) or not. + * + * The structure to be used by the user space in order to request for the + * tethering stats to be polled. Setting the interval to 0 indicates to stop + * the polling process. + */ +struct wan_ioctl_poll_tethering_stats { + uint64_t polling_interval_secs; + uint8_t reset_stats; +}; + +/** + * struct wan_ioctl_set_data_quota - structure used for + * WAN_IOCTL_SET_DATA_QUOTA IOCTL. + * + * @interface_name: Name of the interface on which to set the quota. + * @quota_mbytes: Quota (in Mbytes) for the above interface. + * @set_quota: Indicate whether to set the quota (use 1) or + * unset the quota. + * + * The structure to be used by the user space in order to request + * a quota to be set on a specific interface (by specifying its name). 
+ */ +struct wan_ioctl_set_data_quota { + char interface_name[IFNAMSIZ]; + uint64_t quota_mbytes; + uint8_t set_quota; +}; + +struct wan_ioctl_set_tether_client_pipe { + /* enum of tether interface */ + enum ipacm_client_enum ipa_client; + uint8_t reset_client; + uint32_t ul_src_pipe_len; + uint32_t ul_src_pipe_list[QMI_IPA_MAX_PIPES_V01]; + uint32_t dl_dst_pipe_len; + uint32_t dl_dst_pipe_list[QMI_IPA_MAX_PIPES_V01]; +}; + +struct wan_ioctl_query_tether_stats { + /* Name of the upstream interface */ + char upstreamIface[IFNAMSIZ]; + /* Name of the tethered interface */ + char tetherIface[IFNAMSIZ]; + /* enum of tether interface */ + enum ipacm_client_enum ipa_client; + uint64_t ipv4_tx_packets; + uint64_t ipv4_tx_bytes; + uint64_t ipv4_rx_packets; + uint64_t ipv4_rx_bytes; + uint64_t ipv6_tx_packets; + uint64_t ipv6_tx_bytes; + uint64_t ipv6_rx_packets; + uint64_t ipv6_rx_bytes; +}; + +struct wan_ioctl_query_tether_stats_all { + /* Name of the upstream interface */ + char upstreamIface[IFNAMSIZ]; + /* enum of tether interface */ + enum ipacm_client_enum ipa_client; + uint8_t reset_stats; + uint64_t tx_bytes; + uint64_t rx_bytes; +}; + +struct wan_ioctl_reset_tether_stats { + /* Name of the upstream interface, not support now */ + char upstreamIface[IFNAMSIZ]; + /* Indicate whether to reset the stats (use 1) or not */ + uint8_t reset_stats; +}; + +struct wan_ioctl_query_dl_filter_stats { + /* Indicate whether to reset the filter stats (use 1) or not*/ + uint8_t reset_stats; + /* Modem response QMI */ + struct ipa_get_data_stats_resp_msg_v01 stats_resp; + /* provide right index to 1st firewall rule */ + uint32_t index; +}; + +#define WAN_IOC_ADD_FLT_RULE _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_FLT_RULE, \ + struct ipa_install_fltr_rule_req_msg_v01 *) + +#define WAN_IOC_ADD_FLT_RULE_INDEX _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_FLT_INDEX, \ + struct ipa_fltr_installed_notif_req_msg_v01 *) + +#define WAN_IOC_VOTE_FOR_BW_MBPS _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_VOTE_FOR_BW_MBPS, \ + uint32_t *) + +#define WAN_IOC_POLL_TETHERING_STATS _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_POLL_TETHERING_STATS, \ + struct wan_ioctl_poll_tethering_stats *) + +#define WAN_IOC_SET_DATA_QUOTA _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_DATA_QUOTA, \ + struct wan_ioctl_set_data_quota *) + +#define WAN_IOC_SET_TETHER_CLIENT_PIPE _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \ + struct wan_ioctl_set_tether_client_pipe *) + +#define WAN_IOC_QUERY_TETHER_STATS _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS, \ + struct wan_ioctl_query_tether_stats *) + +#define WAN_IOC_RESET_TETHER_STATS _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_RESET_TETHER_STATS, \ + struct wan_ioctl_reset_tether_stats *) + +#define WAN_IOC_QUERY_DL_FILTER_STATS _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_DL_FILTER_STATS, \ + struct wan_ioctl_query_dl_filter_stats *) + +#define WAN_IOC_ADD_FLT_RULE_EX _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_FLT_RULE_EX, \ + struct ipa_install_fltr_rule_req_ex_msg_v01 *) + +#define WAN_IOC_QUERY_TETHER_STATS_ALL _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS_ALL, \ + struct wan_ioctl_query_tether_stats_all *) + +#endif /* _RMNET_IPA_FD_IOCTL_H */ -- GitLab
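
A minimal user-space sketch of the header-add flow described by struct ipa_ioc_add_hdr and IPA_IOC_ADD_HDR above: one struct ipa_hdr_add is laid out back to back after the fixed part, no pointers. It assumes the header is installed as <linux/msm_ipa.h> and that /dev/ipa is the IPA char device (the node named in the ipa_msg_meta comment); the 14-byte Ethernet II template and the "example_eth2" name are illustrative values, and error handling is trimmed.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_ipa.h>

static int add_one_header(void)
{
	/* fixed part + one struct ipa_hdr_add, back to back, no pointers */
	size_t sz = sizeof(struct ipa_ioc_add_hdr) + sizeof(struct ipa_hdr_add);
	struct ipa_ioc_add_hdr *req = calloc(1, sz);
	uint8_t eth2_tmpl[14] = { 0 };	/* partial Ethernet II template */
	int fd, ret;

	if (!req)
		return -1;

	req->commit = 1;			/* also write to IPA HW */
	req->num_hdrs = 1;
	snprintf(req->hdr[0].name, IPA_RESOURCE_NAME_MAX, "example_eth2");
	memcpy(req->hdr[0].hdr, eth2_tmpl, sizeof(eth2_tmpl));
	req->hdr[0].hdr_len = sizeof(eth2_tmpl);
	req->hdr[0].type = IPA_HDR_L2_ETHERNET_II;
	req->hdr[0].is_partial = 1;		/* MAC addresses filled in later */

	fd = open("/dev/ipa", O_RDWR);
	if (fd < 0) {
		free(req);
		return -1;
	}
	ret = ioctl(fd, IPA_IOC_ADD_HDR, req);
	if (!ret && req->hdr[0].status == 0)
		printf("hdr_hdl=0x%x\n", (unsigned)req->hdr[0].hdr_hdl);
	close(fd);
	free(req);
	return ret;
}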
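
A similar sketch for the filtering path: one IPv4 rule built with the IPA_FLT_* attribute bitmask (match on destination address and destination port) and installed on a pipe with IPA_IOC_ADD_FLT_RULE. The pipe, address, and port are made-up example values; a real rule whose action passes to routing would also set rt_tbl_hdl obtained via IPA_IOC_GET_RT_TBL.

#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_ipa.h>

static int add_dst_filter(void)
{
	size_t sz = sizeof(struct ipa_ioc_add_flt_rule) +
		    sizeof(struct ipa_flt_rule_add);
	struct ipa_ioc_add_flt_rule *req = calloc(1, sz);
	struct ipa_flt_rule *rule;
	int fd, ret;

	if (!req)
		return -1;

	req->commit = 1;
	req->ip = IPA_IP_v4;
	req->ep = IPA_CLIENT_USB_PROD;		/* pipe the rule applies to */
	req->global = 0;
	req->num_rules = 1;
	req->rules[0].at_rear = 1;

	rule = &req->rules[0].rule;
	rule->action = IPA_PASS_TO_ROUTING;	/* would also need rt_tbl_hdl */
	rule->attrib.attrib_mask = IPA_FLT_DST_ADDR | IPA_FLT_DST_PORT;
	rule->attrib.u.v4.dst_addr = 0xc0a8010a;	/* 192.168.1.10, example */
	rule->attrib.u.v4.dst_addr_mask = 0xffffffff;
	rule->attrib.dst_port = 5001;

	fd = open("/dev/ipa", O_RDWR);
	if (fd < 0) {
		free(req);
		return -1;
	}
	ret = ioctl(fd, IPA_IOC_ADD_FLT_RULE, req);
	if (!ret)
		ret = req->rules[0].status;	/* 0 on success */
	close(fd);
	free(req);
	return ret;
}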
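
The ipa_msg_meta comment describes a push model in which user space blocks on read() against /dev/ipa with a buffer at least as large as the largest message plus the metadata, then parses the struct ipa_msg_meta at the start of the buffer. The sketch below follows that description; the 4 KiB buffer size is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/msm_ipa.h>

static void ipa_msg_loop(void)
{
	char buf[4096];			/* >= largest message + metadata */
	struct ipa_msg_meta meta;
	ssize_t n;
	int fd = open("/dev/ipa", O_RDONLY);

	if (fd < 0)
		return;

	for (;;) {
		n = read(fd, buf, sizeof(buf));	/* blocks until a message */
		if (n < (ssize_t)sizeof(meta))
			break;
		memcpy(&meta, buf, sizeof(meta));
		printf("msg type %u, len %u\n", meta.msg_type, meta.msg_len);
		/* payload follows at buf + sizeof(meta), meta.msg_len bytes */
	}
	close(fd);
}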
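
For rmnet_ipa_fd_ioctl.h, a sketch of setting a data quota with WAN_IOC_SET_DATA_QUOTA and struct wan_ioctl_set_data_quota. The "/dev/wwan_ioctl" node name and the "rmnet_data0" interface name are assumptions for illustration only; use whatever device node the rmnet_ipa ioctl driver actually registers.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rmnet_ipa_fd_ioctl.h>

static int set_wan_quota(void)
{
	struct wan_ioctl_set_data_quota quota;
	int fd, ret;

	memset(&quota, 0, sizeof(quota));
	strncpy(quota.interface_name, "rmnet_data0", IFNAMSIZ - 1);
	quota.quota_mbytes = 500;	/* 500 MB cap, example value */
	quota.set_quota = 1;		/* 1 = set, 0 = unset */

	fd = open("/dev/wwan_ioctl", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return -1;
	ret = ioctl(fd, WAN_IOC_SET_DATA_QUOTA, &quota);
	close(fd);
	return ret;
}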