Loading drivers/platform/msm/ipa/Makefile +1 −2 Original line number Diff line number Diff line Loading @@ -2,7 +2,6 @@ obj-$(CONFIG_IPA) += ipat.o ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \ ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o odu_bridge.o \ ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o \ ipa_uc.o ipa_uc_wdi.o ipa_uc.o ipa_uc_wdi.o ipa_dma.o obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o drivers/platform/msm/ipa/ipa_dma.c 0 → 100644 +785 −0 Original line number Diff line number Diff line /* Copyright (c) 2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/debugfs.h> #include <linux/export.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/msm_ipa.h> #include <linux/mutex.h> #include <linux/ipa.h> #include "ipa_i.h" #define IPA_DMA_POLLING_MIN_SLEEP_RX 1010 #define IPA_DMA_POLLING_MAX_SLEEP_RX 1050 #define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8 #define IPA_DMA_MAX_PKT_SZ 0xFFFF #define IPA_DMA_MAX_PENDING_SYNC (IPA_SYS_DESC_FIFO_SZ / \ sizeof(struct sps_iovec) - 1) #define IPA_DMA_MAX_PENDING_ASYNC (IPA_DMA_SYS_DESC_MAX_FIFO_SZ / \ sizeof(struct sps_iovec) - 1) #define IPADMA_DRV_NAME "ipa_dma" #define IPADMA_DBG(fmt, args...) \ pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \ __func__, __LINE__, ## args) #define IPADMA_ERR(fmt, args...) 
\ pr_err(IPADMA_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) #define IPADMA_FUNC_ENTRY() \ IPADMA_DBG("ENTRY\n") #define IPADMA_FUNC_EXIT() \ IPADMA_DBG("EXIT\n") #define IS_INIT(msg) \ do { \ if (ipa_dma_ctx == NULL) { \ IPADMA_ERR("IPADMA isn't initialized "msg"\n"); \ return -EPERM; \ } \ } \ while (0) #define OVERLAPPING_CHECK(addr1, addr2, len)\ do { \ if ((max(addr1, addr2) - min(addr1, addr2)) < len) { \ IPADMA_ERR("invalid addresses - " \ "overlapping buffers\n"); \ return -EINVAL; \ } \ } \ while (0) #define LEN_CHECK(len) \ do { \ if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {\ IPADMA_ERR("invalid len, %d\n", len);\ return -EINVAL;\ } \ } \ while (0) #ifdef CONFIG_DEBUG_FS #define IPADMA_MAX_MSG_LEN 1024 static char dbg_buff[IPADMA_MAX_MSG_LEN]; static void ipa_dma_debugfs_init(void); static void ipa_dma_debugfs_destroy(void); #else static void ipa_debugfs_init(void) {} static void ipa_dma_debugfs_destroy(void) {} #endif /** * struct ipa_dma_xfer_wrapper - IPADMA transfer descr wrapper * @phys_addr_src: physical address of the source data to copy * @phys_addr_dest: physical address to store the copied data * @len: len in bytes to copy * @link: linked to the wrappers list on the proper(sync/async) cons pipe * @xfer_done: completion object for sync_memcpy completion * @callback: IPADMA client provided completion callback * @user1: cookie1 for above callback * * This struct can wrap both sync and async memcpy transfers descriptors. */ struct ipa_dma_xfer_wrapper { phys_addr_t phys_addr_src; phys_addr_t phys_addr_dest; u16 len; struct list_head link; struct completion xfer_done; void (*callback)(void *user1); void *user1; }; /** * struct ipa_dma_ctx -IPADMA driver context information * @is_enabled:is ipa_dma enabled? 
* @destroy_pending: destroy ipa_dma after handling all pending memcpy * @ipa_dma_xfer_wrapper_cache: cache of ipa_dma_xfer_wrapper structs * @sync_lock: lock for synchronisation in sync_memcpy * @async_lock: lock for synchronisation in async_memcpy * @enable_lock: lock for is_enabled * @pending_lock: lock for synchronize is_enable and pending_cnt * @done: no pending works-ipadma can be destroyed * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer * @ipa_dma_async_prod_hdl:handle of async memcpy producer * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer * @sync_memcpy_pending_cnt: number of pending sync memcopy operations * @async_memcpy_pending_cnt: number of pending async memcopy operations * @total_sync_memcpy: total number of sync memcpy (statistics) * @total_async_memcpy: total number of async memcpy (statistics) */ struct ipa_dma_ctx { bool is_enabled; bool destroy_pending; struct kmem_cache *ipa_dma_xfer_wrapper_cache; struct mutex sync_lock; spinlock_t async_lock; struct mutex enable_lock; spinlock_t pending_lock; struct completion done; u32 ipa_dma_sync_prod_hdl; u32 ipa_dma_async_prod_hdl; u32 ipa_dma_sync_cons_hdl; u32 ipa_dma_async_cons_hdl; atomic_t sync_memcpy_pending_cnt; atomic_t async_memcpy_pending_cnt; atomic_t total_sync_memcpy; atomic_t total_async_memcpy; }; static struct ipa_dma_ctx *ipa_dma_ctx; /** * ipa_dma_init() -Initialize IPADMA. 
* * This function initialize all IPADMA internal data and connect in dma: * MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS * MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_SYNC_CONS * * Return codes: 0: success * -EFAULT: IPADMA is already initialized * -ENOMEM: allocating memory error * -EPERM: pipe connection failed */ int ipa_dma_init(void) { struct ipa_dma_ctx *ipa_dma_ctx_t; struct ipa_sys_connect_params sys_in; int res = 0; IPADMA_FUNC_ENTRY(); if (ipa_dma_ctx) { IPADMA_ERR("Already initialized.\n"); return -EFAULT; } ipa_dma_ctx_t = kzalloc(sizeof(*(ipa_dma_ctx)), GFP_KERNEL); if (!ipa_dma_ctx_t) { IPADMA_ERR("kzalloc error.\n"); return -ENOMEM; } ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache = kmem_cache_create("IPA DMA XFER WRAPPER", sizeof(struct ipa_dma_xfer_wrapper), 0, 0, NULL); if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) { IPAERR(":failed to create ipa dma xfer wrapper cache.\n"); res = -ENOMEM; goto fail_mem_ctrl; } mutex_init(&ipa_dma_ctx_t->enable_lock); spin_lock_init(&ipa_dma_ctx_t->async_lock); mutex_init(&ipa_dma_ctx_t->sync_lock); spin_lock_init(&ipa_dma_ctx_t->pending_lock); init_completion(&ipa_dma_ctx_t->done); ipa_dma_ctx_t->is_enabled = false; ipa_dma_ctx_t->destroy_pending = false; atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0); atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0); atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0); atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0); /* IPADMA SYNC PROD-source for sync memcpy */ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD; sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS; sys_in.skip_ep_cfg = false; if (ipa_setup_sys_pipe(&sys_in, &ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) { IPADMA_ERR(":setup sync prod pipe failed\n"); res = -EPERM; goto fail_sync_prod; } /* IPADMA SYNC CONS-destination for sync memcpy */ memset(&sys_in, 0, sizeof(struct 
ipa_sys_connect_params)); sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS; sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; sys_in.skip_ep_cfg = false; sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; sys_in.notify = NULL; sys_in.priv = NULL; if (ipa_setup_sys_pipe(&sys_in, &ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) { IPADMA_ERR(":setup sync cons pipe failed.\n"); res = -EPERM; goto fail_sync_cons; } IPADMA_DBG("SYNC MEMCPY pipes are connected\n"); /* IPADMA ASYNC PROD-source for sync memcpy */ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD; sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ; sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS; sys_in.skip_ep_cfg = false; sys_in.notify = NULL; if (ipa_setup_sys_pipe(&sys_in, &ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) { IPADMA_ERR(":setup async prod pipe failed.\n"); res = -EPERM; goto fail_async_prod; } /* IPADMA ASYNC CONS-destination for sync memcpy */ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS; sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ; sys_in.skip_ep_cfg = false; sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; sys_in.notify = ipa_dma_async_memcpy_notify_cb; sys_in.priv = NULL; if (ipa_setup_sys_pipe(&sys_in, &ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) { IPADMA_ERR(":setup async cons pipe failed.\n"); res = -EPERM; goto fail_async_cons; } ipa_dma_debugfs_init(); ipa_dma_ctx = ipa_dma_ctx_t; IPADMA_DBG("ASYNC MEMCPY pipes are connected\n"); IPADMA_FUNC_EXIT(); return res; fail_async_cons: ipa_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl); fail_async_prod: ipa_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl); fail_sync_cons: ipa_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl); fail_sync_prod: kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache); fail_mem_ctrl: kfree(ipa_dma_ctx_t); ipa_dma_ctx = NULL; return res; } 
EXPORT_SYMBOL(ipa_dma_init);

/**
 * ipa_dma_enable() - Vote for IPA clocks.
 *
 * Return codes: 0: success
 *		-EINVAL: IPADMA is not initialized
 *		-EPERM: Operation not permitted as ipa_dma is already
 *			enabled
 */
int ipa_dma_enable(void)
{
	IPADMA_FUNC_ENTRY();
	IS_INIT("can't enable");
	mutex_lock(&ipa_dma_ctx->enable_lock);
	if (ipa_dma_ctx->is_enabled) {
		IPADMA_DBG("Already enabled.\n");
		mutex_unlock(&ipa_dma_ctx->enable_lock);
		return -EPERM;
	}
	/* take an IPA clock vote for as long as the DMA is enabled */
	ipa_inc_client_enable_clks();
	ipa_dma_ctx->is_enabled = true;
	mutex_unlock(&ipa_dma_ctx->enable_lock);

	IPADMA_FUNC_EXIT();
	return 0;
}
EXPORT_SYMBOL(ipa_dma_enable);

/**
 * ipa_dma_disable() - Unvote for IPA clocks.
 *
 * enter to power save mode.
 *
 * Return codes: 0: success
 *		-EINVAL: IPADMA is not initialized
 *		-EPERM: Operation not permitted as ipa_dma is already
 *			disabled
 *		-EFAULT: can not disable ipa_dma as there are pending
 *			memcopy works
 */
int ipa_dma_disable(void)
{
	unsigned long flags;

	IPADMA_FUNC_ENTRY();
	IS_INIT("can't disable");
	mutex_lock(&ipa_dma_ctx->enable_lock);
	/* pending_lock orders this disable against in-flight memcpy calls
	 * that check is_enabled before bumping their pending counter
	 */
	spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags);
	if (!ipa_dma_ctx->is_enabled) {
		IPADMA_DBG("Already disabled.\n");
		spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
		mutex_unlock(&ipa_dma_ctx->enable_lock);
		return -EPERM;
	}
	if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) ||
		atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) {
		IPADMA_ERR("There is pending work, can't disable.\n");
		spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
		mutex_unlock(&ipa_dma_ctx->enable_lock);
		return -EFAULT;
	}
	ipa_dma_ctx->is_enabled = false;
	spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
	/* release the clock vote taken by ipa_dma_enable() */
	ipa_dec_client_disable_clks();
	mutex_unlock(&ipa_dma_ctx->enable_lock);

	IPADMA_FUNC_EXIT();
	return 0;
}
EXPORT_SYMBOL(ipa_dma_disable);

/**
 * ipa_dma_sync_memcpy() - Perform synchronous memcpy using IPA.
 *
 * @dest: physical address to store the copied data.
 * @src: physical address of the source data to copy.
 * @len: number of bytes to copy.
 *
 * Return codes: 0: success
 *		-EINVAL: invalid params
 *		-EPERM: operation not permitted as ipa_dma isn't enable or
 *			initialized
 *		-SPS_ERROR: on sps faliures
 *		-EFAULT: other
 */
int ipa_dma_sync_memcpy(phys_addr_t dest, phys_addr_t src, int len)
{
	int ep_idx;
	int res;
	int i = 0;
	struct ipa_sys_context *cons_sys;
	struct ipa_sys_context *prod_sys;
	struct sps_iovec iov;
	struct ipa_dma_xfer_wrapper *xfer_descr = NULL;
	struct ipa_dma_xfer_wrapper *head_descr = NULL;
	unsigned long flags;

	IPADMA_FUNC_ENTRY();
	IS_INIT("can't memcpy");
	OVERLAPPING_CHECK(src, dest, len);
	LEN_CHECK(len);
	/* register as pending under pending_lock so ipa_dma_disable()
	 * cannot slip in between the is_enabled check and the inc
	 */
	spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags);
	if (!ipa_dma_ctx->is_enabled) {
		IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
		spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
		return -EPERM;
	}
	atomic_inc(&ipa_dma_ctx->sync_memcpy_pending_cnt);
	spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
	/* back off if the descriptor FIFO would overflow */
	if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) >=
		IPA_DMA_MAX_PENDING_SYNC) {
		atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt);
		IPADMA_DBG("Reached pending requests limit\n");
		return -EFAULT;
	}

	ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
	cons_sys = ipa_ctx->ep[ep_idx].sys;
	ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
	prod_sys = ipa_ctx->ep[ep_idx].sys;

	xfer_descr = kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache,
					GFP_KERNEL);
	if (!xfer_descr) {
		IPADMA_ERR("failed to alloc xfer descr wrapper\n");
		res = -ENOMEM;
		goto fail_mem_alloc;
	}
	xfer_descr->phys_addr_dest = dest;
	xfer_descr->phys_addr_src = src;
	xfer_descr->len = len;
	init_completion(&xfer_descr->xfer_done);

	mutex_lock(&ipa_dma_ctx->sync_lock);
	list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
	cons_sys->len++;
	/* queue the destination descriptor first, then the source with EOT
	 * to kick the copy
	 */
	res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, NULL, 0);
	if (res) {
		IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
		goto fail_sps_send;
	}
	res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
		NULL, SPS_IOVEC_FLAG_EOT);
	if (res) {
		/* dest descriptor already queued and cannot be reclaimed;
		 * the pipes are now inconsistent, so crash hard
		 */
		IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
		BUG();
	}
	head_descr = list_first_entry(&cons_sys->head_desc_list,
				struct ipa_dma_xfer_wrapper, link);

	/* in case we are not the head of the list, wait for head to wake us */
	if (xfer_descr != head_descr) {
		mutex_unlock(&ipa_dma_ctx->sync_lock);
		wait_for_completion(&xfer_descr->xfer_done);
		mutex_lock(&ipa_dma_ctx->sync_lock);
		head_descr = list_first_entry(&cons_sys->head_desc_list,
					struct ipa_dma_xfer_wrapper, link);
		/* the waker only completes the new head, so we must be it */
		BUG_ON(xfer_descr != head_descr);
	}
	mutex_unlock(&ipa_dma_ctx->sync_lock);

	do {
		/* wait for transfer to complete */
		res = sps_get_iovec(cons_sys->ep->ep_hdl, &iov);
		if (res)
			IPADMA_ERR("Failed: get_iovec, returned %d loop#:%d\n"
			, res, i);
		usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX,
			IPA_DMA_POLLING_MAX_SLEEP_RX);
		i++;
	} while (iov.addr == 0);

	mutex_lock(&ipa_dma_ctx->sync_lock);
	list_del(&head_descr->link);
	cons_sys->len--;
	kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
	/* wake the head of the list */
	if (!list_empty(&cons_sys->head_desc_list)) {
		head_descr = list_first_entry(&cons_sys->head_desc_list,
				struct ipa_dma_xfer_wrapper, link);
		complete(&head_descr->xfer_done);
	}
	mutex_unlock(&ipa_dma_ctx->sync_lock);

	/* the popped iovec must describe exactly our transfer */
	BUG_ON(dest != iov.addr);
	BUG_ON(len != iov.size);
	atomic_inc(&ipa_dma_ctx->total_sync_memcpy);
	atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt);
	if (ipa_dma_ctx->destroy_pending &&
		!atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) &&
		!atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt))
		complete(&ipa_dma_ctx->done);

	IPADMA_FUNC_EXIT();
	return res;

fail_sps_send:
	list_del(&xfer_descr->link);
	cons_sys->len--;
	mutex_unlock(&ipa_dma_ctx->sync_lock);
	kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
fail_mem_alloc:
	atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt);
	if (ipa_dma_ctx->destroy_pending &&
		!atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) &&
		!atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt))
		complete(&ipa_dma_ctx->done);
	return res;
}
EXPORT_SYMBOL(ipa_dma_sync_memcpy); /** * ipa_dma_async_memcpy()- Perform asynchronous memcpy using IPA. * * @dest: physical address to store the copied data. * @src: physical address of the source data to copy. * @len: number of bytes to copy. * @user_cb: callback function to notify the client when the copy was done. * @user_param: cookie for user_cb. * * Return codes: 0: success * -EINVAL: invalid params * -EPERM: operation not permitted as ipa_dma isn't enable or * initialized * -SPS_ERROR: on sps faliures * -EFAULT: descr fifo is full. */ int ipa_dma_async_memcpy(phys_addr_t dest, phys_addr_t src, int len, void (*user_cb)(void *user1), void *user_param) { int ep_idx; int res = 0; struct ipa_dma_xfer_wrapper *xfer_descr = NULL; struct ipa_sys_context *prod_sys; struct ipa_sys_context *cons_sys; unsigned long flags; IPADMA_FUNC_ENTRY(); IS_INIT("can't memcpy"); OVERLAPPING_CHECK(src, dest, len); LEN_CHECK(len); if (!user_cb) { IPADMA_ERR("null pointer: user_cb\n"); return -EINVAL; } spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags); if (!ipa_dma_ctx->is_enabled) { IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n"); spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); return -EPERM; } atomic_inc(&ipa_dma_ctx->async_memcpy_pending_cnt); spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt) >= IPA_DMA_MAX_PENDING_ASYNC) { atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); IPADMA_DBG("Reached pending requests limit\n"); return -EFAULT; } ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); cons_sys = ipa_ctx->ep[ep_idx].sys; ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD); prod_sys = ipa_ctx->ep[ep_idx].sys; xfer_descr = kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, GFP_KERNEL); if (!xfer_descr) { IPADMA_ERR("failed to alloc xfrer descr wrapper\n"); res = -ENOMEM; goto fail_mem_alloc; } xfer_descr->phys_addr_dest = dest; xfer_descr->phys_addr_src = src; 
xfer_descr->len = len; xfer_descr->callback = user_cb; xfer_descr->user1 = user_param; spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags); list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list); cons_sys->len++; res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, xfer_descr, 0); if (res) { IPADMA_ERR("Failed: sps_transfer_one on dest descr\n"); goto fail_sps_send; } res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len, NULL, SPS_IOVEC_FLAG_EOT); if (res) { IPADMA_ERR("Failed: sps_transfer_one on src descr\n"); BUG(); goto fail_sps_send; } spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); IPADMA_FUNC_EXIT(); return res; fail_sps_send: list_del(&xfer_descr->link); spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); fail_mem_alloc: atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); if (ipa_dma_ctx->destroy_pending && !atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) && !atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) complete(&ipa_dma_ctx->done); return res; } EXPORT_SYMBOL(ipa_dma_async_memcpy); /** * ipa_dma_destroy() -teardown IPADMA pipes and release ipadma. * * this is a blocking function, returns just after destroying IPADMA. 
*/ void ipa_dma_destroy(void) { int res = 0; IPADMA_FUNC_ENTRY(); if (!ipa_dma_ctx) { IPADMA_DBG("IPADMA isn't initialized\n"); return; } if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) || atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) { ipa_dma_ctx->destroy_pending = true; IPADMA_DBG("There are pending memcpy, wait for completion\n"); wait_for_completion(&ipa_dma_ctx->done); } res = ipa_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_cons_hdl); if (res) IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n"); ipa_dma_ctx->ipa_dma_async_cons_hdl = 0; res = ipa_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_cons_hdl); if (res) IPADMA_ERR("teardown IPADMA SYNC CONS failed\n"); ipa_dma_ctx->ipa_dma_sync_cons_hdl = 0; res = ipa_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_prod_hdl); if (res) IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n"); ipa_dma_ctx->ipa_dma_async_prod_hdl = 0; res = ipa_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_prod_hdl); if (res) IPADMA_ERR("teardown IPADMA SYNC PROD failed\n"); ipa_dma_ctx->ipa_dma_sync_prod_hdl = 0; ipa_dma_debugfs_destroy(); kmem_cache_destroy(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache); kfree(ipa_dma_ctx); ipa_dma_ctx = NULL; IPADMA_FUNC_EXIT(); return; } EXPORT_SYMBOL(ipa_dma_destroy); /** * ipa_dma_async_memcpy_notify_cb() -Callback function which will be called by * IPA driver after getting notify from SPS driver or poll mode on Rx operation * is completed (data was written to dest descriptor on async_cons ep). * * @priv -not in use. * @evt - event name - IPA_RECIVE. * @data -the iovec. 
*/ void ipa_dma_async_memcpy_notify_cb(void *priv , enum ipa_dp_evt_type evt, unsigned long data) { int ep_idx = 0; struct sps_iovec *iov = (struct sps_iovec *) data; struct ipa_dma_xfer_wrapper *xfer_descr_expected; struct ipa_sys_context *sys; unsigned long flags; IPADMA_FUNC_ENTRY(); ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); sys = ipa_ctx->ep[ep_idx].sys; spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags); xfer_descr_expected = list_first_entry(&sys->head_desc_list, struct ipa_dma_xfer_wrapper, link); list_del(&xfer_descr_expected->link); sys->len--; spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); BUG_ON(xfer_descr_expected->phys_addr_dest != iov->addr); BUG_ON(xfer_descr_expected->len != iov->size); atomic_inc(&ipa_dma_ctx->total_async_memcpy); atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); xfer_descr_expected->callback(xfer_descr_expected->user1); kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr_expected); if (ipa_dma_ctx->destroy_pending && !atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) && !atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) complete(&ipa_dma_ctx->done); IPADMA_FUNC_EXIT(); return; } #ifdef CONFIG_DEBUG_FS static struct dentry *dent; static struct dentry *dfile_info; static ssize_t ipa_dma_debugfs_read(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { int nbytes = 0; if (!ipa_dma_ctx) nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "Not initialized\n"); else nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "Status:\n IPADMA is %s\n", (ipa_dma_ctx->is_enabled) ? 
"Enabled" : "Disabled"); nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "Statistics:\n total sync memcpy: %d\n ", atomic_read(&ipa_dma_ctx->total_sync_memcpy)); nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "total async memcpy: %d\n ", atomic_read(&ipa_dma_ctx->total_async_memcpy)); nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "pending sync memcpy jobs: %d\n ", atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt)); nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "pending async memcpy jobs: %d\n", atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)); return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); } static ssize_t ipa_dma_debugfs_reset_statistics(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { unsigned long missing; s8 in_num = 0; if (sizeof(dbg_buff) < count + 1) return -EFAULT; missing = copy_from_user(dbg_buff, ubuf, count); if (missing) return -EFAULT; dbg_buff[count] = '\0'; if (kstrtos8(dbg_buff, 0, &in_num)) return -EFAULT; switch (in_num) { case 0: if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt) || atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt)) IPADMA_DBG("Note, there are pending memcpy\n"); atomic_set(&ipa_dma_ctx->total_async_memcpy, 0); atomic_set(&ipa_dma_ctx->total_sync_memcpy, 0); break; default: IPADMA_ERR("invalid argument: To reset statistics echo 0\n"); break; } return count; } const struct file_operations ipadma_stats_ops = { .read = ipa_dma_debugfs_read, .write = ipa_dma_debugfs_reset_statistics, }; static void ipa_dma_debugfs_init(void) { const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR | S_IWGRP | S_IWOTH; dent = debugfs_create_dir("ipa_dma", 0); if (IS_ERR(dent)) { IPADMA_ERR("fail to create folder ipa_dma\n"); return; } dfile_info = debugfs_create_file("info", read_write_mode, dent, 0, &ipadma_stats_ops); if (!dfile_info || IS_ERR(dfile_info)) { IPADMA_ERR("fail to create file stats\n"); goto fail; } return; fail: debugfs_remove_recursive(dent); } 
/* Remove the ipa_dma debugfs directory and every file under it. */
static void ipa_dma_debugfs_destroy(void)
{
	debugfs_remove_recursive(dent);
}
#endif /* !CONFIG_DEBUG_FS */
drivers/platform/msm/ipa/Makefile +1 −2 Original line number Diff line number Diff line Loading @@ -2,7 +2,6 @@ obj-$(CONFIG_IPA) += ipat.o ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \ ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o odu_bridge.o \ ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o \ ipa_uc.o ipa_uc_wdi.o ipa_uc.o ipa_uc_wdi.o ipa_dma.o obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
drivers/platform/msm/ipa/ipa_dma.c 0 → 100644 +785 −0 Original line number Diff line number Diff line /* Copyright (c) 2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/debugfs.h> #include <linux/export.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/msm_ipa.h> #include <linux/mutex.h> #include <linux/ipa.h> #include "ipa_i.h" #define IPA_DMA_POLLING_MIN_SLEEP_RX 1010 #define IPA_DMA_POLLING_MAX_SLEEP_RX 1050 #define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8 #define IPA_DMA_MAX_PKT_SZ 0xFFFF #define IPA_DMA_MAX_PENDING_SYNC (IPA_SYS_DESC_FIFO_SZ / \ sizeof(struct sps_iovec) - 1) #define IPA_DMA_MAX_PENDING_ASYNC (IPA_DMA_SYS_DESC_MAX_FIFO_SZ / \ sizeof(struct sps_iovec) - 1) #define IPADMA_DRV_NAME "ipa_dma" #define IPADMA_DBG(fmt, args...) \ pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \ __func__, __LINE__, ## args) #define IPADMA_ERR(fmt, args...) 
\ pr_err(IPADMA_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) #define IPADMA_FUNC_ENTRY() \ IPADMA_DBG("ENTRY\n") #define IPADMA_FUNC_EXIT() \ IPADMA_DBG("EXIT\n") #define IS_INIT(msg) \ do { \ if (ipa_dma_ctx == NULL) { \ IPADMA_ERR("IPADMA isn't initialized "msg"\n"); \ return -EPERM; \ } \ } \ while (0) #define OVERLAPPING_CHECK(addr1, addr2, len)\ do { \ if ((max(addr1, addr2) - min(addr1, addr2)) < len) { \ IPADMA_ERR("invalid addresses - " \ "overlapping buffers\n"); \ return -EINVAL; \ } \ } \ while (0) #define LEN_CHECK(len) \ do { \ if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {\ IPADMA_ERR("invalid len, %d\n", len);\ return -EINVAL;\ } \ } \ while (0) #ifdef CONFIG_DEBUG_FS #define IPADMA_MAX_MSG_LEN 1024 static char dbg_buff[IPADMA_MAX_MSG_LEN]; static void ipa_dma_debugfs_init(void); static void ipa_dma_debugfs_destroy(void); #else static void ipa_debugfs_init(void) {} static void ipa_dma_debugfs_destroy(void) {} #endif /** * struct ipa_dma_xfer_wrapper - IPADMA transfer descr wrapper * @phys_addr_src: physical address of the source data to copy * @phys_addr_dest: physical address to store the copied data * @len: len in bytes to copy * @link: linked to the wrappers list on the proper(sync/async) cons pipe * @xfer_done: completion object for sync_memcpy completion * @callback: IPADMA client provided completion callback * @user1: cookie1 for above callback * * This struct can wrap both sync and async memcpy transfers descriptors. */ struct ipa_dma_xfer_wrapper { phys_addr_t phys_addr_src; phys_addr_t phys_addr_dest; u16 len; struct list_head link; struct completion xfer_done; void (*callback)(void *user1); void *user1; }; /** * struct ipa_dma_ctx -IPADMA driver context information * @is_enabled:is ipa_dma enabled? 
* @destroy_pending: destroy ipa_dma after handling all pending memcpy * @ipa_dma_xfer_wrapper_cache: cache of ipa_dma_xfer_wrapper structs * @sync_lock: lock for synchronisation in sync_memcpy * @async_lock: lock for synchronisation in async_memcpy * @enable_lock: lock for is_enabled * @pending_lock: lock for synchronize is_enable and pending_cnt * @done: no pending works-ipadma can be destroyed * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer * @ipa_dma_async_prod_hdl:handle of async memcpy producer * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer * @sync_memcpy_pending_cnt: number of pending sync memcopy operations * @async_memcpy_pending_cnt: number of pending async memcopy operations * @total_sync_memcpy: total number of sync memcpy (statistics) * @total_async_memcpy: total number of async memcpy (statistics) */ struct ipa_dma_ctx { bool is_enabled; bool destroy_pending; struct kmem_cache *ipa_dma_xfer_wrapper_cache; struct mutex sync_lock; spinlock_t async_lock; struct mutex enable_lock; spinlock_t pending_lock; struct completion done; u32 ipa_dma_sync_prod_hdl; u32 ipa_dma_async_prod_hdl; u32 ipa_dma_sync_cons_hdl; u32 ipa_dma_async_cons_hdl; atomic_t sync_memcpy_pending_cnt; atomic_t async_memcpy_pending_cnt; atomic_t total_sync_memcpy; atomic_t total_async_memcpy; }; static struct ipa_dma_ctx *ipa_dma_ctx; /** * ipa_dma_init() -Initialize IPADMA. 
* * This function initialize all IPADMA internal data and connect in dma: * MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS * MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_SYNC_CONS * * Return codes: 0: success * -EFAULT: IPADMA is already initialized * -ENOMEM: allocating memory error * -EPERM: pipe connection failed */ int ipa_dma_init(void) { struct ipa_dma_ctx *ipa_dma_ctx_t; struct ipa_sys_connect_params sys_in; int res = 0; IPADMA_FUNC_ENTRY(); if (ipa_dma_ctx) { IPADMA_ERR("Already initialized.\n"); return -EFAULT; } ipa_dma_ctx_t = kzalloc(sizeof(*(ipa_dma_ctx)), GFP_KERNEL); if (!ipa_dma_ctx_t) { IPADMA_ERR("kzalloc error.\n"); return -ENOMEM; } ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache = kmem_cache_create("IPA DMA XFER WRAPPER", sizeof(struct ipa_dma_xfer_wrapper), 0, 0, NULL); if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) { IPAERR(":failed to create ipa dma xfer wrapper cache.\n"); res = -ENOMEM; goto fail_mem_ctrl; } mutex_init(&ipa_dma_ctx_t->enable_lock); spin_lock_init(&ipa_dma_ctx_t->async_lock); mutex_init(&ipa_dma_ctx_t->sync_lock); spin_lock_init(&ipa_dma_ctx_t->pending_lock); init_completion(&ipa_dma_ctx_t->done); ipa_dma_ctx_t->is_enabled = false; ipa_dma_ctx_t->destroy_pending = false; atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0); atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0); atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0); atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0); /* IPADMA SYNC PROD-source for sync memcpy */ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD; sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS; sys_in.skip_ep_cfg = false; if (ipa_setup_sys_pipe(&sys_in, &ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) { IPADMA_ERR(":setup sync prod pipe failed\n"); res = -EPERM; goto fail_sync_prod; } /* IPADMA SYNC CONS-destination for sync memcpy */ memset(&sys_in, 0, sizeof(struct 
ipa_sys_connect_params)); sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS; sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; sys_in.skip_ep_cfg = false; sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; sys_in.notify = NULL; sys_in.priv = NULL; if (ipa_setup_sys_pipe(&sys_in, &ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) { IPADMA_ERR(":setup sync cons pipe failed.\n"); res = -EPERM; goto fail_sync_cons; } IPADMA_DBG("SYNC MEMCPY pipes are connected\n"); /* IPADMA ASYNC PROD-source for sync memcpy */ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD; sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ; sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS; sys_in.skip_ep_cfg = false; sys_in.notify = NULL; if (ipa_setup_sys_pipe(&sys_in, &ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) { IPADMA_ERR(":setup async prod pipe failed.\n"); res = -EPERM; goto fail_async_prod; } /* IPADMA ASYNC CONS-destination for sync memcpy */ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS; sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ; sys_in.skip_ep_cfg = false; sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; sys_in.notify = ipa_dma_async_memcpy_notify_cb; sys_in.priv = NULL; if (ipa_setup_sys_pipe(&sys_in, &ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) { IPADMA_ERR(":setup async cons pipe failed.\n"); res = -EPERM; goto fail_async_cons; } ipa_dma_debugfs_init(); ipa_dma_ctx = ipa_dma_ctx_t; IPADMA_DBG("ASYNC MEMCPY pipes are connected\n"); IPADMA_FUNC_EXIT(); return res; fail_async_cons: ipa_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl); fail_async_prod: ipa_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl); fail_sync_cons: ipa_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl); fail_sync_prod: kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache); fail_mem_ctrl: kfree(ipa_dma_ctx_t); ipa_dma_ctx = NULL; return res; } 
EXPORT_SYMBOL(ipa_dma_init); /** * ipa_dma_enable() -Vote for IPA clocks. * *Return codes: 0: success * -EINVAL: IPADMA is not initialized * -EPERM: Operation not permitted as ipa_dma is already * enabled */ int ipa_dma_enable(void) { IPADMA_FUNC_ENTRY(); IS_INIT("can't enable"); mutex_lock(&ipa_dma_ctx->enable_lock); if (ipa_dma_ctx->is_enabled) { IPADMA_DBG("Already enabled.\n"); mutex_unlock(&ipa_dma_ctx->enable_lock); return -EPERM; } ipa_inc_client_enable_clks(); ipa_dma_ctx->is_enabled = true; mutex_unlock(&ipa_dma_ctx->enable_lock); IPADMA_FUNC_EXIT(); return 0; } EXPORT_SYMBOL(ipa_dma_enable); /** * ipa_dma_disable()- Unvote for IPA clocks. * * enter to power save mode. * * Return codes: 0: success * -EINVAL: IPADMA is not initialized * -EPERM: Operation not permitted as ipa_dma is already * diabled * -EFAULT: can not disable ipa_dma as there are pending * memcopy works */ int ipa_dma_disable(void) { unsigned long flags; IPADMA_FUNC_ENTRY(); IS_INIT("can't disable"); mutex_lock(&ipa_dma_ctx->enable_lock); spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags); if (!ipa_dma_ctx->is_enabled) { IPADMA_DBG("Already disabled.\n"); spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); mutex_unlock(&ipa_dma_ctx->enable_lock); return -EPERM; } if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) || atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) { IPADMA_ERR("There is pending work, can't disable.\n"); spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); mutex_unlock(&ipa_dma_ctx->enable_lock); return -EFAULT; } ipa_dma_ctx->is_enabled = false; spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); ipa_dec_client_disable_clks(); mutex_unlock(&ipa_dma_ctx->enable_lock); IPADMA_FUNC_EXIT(); return 0; } EXPORT_SYMBOL(ipa_dma_disable); /** * ipa_dma_sync_memcpy()- Perform synchronous memcpy using IPA. * * @dest: physical address to store the copied data. * @src: physical address of the source data to copy. * @len: number of bytes to copy. 
* * Return codes: 0: success * -EINVAL: invalid params * -EPERM: operation not permitted as ipa_dma isn't enable or * initialized * -SPS_ERROR: on sps faliures * -EFAULT: other */ int ipa_dma_sync_memcpy(phys_addr_t dest, phys_addr_t src, int len) { int ep_idx; int res; int i = 0; struct ipa_sys_context *cons_sys; struct ipa_sys_context *prod_sys; struct sps_iovec iov; struct ipa_dma_xfer_wrapper *xfer_descr = NULL; struct ipa_dma_xfer_wrapper *head_descr = NULL; unsigned long flags; IPADMA_FUNC_ENTRY(); IS_INIT("can't memcpy"); OVERLAPPING_CHECK(src, dest, len); LEN_CHECK(len); spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags); if (!ipa_dma_ctx->is_enabled) { IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n"); spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); return -EPERM; } atomic_inc(&ipa_dma_ctx->sync_memcpy_pending_cnt); spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) >= IPA_DMA_MAX_PENDING_SYNC) { atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt); IPADMA_DBG("Reached pending requests limit\n"); return -EFAULT; } ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS); cons_sys = ipa_ctx->ep[ep_idx].sys; ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); prod_sys = ipa_ctx->ep[ep_idx].sys; xfer_descr = kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, GFP_KERNEL); if (!xfer_descr) { IPADMA_ERR("failed to alloc xfer descr wrapper\n"); res = -ENOMEM; goto fail_mem_alloc; } xfer_descr->phys_addr_dest = dest; xfer_descr->phys_addr_src = src; xfer_descr->len = len; init_completion(&xfer_descr->xfer_done); mutex_lock(&ipa_dma_ctx->sync_lock); list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list); cons_sys->len++; res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, NULL, 0); if (res) { IPADMA_ERR("Failed: sps_transfer_one on dest descr\n"); goto fail_sps_send; } res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len, NULL, SPS_IOVEC_FLAG_EOT); if (res) { 
IPADMA_ERR("Failed: sps_transfer_one on src descr\n"); BUG(); } head_descr = list_first_entry(&cons_sys->head_desc_list, struct ipa_dma_xfer_wrapper, link); /* in case we are not the head of the list, wait for head to wake us */ if (xfer_descr != head_descr) { mutex_unlock(&ipa_dma_ctx->sync_lock); wait_for_completion(&xfer_descr->xfer_done); mutex_lock(&ipa_dma_ctx->sync_lock); head_descr = list_first_entry(&cons_sys->head_desc_list, struct ipa_dma_xfer_wrapper, link); BUG_ON(xfer_descr != head_descr); } mutex_unlock(&ipa_dma_ctx->sync_lock); do { /* wait for transfer to complete */ res = sps_get_iovec(cons_sys->ep->ep_hdl, &iov); if (res) IPADMA_ERR("Failed: get_iovec, returned %d loop#:%d\n" , res, i); usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX, IPA_DMA_POLLING_MAX_SLEEP_RX); i++; } while (iov.addr == 0); mutex_lock(&ipa_dma_ctx->sync_lock); list_del(&head_descr->link); cons_sys->len--; kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); /* wake the head of the list */ if (!list_empty(&cons_sys->head_desc_list)) { head_descr = list_first_entry(&cons_sys->head_desc_list, struct ipa_dma_xfer_wrapper, link); complete(&head_descr->xfer_done); } mutex_unlock(&ipa_dma_ctx->sync_lock); BUG_ON(dest != iov.addr); BUG_ON(len != iov.size); atomic_inc(&ipa_dma_ctx->total_sync_memcpy); atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt); if (ipa_dma_ctx->destroy_pending && !atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) && !atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) complete(&ipa_dma_ctx->done); IPADMA_FUNC_EXIT(); return res; fail_sps_send: list_del(&xfer_descr->link); cons_sys->len--; mutex_unlock(&ipa_dma_ctx->sync_lock); kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); fail_mem_alloc: atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt); if (ipa_dma_ctx->destroy_pending && !atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) && !atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) complete(&ipa_dma_ctx->done); return res; } 
EXPORT_SYMBOL(ipa_dma_sync_memcpy); /** * ipa_dma_async_memcpy()- Perform asynchronous memcpy using IPA. * * @dest: physical address to store the copied data. * @src: physical address of the source data to copy. * @len: number of bytes to copy. * @user_cb: callback function to notify the client when the copy was done. * @user_param: cookie for user_cb. * * Return codes: 0: success * -EINVAL: invalid params * -EPERM: operation not permitted as ipa_dma isn't enable or * initialized * -SPS_ERROR: on sps faliures * -EFAULT: descr fifo is full. */ int ipa_dma_async_memcpy(phys_addr_t dest, phys_addr_t src, int len, void (*user_cb)(void *user1), void *user_param) { int ep_idx; int res = 0; struct ipa_dma_xfer_wrapper *xfer_descr = NULL; struct ipa_sys_context *prod_sys; struct ipa_sys_context *cons_sys; unsigned long flags; IPADMA_FUNC_ENTRY(); IS_INIT("can't memcpy"); OVERLAPPING_CHECK(src, dest, len); LEN_CHECK(len); if (!user_cb) { IPADMA_ERR("null pointer: user_cb\n"); return -EINVAL; } spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags); if (!ipa_dma_ctx->is_enabled) { IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n"); spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); return -EPERM; } atomic_inc(&ipa_dma_ctx->async_memcpy_pending_cnt); spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt) >= IPA_DMA_MAX_PENDING_ASYNC) { atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); IPADMA_DBG("Reached pending requests limit\n"); return -EFAULT; } ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); cons_sys = ipa_ctx->ep[ep_idx].sys; ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD); prod_sys = ipa_ctx->ep[ep_idx].sys; xfer_descr = kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, GFP_KERNEL); if (!xfer_descr) { IPADMA_ERR("failed to alloc xfrer descr wrapper\n"); res = -ENOMEM; goto fail_mem_alloc; } xfer_descr->phys_addr_dest = dest; xfer_descr->phys_addr_src = src; 
xfer_descr->len = len; xfer_descr->callback = user_cb; xfer_descr->user1 = user_param; spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags); list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list); cons_sys->len++; res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, xfer_descr, 0); if (res) { IPADMA_ERR("Failed: sps_transfer_one on dest descr\n"); goto fail_sps_send; } res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len, NULL, SPS_IOVEC_FLAG_EOT); if (res) { IPADMA_ERR("Failed: sps_transfer_one on src descr\n"); BUG(); goto fail_sps_send; } spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); IPADMA_FUNC_EXIT(); return res; fail_sps_send: list_del(&xfer_descr->link); spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); fail_mem_alloc: atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); if (ipa_dma_ctx->destroy_pending && !atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) && !atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) complete(&ipa_dma_ctx->done); return res; } EXPORT_SYMBOL(ipa_dma_async_memcpy); /** * ipa_dma_destroy() -teardown IPADMA pipes and release ipadma. * * this is a blocking function, returns just after destroying IPADMA. 
*/ void ipa_dma_destroy(void) { int res = 0; IPADMA_FUNC_ENTRY(); if (!ipa_dma_ctx) { IPADMA_DBG("IPADMA isn't initialized\n"); return; } if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) || atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) { ipa_dma_ctx->destroy_pending = true; IPADMA_DBG("There are pending memcpy, wait for completion\n"); wait_for_completion(&ipa_dma_ctx->done); } res = ipa_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_cons_hdl); if (res) IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n"); ipa_dma_ctx->ipa_dma_async_cons_hdl = 0; res = ipa_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_cons_hdl); if (res) IPADMA_ERR("teardown IPADMA SYNC CONS failed\n"); ipa_dma_ctx->ipa_dma_sync_cons_hdl = 0; res = ipa_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_prod_hdl); if (res) IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n"); ipa_dma_ctx->ipa_dma_async_prod_hdl = 0; res = ipa_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_prod_hdl); if (res) IPADMA_ERR("teardown IPADMA SYNC PROD failed\n"); ipa_dma_ctx->ipa_dma_sync_prod_hdl = 0; ipa_dma_debugfs_destroy(); kmem_cache_destroy(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache); kfree(ipa_dma_ctx); ipa_dma_ctx = NULL; IPADMA_FUNC_EXIT(); return; } EXPORT_SYMBOL(ipa_dma_destroy); /** * ipa_dma_async_memcpy_notify_cb() -Callback function which will be called by * IPA driver after getting notify from SPS driver or poll mode on Rx operation * is completed (data was written to dest descriptor on async_cons ep). * * @priv -not in use. * @evt - event name - IPA_RECIVE. * @data -the iovec. 
*/ void ipa_dma_async_memcpy_notify_cb(void *priv , enum ipa_dp_evt_type evt, unsigned long data) { int ep_idx = 0; struct sps_iovec *iov = (struct sps_iovec *) data; struct ipa_dma_xfer_wrapper *xfer_descr_expected; struct ipa_sys_context *sys; unsigned long flags; IPADMA_FUNC_ENTRY(); ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); sys = ipa_ctx->ep[ep_idx].sys; spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags); xfer_descr_expected = list_first_entry(&sys->head_desc_list, struct ipa_dma_xfer_wrapper, link); list_del(&xfer_descr_expected->link); sys->len--; spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); BUG_ON(xfer_descr_expected->phys_addr_dest != iov->addr); BUG_ON(xfer_descr_expected->len != iov->size); atomic_inc(&ipa_dma_ctx->total_async_memcpy); atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); xfer_descr_expected->callback(xfer_descr_expected->user1); kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr_expected); if (ipa_dma_ctx->destroy_pending && !atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) && !atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) complete(&ipa_dma_ctx->done); IPADMA_FUNC_EXIT(); return; } #ifdef CONFIG_DEBUG_FS static struct dentry *dent; static struct dentry *dfile_info; static ssize_t ipa_dma_debugfs_read(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { int nbytes = 0; if (!ipa_dma_ctx) nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "Not initialized\n"); else nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "Status:\n IPADMA is %s\n", (ipa_dma_ctx->is_enabled) ? 
"Enabled" : "Disabled"); nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "Statistics:\n total sync memcpy: %d\n ", atomic_read(&ipa_dma_ctx->total_sync_memcpy)); nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "total async memcpy: %d\n ", atomic_read(&ipa_dma_ctx->total_async_memcpy)); nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "pending sync memcpy jobs: %d\n ", atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt)); nbytes += scnprintf(dbg_buff, IPADMA_MAX_MSG_LEN, "pending async memcpy jobs: %d\n", atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)); return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); } static ssize_t ipa_dma_debugfs_reset_statistics(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { unsigned long missing; s8 in_num = 0; if (sizeof(dbg_buff) < count + 1) return -EFAULT; missing = copy_from_user(dbg_buff, ubuf, count); if (missing) return -EFAULT; dbg_buff[count] = '\0'; if (kstrtos8(dbg_buff, 0, &in_num)) return -EFAULT; switch (in_num) { case 0: if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt) || atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt)) IPADMA_DBG("Note, there are pending memcpy\n"); atomic_set(&ipa_dma_ctx->total_async_memcpy, 0); atomic_set(&ipa_dma_ctx->total_sync_memcpy, 0); break; default: IPADMA_ERR("invalid argument: To reset statistics echo 0\n"); break; } return count; } const struct file_operations ipadma_stats_ops = { .read = ipa_dma_debugfs_read, .write = ipa_dma_debugfs_reset_statistics, }; static void ipa_dma_debugfs_init(void) { const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR | S_IWGRP | S_IWOTH; dent = debugfs_create_dir("ipa_dma", 0); if (IS_ERR(dent)) { IPADMA_ERR("fail to create folder ipa_dma\n"); return; } dfile_info = debugfs_create_file("info", read_write_mode, dent, 0, &ipadma_stats_ops); if (!dfile_info || IS_ERR(dfile_info)) { IPADMA_ERR("fail to create file stats\n"); goto fail; } return; fail: debugfs_remove_recursive(dent); } 
static void ipa_dma_debugfs_destroy(void) { debugfs_remove_recursive(dent); } #endif /* !CONFIG_DEBUG_FS */