Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bc1f1248 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: ipa3: Explicitly enable IPA DMA for IPA MHI"

parents 0935a70e 80cc0d3c
Loading
Loading
Loading
Loading
+23 −4
Original line number Diff line number Diff line
@@ -182,6 +182,12 @@ static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr,
			return -ENOMEM;
		}

		res = ipa_dma_enable();
		if (res) {
			IPA_MHI_ERR("failed to enable IPA DMA rc=%d\n", res);
			goto fail_dma_enable;
		}

		if (dir == IPA_MHI_DMA_FROM_HOST) {
			res = ipa_dma_sync_memcpy(mem.phys_base, host_addr,
				size);
@@ -203,8 +209,7 @@ static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr,
				goto fail_memcopy;
			}
		}
		dma_free_coherent(pdev, mem.size, mem.base,
			mem.phys_base);
		goto dma_succeed;
	} else {
		void *host_ptr;

@@ -227,9 +232,14 @@ static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr,
	IPA_MHI_FUNC_EXIT();
	return 0;

dma_succeed:
	IPA_MHI_FUNC_EXIT();
	res = 0;
fail_memcopy:
	dma_free_coherent(ipa_get_dma_dev(), mem.size, mem.base,
			mem.phys_base);
	if (ipa_dma_disable())
		IPA_MHI_ERR("failed to disable IPA DMA\n");
fail_dma_enable:
	dma_free_coherent(pdev, mem.size, mem.base, mem.phys_base);
	return res;
}

@@ -2408,6 +2418,7 @@ void ipa_mhi_destroy(void)
		goto fail;
	}

	ipa_dma_destroy();
	ipa_mhi_debugfs_destroy();
	destroy_workqueue(ipa_mhi_client_ctx->wq);
	kfree(ipa_mhi_client_ctx);
@@ -2500,6 +2511,12 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params)
		goto fail_create_wq;
	}

	res = ipa_dma_init();
	if (res) {
		IPA_MHI_ERR("failed to init ipa dma %d\n", res);
		goto fail_dma_init;
	}

	/* Create PROD in IPA RM */
	memset(&mhi_prod_params, 0, sizeof(mhi_prod_params));
	mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
@@ -2557,6 +2574,8 @@ fail_create_rm_cons:
fail_perf_rm_prod:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
fail_create_rm_prod:
	ipa_dma_destroy();
fail_dma_init:
	destroy_workqueue(ipa_mhi_client_ctx->wq);
fail_create_wq:
	kfree(ipa_mhi_client_ctx);
+23 −13
Original line number Diff line number Diff line
@@ -5255,7 +5255,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
	if (result) {
		IPAERR("Failed to alloc pkt_init payload\n");
		result = -ENODEV;
		goto fail_create_apps_resource;
		goto fail_allok_pkt_init;
	}

	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
@@ -5266,6 +5266,13 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
	init_completion(&ipa3_ctx->init_completion_obj);
	init_completion(&ipa3_ctx->uc_loaded_completion_obj);

	result = ipa3_dma_setup();
	if (result) {
		IPAERR("Failed to setup IPA DMA\n");
		result = -ENODEV;
		goto fail_ipa_dma_setup;
	}

	/*
	 * For GSI, we can't register the GSI driver yet, as it expects
	 * the GSI FW to be up and running before the registration.
@@ -5280,7 +5287,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
			if (result) {
				IPAERR("gsi pre FW loading config failed\n");
				result = -ENODEV;
				goto fail_ipa_init_interrupts;
				goto fail_gsi_pre_fw_load_init;
			}
		}
	} else {
@@ -5291,7 +5298,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
		result = ipa3_post_init(resource_p, ipa_dev);
		if (result) {
			IPAERR("ipa3_post_init failed\n");
			goto fail_ipa_post_init;
			goto fail_gsi_pre_fw_load_init;
		}
	}

@@ -5311,13 +5318,13 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,

	return 0;



fail_cdev_add:
fail_ipa_post_init:
	if (ipa3_bus_scale_table) {
		msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
		ipa3_bus_scale_table = NULL;
	}
fail_ipa_init_interrupts:
fail_gsi_pre_fw_load_init:
	ipa3_dma_shutdown();
fail_ipa_dma_setup:
fail_allok_pkt_init:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
fail_create_apps_resource:
	ipa_rm_exit();
@@ -5362,18 +5369,21 @@ fail_flt_rule_cache:
fail_create_transport_wq:
	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
fail_init_hw:
	ipahal_destroy();
fail_ipahal:
	iounmap(ipa3_ctx->mmio);
fail_remap:
	ipa3_disable_clks();
fail_init_active_client:
	ipa3_active_clients_log_destroy();
fail_init_active_client:
fail_clk:
	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
		msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
fail_ipahal:
	ipa3_bus_scale_table = NULL;
fail_bus_reg:
	ipahal_destroy();
	if (ipa3_bus_scale_table) {
		msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
		ipa3_bus_scale_table = NULL;
	}
fail_bind:
	kfree(ipa3_ctx->ctrl);
fail_mem_ctrl:
+193 −40
Original line number Diff line number Diff line
@@ -81,7 +81,7 @@ static void ipa3_dma_debugfs_destroy(void) {}

/**
 * struct ipa3_dma_ctx -IPADMA driver context information
 * @is_enabled:is ipa_dma enabled?
 * @enable_ref_cnt: ipa dma enable reference count
 * @destroy_pending: destroy ipa_dma after handling all pending memcpy
 * @ipa_dma_xfer_wrapper_cache: cache of ipa3_dma_xfer_wrapper structs
 * @sync_lock: lock for synchronisation in sync_memcpy
@@ -100,7 +100,7 @@ static void ipa3_dma_debugfs_destroy(void) {}
 * @total_uc_memcpy: total number of uc memcpy (statistics)
 */
struct ipa3_dma_ctx {
	bool is_enabled;
	unsigned enable_ref_cnt;
	bool destroy_pending;
	struct kmem_cache *ipa_dma_xfer_wrapper_cache;
	struct mutex sync_lock;
@@ -125,6 +125,70 @@ struct ipa3_dma_ctx {
};
static struct ipa3_dma_ctx *ipa3_dma_ctx;

/**
 * struct ipa3_dma_init_refcnt_ctrl - IPADMA driver init control information
 * @ref_cnt: reference count for initialization operations; guarded by @lock.
 *	0 means ipa_dma is not initialized.
 * @lock: lock protecting @ref_cnt
 */
struct ipa3_dma_init_refcnt_ctrl {
	unsigned int ref_cnt;
	struct mutex lock;
};
static struct ipa3_dma_init_refcnt_ctrl *ipa3_dma_init_refcnt_ctrl;

/**
 * ipa3_dma_setup() - One time setup for IPA DMA
 *
 * This function should be called once to setup ipa dma
 *  by creating the init reference count controller
 *
 * Return codes: 0: success
 *		 Negative value: failure
 */
int ipa3_dma_setup(void)
{
	IPADMA_FUNC_ENTRY();

	if (ipa3_dma_init_refcnt_ctrl) {
		IPADMA_ERR("Setup already done\n");
		return -EFAULT;
	}

	ipa3_dma_init_refcnt_ctrl =
		kzalloc(sizeof(*(ipa3_dma_init_refcnt_ctrl)), GFP_KERNEL);

	if (!ipa3_dma_init_refcnt_ctrl) {
		IPADMA_ERR("kzalloc error.\n");
		return -ENOMEM;
	}

	mutex_init(&ipa3_dma_init_refcnt_ctrl->lock);

	IPADMA_FUNC_EXIT();
	return 0;
}

/**
 * ipa3_dma_shutdown() - Clear setup operations.
 *
 * Counterpart of ipa3_dma_setup(); frees the init reference count
 * controller. Should be called during IPA driver unloading.
 * It assumes all ipa_dma operations are done and ipa_dma is destroyed.
 *
 * Return codes: None.
 */
void ipa3_dma_shutdown(void)
{
	struct ipa3_dma_init_refcnt_ctrl *ctrl = ipa3_dma_init_refcnt_ctrl;

	IPADMA_FUNC_ENTRY();

	/* Nothing to tear down if setup never ran */
	if (ctrl == NULL)
		return;

	ipa3_dma_init_refcnt_ctrl = NULL;
	kfree(ctrl);

	IPADMA_FUNC_EXIT();
}

/**
 * ipa3_dma_init() -Initialize IPADMA.
@@ -133,8 +197,10 @@ static struct ipa3_dma_ctx *ipa3_dma_ctx;
 *	MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS
 *	MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_SYNC_CONS
 *
 * Can be executed several times (re-entrant)
 *
 * Return codes: 0: success
 *		-EFAULT: IPADMA is already initialized
 *		-EFAULT: Mismatch between context existence and init ref_cnt
 *		-EINVAL: IPA driver is not initialized
 *		-ENOMEM: allocating memory error
 *		-EPERM: pipe connection failed
@@ -149,21 +215,43 @@ int ipa3_dma_init(void)

	IPADMA_FUNC_ENTRY();

	if (!ipa3_dma_init_refcnt_ctrl) {
		IPADMA_ERR("Setup isn't done yet!\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_dma_init_refcnt_ctrl->lock);
	if (ipa3_dma_init_refcnt_ctrl->ref_cnt > 0) {
		IPADMA_DBG("Already initialized refcnt=%d\n",
			ipa3_dma_init_refcnt_ctrl->ref_cnt);
		if (!ipa3_dma_ctx) {
			IPADMA_ERR("Context missing. refcnt=%d\n",
				ipa3_dma_init_refcnt_ctrl->ref_cnt);
			res = -EFAULT;
		} else {
			ipa3_dma_init_refcnt_ctrl->ref_cnt++;
		}
		goto init_unlock;
	}

	if (ipa3_dma_ctx) {
		IPADMA_ERR("Already initialized.\n");
		return -EFAULT;
		IPADMA_ERR("Context already exist\n");
		res = -EFAULT;
		goto init_unlock;
	}

	if (!ipa3_is_ready()) {
		IPADMA_ERR("IPA is not ready yet\n");
		return -EINVAL;
		res = -EINVAL;
		goto init_unlock;
	}

	ipa_dma_ctx_t = kzalloc(sizeof(*(ipa3_dma_ctx)), GFP_KERNEL);

	if (!ipa_dma_ctx_t) {
		IPADMA_ERR("kzalloc error.\n");
		return -ENOMEM;
		res = -ENOMEM;
		goto init_unlock;
	}

	ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache =
@@ -180,7 +268,7 @@ int ipa3_dma_init(void)
	mutex_init(&ipa_dma_ctx_t->sync_lock);
	spin_lock_init(&ipa_dma_ctx_t->pending_lock);
	init_completion(&ipa_dma_ctx_t->done);
	ipa_dma_ctx_t->is_enabled = false;
	ipa_dma_ctx_t->enable_ref_cnt = 0;
	ipa_dma_ctx_t->destroy_pending = false;
	atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0);
	atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0);
@@ -294,10 +382,12 @@ int ipa3_dma_init(void)
	}
	ipa3_dma_debugfs_init();
	ipa3_dma_ctx = ipa_dma_ctx_t;
	ipa3_dma_init_refcnt_ctrl->ref_cnt = 1;
	IPADMA_DBG("ASYNC MEMCPY pipes are connected\n");

	IPADMA_FUNC_EXIT();
	return res;
	goto init_unlock;

fail_async_cons:
	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl);
fail_async_prod:
@@ -313,6 +403,8 @@ fail_alloc_dummy:
fail_mem_ctrl:
	kfree(ipa_dma_ctx_t);
	ipa3_dma_ctx = NULL;
init_unlock:
	mutex_unlock(&ipa3_dma_init_refcnt_ctrl->lock);
	return res;

}
@@ -320,26 +412,29 @@ fail_mem_ctrl:
/**
 * ipa3_dma_enable() -Vote for IPA clocks.
 *
 * Can be executed several times (re-entrant)
 *
 *Return codes: 0: success
 *		-EINVAL: IPADMA is not initialized
 *		-EPERM: Operation not permitted as ipa_dma is already
 *		 enabled
 */
int ipa3_dma_enable(void)
{
	IPADMA_FUNC_ENTRY();
	if (ipa3_dma_ctx == NULL) {
	if ((ipa3_dma_ctx == NULL) ||
		(ipa3_dma_init_refcnt_ctrl->ref_cnt < 1)) {
		IPADMA_ERR("IPADMA isn't initialized, can't enable\n");
		return -EPERM;
		return -EINVAL;
	}
	mutex_lock(&ipa3_dma_ctx->enable_lock);
	if (ipa3_dma_ctx->is_enabled) {
		IPADMA_ERR("Already enabled.\n");
	if (ipa3_dma_ctx->enable_ref_cnt > 0) {
		IPADMA_ERR("Already enabled refcnt=%d\n",
			ipa3_dma_ctx->enable_ref_cnt);
		ipa3_dma_ctx->enable_ref_cnt++;
		mutex_unlock(&ipa3_dma_ctx->enable_lock);
		return -EPERM;
		return 0;
	}
	IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
	ipa3_dma_ctx->is_enabled = true;
	ipa3_dma_ctx->enable_ref_cnt = 1;
	mutex_unlock(&ipa3_dma_ctx->enable_lock);

	IPADMA_FUNC_EXIT();
@@ -379,32 +474,45 @@ static bool ipa3_dma_work_pending(void)
int ipa3_dma_disable(void)
{
	unsigned long flags;
	int res = 0;
	bool dec_clks = false;

	IPADMA_FUNC_ENTRY();
	if (ipa3_dma_ctx == NULL) {
	if ((ipa3_dma_ctx == NULL) ||
		(ipa3_dma_init_refcnt_ctrl->ref_cnt < 1)) {
		IPADMA_ERR("IPADMA isn't initialized, can't disable\n");
		return -EPERM;
		return -EINVAL;
	}
	mutex_lock(&ipa3_dma_ctx->enable_lock);
	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
	if (!ipa3_dma_ctx->is_enabled) {
		IPADMA_ERR("Already disabled.\n");
		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
		mutex_unlock(&ipa3_dma_ctx->enable_lock);
		return -EPERM;
	if (ipa3_dma_ctx->enable_ref_cnt > 1) {
		IPADMA_DBG("Multiple enablement done. refcnt=%d\n",
			ipa3_dma_ctx->enable_ref_cnt);
		ipa3_dma_ctx->enable_ref_cnt--;
		goto completed;
	}

	if (ipa3_dma_ctx->enable_ref_cnt == 0) {
		IPADMA_ERR("Already disabled\n");
		res = -EPERM;
		goto completed;
	}

	if (ipa3_dma_work_pending()) {
		IPADMA_ERR("There is pending work, can't disable.\n");
		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
		mutex_unlock(&ipa3_dma_ctx->enable_lock);
		return -EFAULT;
		res = -EFAULT;
		goto completed;
	}
	ipa3_dma_ctx->is_enabled = false;
	ipa3_dma_ctx->enable_ref_cnt = 0;
	dec_clks = true;
	IPADMA_FUNC_EXIT();

completed:
	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
	if (dec_clks)
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
	mutex_unlock(&ipa3_dma_ctx->enable_lock);
	IPADMA_FUNC_EXIT();
	return 0;
	return res;
}

/**
@@ -460,7 +568,7 @@ int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len)
		}
	}
	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
	if (!ipa3_dma_ctx->is_enabled) {
	if (!ipa3_dma_ctx->enable_ref_cnt) {
		IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
		return -EPERM;
@@ -751,7 +859,7 @@ int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
		return -EINVAL;
	}
	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
	if (!ipa3_dma_ctx->is_enabled) {
	if (!ipa3_dma_ctx->enable_ref_cnt) {
		IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n");
		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
		return -EPERM;
@@ -956,7 +1064,7 @@ int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
	}

	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
	if (!ipa3_dma_ctx->is_enabled) {
	if (!ipa3_dma_ctx->enable_ref_cnt) {
		IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
		return -EPERM;
@@ -990,17 +1098,36 @@ void ipa3_dma_destroy(void)
	int res = 0;

	IPADMA_FUNC_ENTRY();
	if (!ipa3_dma_ctx) {
		IPADMA_ERR("IPADMA isn't initialized\n");

	if (!ipa3_dma_init_refcnt_ctrl) {
		IPADMA_ERR("Setup isn't done\n");
		return;
	}

	mutex_lock(&ipa3_dma_init_refcnt_ctrl->lock);
	if (ipa3_dma_init_refcnt_ctrl->ref_cnt > 1) {
		IPADMA_DBG("Multiple initialization done. refcnt=%d\n",
			ipa3_dma_init_refcnt_ctrl->ref_cnt);
		ipa3_dma_init_refcnt_ctrl->ref_cnt--;
		goto completed;
	}

	if ((!ipa3_dma_ctx) || (ipa3_dma_init_refcnt_ctrl->ref_cnt == 0)) {
		IPADMA_ERR("IPADMA isn't initialized ctx=%pK\n", ipa3_dma_ctx);
		goto completed;
	}

	if (ipa3_dma_work_pending()) {
		ipa3_dma_ctx->destroy_pending = true;
		IPADMA_DBG("There are pending memcpy, wait for completion\n");
		wait_for_completion(&ipa3_dma_ctx->done);
	}

	if (ipa3_dma_ctx->enable_ref_cnt > 0) {
		IPADMA_ERR("IPADMA still enabled\n");
		goto completed;
	}

	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_cons_hdl);
	if (res)
		IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n");
@@ -1026,7 +1153,11 @@ void ipa3_dma_destroy(void)
	kfree(ipa3_dma_ctx);
	ipa3_dma_ctx = NULL;

	ipa3_dma_init_refcnt_ctrl->ref_cnt = 0;
	IPADMA_FUNC_EXIT();

completed:
	mutex_unlock(&ipa3_dma_init_refcnt_ctrl->lock);
}

/**
@@ -1089,15 +1220,31 @@ static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	int nbytes = 0;

	if (!ipa3_dma_init_refcnt_ctrl) {
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPADMA_MAX_MSG_LEN - nbytes,
			"Setup was not done\n");
		goto completed;

	}

	if (!ipa3_dma_ctx) {
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPADMA_MAX_MSG_LEN - nbytes,
			"Not initialized\n");
			"Status:\n	Not initialized (ref_cnt=%d)\n",
			ipa3_dma_init_refcnt_ctrl->ref_cnt);
	} else {
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPADMA_MAX_MSG_LEN - nbytes,
			"Status:\n	IPADMA is %s\n",
			(ipa3_dma_ctx->is_enabled) ? "Enabled" : "Disabled");
			"Status:\n	Initialized (ref_cnt=%d)\n",
			ipa3_dma_init_refcnt_ctrl->ref_cnt);
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPADMA_MAX_MSG_LEN - nbytes,
			"	%s (ref_cnt=%d)\n",
			(ipa3_dma_ctx->enable_ref_cnt > 0) ?
			"Enabled" : "Disabled",
			ipa3_dma_ctx->enable_ref_cnt);
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPADMA_MAX_MSG_LEN - nbytes,
			"Statistics:\n	total sync memcpy: %d\n	",
@@ -1106,6 +1253,10 @@ static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf,
			IPADMA_MAX_MSG_LEN - nbytes,
			"total async memcpy: %d\n	",
			atomic_read(&ipa3_dma_ctx->total_async_memcpy));
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPADMA_MAX_MSG_LEN - nbytes,
			"total uc memcpy: %d\n	",
			atomic_read(&ipa3_dma_ctx->total_uc_memcpy));
		nbytes += scnprintf(&dbg_buff[nbytes],
			IPADMA_MAX_MSG_LEN - nbytes,
			"pending sync memcpy jobs: %d\n	",
@@ -1119,6 +1270,8 @@ static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf,
			"pending uc memcpy jobs: %d\n",
			atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt));
	}

completed:
	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}

+2 −0
Original line number Diff line number Diff line
@@ -2016,6 +2016,8 @@ void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
			      struct ipa3_uc_hdlrs *hdlrs);
int ipa3_create_nat_device(void);
int ipa3_uc_notify_clk_state(bool enabled);
int ipa3_dma_setup(void);
void ipa3_dma_shutdown(void);
void ipa3_dma_async_memcpy_notify_cb(void *priv,
		enum ipa_dp_evt_type evt, unsigned long data);

+99 −5
Original line number Diff line number Diff line
@@ -364,11 +364,11 @@ static int ipa_test_dma_sync_async_memcpy(int size)
}

/**
 * TEST: test control API - enable/disable dma
 * TEST: test enable/disable dma
 *	1. enable dma
 *	2. disable dma
 */
static int ipa_test_dma_control_api(void *priv)
static int ipa_test_dma_enable_disable(void *priv)
{
	int rc;

@@ -391,6 +391,92 @@ static int ipa_test_dma_control_api(void *priv)
	return 0;
}

/**
 * TEST: test init/enable/disable/destroy dma
 *	1. init dma
 *	2. enable dma
 *	3. disable dma
 *	4. destroy dma
 */
static int ipa_test_dma_init_enbl_disable_destroy(void *priv)
{
	int rc;

	IPA_UT_LOG("Test Start\n");

	rc = ipa_dma_init();
	if (rc) {
		IPA_UT_LOG("DMA Init failed rc=%d\n", rc);
		IPA_UT_TEST_FAIL_REPORT("fail init dma");
		return rc;
	}

	rc = ipa_dma_enable();
	if (rc) {
		ipa_dma_destroy();
		IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
		IPA_UT_TEST_FAIL_REPORT("fail enable dma");
		return rc;
	}

	rc = ipa_dma_disable();
	if (rc) {
		/*
		 * Balance the ipa_dma_init() above even on failure;
		 * otherwise the init reference leaks into later tests.
		 */
		ipa_dma_destroy();
		IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
		IPA_UT_TEST_FAIL_REPORT("fail disable dma");
		return rc;
	}

	ipa_dma_destroy();

	return 0;
}

/**
 * TEST: test enablex2/disablex2 dma
 *	1. enable dma
 *	2. enable dma
 *	3. disable dma
 *	4. disable dma
 *
 * NOTE(review): this test relies on the suite setup having already
 * called ipa_dma_init(); it must not drop that init reference itself.
 */
static int ipa_test_dma_enblx2_disablex2(void *priv)
{
	int rc;

	IPA_UT_LOG("Test Start\n");

	rc = ipa_dma_enable();
	if (rc) {
		IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
		IPA_UT_TEST_FAIL_REPORT("fail enable dma");
		return rc;
	}

	rc = ipa_dma_enable();
	if (rc) {
		/*
		 * Undo the first enable only. This test did not call
		 * ipa_dma_init(), so calling ipa_dma_destroy() here would
		 * drop an init reference owned by the suite setup.
		 */
		ipa_dma_disable();
		IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
		IPA_UT_TEST_FAIL_REPORT("fail enable dma");
		return rc;
	}

	rc = ipa_dma_disable();
	if (rc) {
		IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
		IPA_UT_TEST_FAIL_REPORT("fail disable dma");
		return rc;
	}

	rc = ipa_dma_disable();
	if (rc) {
		IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
		IPA_UT_TEST_FAIL_REPORT("fail disable dma");
		return rc;
	}

	return 0;
}

/**
 * TEST: memcpy before dma enable
 *
@@ -999,9 +1085,17 @@ static int ipa_test_dma_sync_memcpy_max_pkt_size(void *priv)
IPA_UT_DEFINE_SUITE_START(dma, "DMA for GSI",
	ipa_test_dma_setup, ipa_test_dma_teardown)
{
	IPA_UT_ADD_TEST(control_api,
		"Control API",
		ipa_test_dma_control_api,
	IPA_UT_ADD_TEST(init_enable_disable_destroy,
		"Init->Enable->Disable->Destroy",
		ipa_test_dma_enable_disable,
		true, IPA_HW_v3_0, IPA_HW_MAX),
	IPA_UT_ADD_TEST(initx2_enable_disable_destroyx2,
		"Initx2->Enable->Disable->Destroyx2",
		ipa_test_dma_init_enbl_disable_destroy,
		true, IPA_HW_v3_0, IPA_HW_MAX),
	IPA_UT_ADD_TEST(init_enablex2_disablex2_destroy,
		"Init->Enablex2->Disablex2->Destroy",
		ipa_test_dma_enblx2_disablex2,
		true, IPA_HW_v3_0, IPA_HW_MAX),
	IPA_UT_ADD_TEST(memcpy_before_enable,
		"Call memcpy before dma enable and expect it to fail",
Loading