Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0bc55ada authored by Gauri Joshi
Browse files

msm: mhi_dev: Fix memory leak



mhi_dev_cache_host_cfg will be called during cold boot and also
when processing the mhi device reset command from the host. Do not
re-allocate the command, event and channel context cache memory if
it has already been allocated.

Change-Id: I55826e05b3d83c9fcf3d4b23e26b91253130671e
Signed-off-by: Siva Kumar Akkireddi <sivaa@codeaurora.org>
Signed-off-by: Gauri Joshi <gaurjosh@codeaurora.org>
parent 2b66ad4b
Loading
Loading
Loading
Loading
+57 −23
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.*/
/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.*/

/*
 * MSM MHI device core driver.
@@ -2408,32 +2408,52 @@ static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi)
					mhi->cfg.event_rings;
	mhi->ch_ctx_shadow.size = sizeof(struct mhi_dev_ch_ctx) *
					mhi->cfg.channels;

	/*
	 * This func mhi_dev_cache_host_cfg will be called when
	 * processing mhi device reset as well, do not allocate
	 * the command, event and channel context caches if they
	 * were already allocated during device boot, to avoid
	 * memory leak.
	 */
	if (!mhi->cmd_ctx_cache) {
		mhi->cmd_ctx_cache = dma_alloc_coherent(&pdev->dev,
			sizeof(struct mhi_dev_cmd_ctx),
			&mhi->cmd_ctx_cache_dma_handle,
			GFP_KERNEL);
		if (!mhi->cmd_ctx_cache) {
			pr_err("no memory while allocating cmd ctx\n");
		return -ENOMEM;
			rc = -ENOMEM;
			goto exit;
		}
	}

	if (!mhi->ev_ctx_cache) {
		mhi->ev_ctx_cache = dma_alloc_coherent(&pdev->dev,
			sizeof(struct mhi_dev_ev_ctx) *
			mhi->cfg.event_rings,
			&mhi->ev_ctx_cache_dma_handle,
			GFP_KERNEL);
	if (!mhi->ev_ctx_cache)
		return -ENOMEM;
		if (!mhi->ev_ctx_cache) {
			rc = -ENOMEM;
			goto exit;
		}
	}
	memset(mhi->ev_ctx_cache, 0, sizeof(struct mhi_dev_ev_ctx) *
						mhi->cfg.event_rings);

	if (!mhi->ch_ctx_cache) {
		mhi->ch_ctx_cache = dma_alloc_coherent(&pdev->dev,
			sizeof(struct mhi_dev_ch_ctx) *
			mhi->cfg.channels,
			&mhi->ch_ctx_cache_dma_handle,
			GFP_KERNEL);
	if (!mhi->ch_ctx_cache)
		return -ENOMEM;

		if (!mhi->ch_ctx_cache) {
			rc = -ENOMEM;
			goto exit;
		}
	}
	memset(mhi->ch_ctx_cache, 0, sizeof(struct mhi_dev_ch_ctx) *
						mhi->cfg.channels);
	if (MHI_USE_DMA(mhi)) {
		data_transfer.phy_addr = mhi->cmd_ctx_cache_dma_handle;
		data_transfer.host_pa = mhi->cmd_ctx_shadow.host_pa;
@@ -2466,6 +2486,20 @@ static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi)

	return mhi_ring_start(&mhi->ring[0],
			(union mhi_dev_ring_ctx *)mhi->cmd_ctx_cache, mhi);

exit:
	if (mhi->cmd_ctx_cache)
		dma_free_coherent(&pdev->dev,
			sizeof(struct mhi_dev_cmd_ctx),
			mhi->cmd_ctx_cache,
			mhi->cmd_ctx_cache_dma_handle);
	if (mhi->ev_ctx_cache)
		dma_free_coherent(&pdev->dev,
			sizeof(struct mhi_dev_ev_ctx) *
			mhi->cfg.event_rings,
			mhi->ev_ctx_cache,
			mhi->ev_ctx_cache_dma_handle);
	return rc;
}

void mhi_dev_pm_relax(void)