Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d3c7391d authored by zhuo's avatar zhuo
Browse files

msm: camera: memmgr: Add refcount to track umd in use buffers



Currently, krefcount is used by both umd and kmd. Sometimes there is
an issue in umd, such as releasing a buffer twice, which may cause the
buffer to be released before kmd has finished accessing it. This commit
adds a new refcount to track buffers in use by umd, and keeps the
existing krefcount to track buffers in use by kmd. For a buffer shared
between kmd and umd, release starts only once both refcounts reach zero.

CRs-Fixed: 3692103
Change-Id: I5a58d9bab4c82bdb192d6a6a3d2b3d254dc04c9e
Signed-off-by: default avatarzhuo <quic_zhuo@quicinc.com>
parent 78d5c31a
Loading
Loading
Loading
Loading
+99 −18
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
@@ -188,6 +188,7 @@ static int32_t cam_mem_get_slot(void)
		set_bit(idx, tbl.bitmap);
		tbl.bufq[idx].active = true;
		mutex_init(&tbl.bufq[idx].q_lock);
		mutex_init(&tbl.bufq[idx].ref_lock);
		mutex_unlock(&tbl.m_lock);
		return idx;
	}
@@ -202,7 +203,12 @@ static void cam_mem_put_slot(int32_t idx)
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_lock(&tbl.bufq[idx].ref_lock);
	memset(&tbl.bufq[idx].krefcount, 0, sizeof(struct kref));
	memset(&tbl.bufq[idx].urefcount, 0, sizeof(struct kref));
	mutex_unlock(&tbl.bufq[idx].ref_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].ref_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);
}
@@ -295,16 +301,18 @@ int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
		return -EINVAL;
	}

	if (tbl.bufq[idx].kmdvaddr &&
		kref_get_unless_zero(&tbl.bufq[idx].krefcount)) {
	mutex_lock(&tbl.bufq[idx].ref_lock);
	if (tbl.bufq[idx].kmdvaddr && kref_get_unless_zero(&tbl.bufq[idx].krefcount)) {
		*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
		*len = tbl.bufq[idx].len;
	} else {
		mutex_unlock(&tbl.bufq[idx].ref_lock);
		CAM_ERR(CAM_MEM,
			"No KMD access request, vaddr= %p, idx= %d, handle= %d",
			tbl.bufq[idx].kmdvaddr, idx, buf_handle);
		return -EINVAL;
	}
	mutex_unlock(&tbl.bufq[idx].ref_lock);

	return 0;
}
@@ -721,7 +729,12 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = false;

	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)
		kref_init(&tbl.bufq[idx].krefcount);

	kref_init(&tbl.bufq[idx].urefcount);

	tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
	mutex_unlock(&tbl.bufq[idx].q_lock);

@@ -828,7 +841,9 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = true;
	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)
		kref_init(&tbl.bufq[idx].krefcount);
	kref_init(&tbl.bufq[idx].urefcount);
	tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
	mutex_unlock(&tbl.bufq[idx].q_lock);

@@ -957,7 +972,12 @@ static int cam_mem_mgr_cleanup_table(void)
		tbl.bufq[i].dma_buf = NULL;
		tbl.bufq[i].active = false;
		mutex_unlock(&tbl.bufq[i].q_lock);
		mutex_lock(&tbl.bufq[i].ref_lock);
		memset(&tbl.bufq[i].krefcount, 0, sizeof(struct kref));
		memset(&tbl.bufq[i].urefcount, 0, sizeof(struct kref));
		mutex_unlock(&tbl.bufq[i].ref_lock);
		mutex_destroy(&tbl.bufq[i].q_lock);
		mutex_destroy(&tbl.bufq[i].ref_lock);
	}

	bitmap_zero(tbl.bitmap, tbl.bits);
@@ -980,16 +1000,17 @@ void cam_mem_mgr_deinit(void)
	mutex_destroy(&tbl.m_lock);
}

static void cam_mem_util_unmap(struct kref *kref)
/*
 * No-op kref release callback.
 *
 * Passed to kref_put() by callers (cam_mem_put_cpu_buf,
 * cam_mem_mgr_release) that drop a reference while holding ref_lock and
 * then decide themselves, by reading both krefcount and urefcount,
 * whether to invoke cam_mem_util_unmap() directly. Keeping the release
 * callback empty ensures kref_put() itself never triggers the unmap.
 */
static void cam_mem_util_unmap_dummy(struct kref *kref)
{
	/* Debug trace only; intentionally performs no cleanup. */
	CAM_DBG(CAM_MEM, "Cam mem util unmap dummy");
}

static void cam_mem_util_unmap(int32_t idx)
{
	int rc = 0;
	int32_t idx;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
	enum cam_smmu_mapping_client client;
	struct cam_mem_buf_queue *bufq =
		container_of(kref, typeof(*bufq), krefcount);

	idx = CAM_MEM_MGR_GET_HDL_IDX(bufq->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return;
@@ -1060,6 +1081,8 @@ static void cam_mem_util_unmap(struct kref *kref)
	tbl.bufq[idx].len = 0;
	tbl.bufq[idx].num_hdl = 0;
	tbl.bufq[idx].active = false;
	memset(&tbl.bufq[idx].krefcount, 0, sizeof(struct kref));
	memset(&tbl.bufq[idx].urefcount, 0, sizeof(struct kref));
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
@@ -1067,10 +1090,28 @@ static void cam_mem_util_unmap(struct kref *kref)

}

/*
 * kref release callback for krefcount: recovers the buffer-queue index
 * from the embedded kref, validates it, unmaps the buffer and destroys
 * its ref_lock. Used with kref_put() on the kernel-only paths
 * (cam_mem_mgr_release_mem, cam_mem_mgr_free_memory_region) where no
 * umd refcount is involved.
 */
static void cam_mem_util_unmap_wrapper(struct kref *kref)
{
	int32_t idx;
	/* Map the kref back to its owning cam_mem_buf_queue entry. */
	struct cam_mem_buf_queue *bufq = container_of(kref, typeof(*bufq), krefcount);

	idx = CAM_MEM_MGR_GET_HDL_IDX(bufq->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "idx: %d not valid", idx);
		return;
	}

	cam_mem_util_unmap(idx);

	/*
	 * kref_put() only calls this once the last krefcount reference is
	 * gone, so the ref_lock can be torn down after the unmap completes.
	 */
	mutex_destroy(&tbl.bufq[idx].ref_lock);
}

void cam_mem_put_cpu_buf(int32_t buf_handle)
{
	int rc = 0;
	int idx;
	uint32_t krefcount = 0, urefcount = 0;
	bool unmap = false;

	if (!buf_handle) {
		CAM_ERR(CAM_MEM, "Invalid buf_handle");
@@ -1096,10 +1137,28 @@ void cam_mem_put_cpu_buf(int32_t buf_handle)
		return;
	}

	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
	mutex_lock(&tbl.bufq[idx].ref_lock);
	kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_dummy);

	krefcount = kref_read(&tbl.bufq[idx].krefcount);
	urefcount = kref_read(&tbl.bufq[idx].urefcount);

	if ((krefcount == 1) && (urefcount == 0))
		unmap = true;

	if (unmap) {
		cam_mem_util_unmap(idx);
		CAM_DBG(CAM_MEM,
			"Called unmap from here, buf_handle: %u, idx: %d",
			buf_handle, idx);
			"Called unmap from here, buf_handle: %u, idx: %d", buf_handle, idx);
	} else if (krefcount == 0) {
		CAM_ERR(CAM_MEM,
			"Unbalanced release Called buf_handle: %u, idx: %d",
			tbl.bufq[idx].buf_handle, idx);
	}
	mutex_unlock(&tbl.bufq[idx].ref_lock);

	if (unmap)
		mutex_destroy(&tbl.bufq[idx].ref_lock);

}
EXPORT_SYMBOL(cam_mem_put_cpu_buf);
@@ -1109,6 +1168,8 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
{
	int idx;
	int rc = 0;
	uint32_t krefcount = 0, urefcount = 0;
	bool unmap = false;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@@ -1141,10 +1202,30 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)

	CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);

	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
	mutex_lock(&tbl.bufq[idx].ref_lock);
	kref_put(&tbl.bufq[idx].urefcount, cam_mem_util_unmap_dummy);

	urefcount = kref_read(&tbl.bufq[idx].urefcount);

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
		krefcount = kref_read(&tbl.bufq[idx].krefcount);
		if ((krefcount == 1) && (urefcount == 0))
			unmap = true;
	} else {
		if (urefcount == 0)
			unmap = true;
	}

	if (unmap) {
		cam_mem_util_unmap(idx);
		CAM_DBG(CAM_MEM,
			"Called unmap from here, buf_handle: %u, idx: %d",
			cmd->buf_handle, idx);
			"Called unmap from here, buf_handle: %u, idx: %d", cmd->buf_handle, idx);
	}

	mutex_unlock(&tbl.bufq[idx].ref_lock);

	if (unmap)
		mutex_destroy(&tbl.bufq[idx].ref_lock);

	return rc;
}
@@ -1326,7 +1407,7 @@ int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_wrapper))
		CAM_DBG(CAM_MEM,
			"Called unmap from here, buf_handle: %u, idx: %d",
			tbl.bufq[idx].buf_handle, idx);
@@ -1506,7 +1587,7 @@ int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_wrapper))
		CAM_DBG(CAM_MEM,
			"Called unmap from here, buf_handle: %u, idx: %d",
			inp->mem_handle, idx);
+7 −2
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _CAM_MEM_MGR_H_
@@ -44,8 +44,11 @@ enum cam_smmu_mapping_client {
 * @is_imported:    Flag indicating if buffer is imported from an FD in user
 *                  space
 * @krefcount:      Reference counter to track whether the buffer is
 *                  mapped and in use
 *                  mapped and in use by kmd
 * @smmu_mapping_client: Client buffer (User or kernel)
 * @urefcount:      Reference counter to track whether the buffer is
 *                  mapped and in use by umd
 * @ref_lock:       Mutex lock for refcount
 */
struct cam_mem_buf_queue {
	struct dma_buf *dma_buf;
@@ -63,6 +66,8 @@ struct cam_mem_buf_queue {
	bool is_imported;
	struct kref krefcount;
	enum cam_smmu_mapping_client smmu_mapping_client;
	struct kref urefcount;
	struct mutex ref_lock;
};

/**