Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cf1dd71a authored by Linux Build Service Account's avatar Linux Build Service Account
Browse files

Merge 6b4232c9 on remote branch

Change-Id: I1e900deb68c5ecb92ff64c006afa6e6df261c86f
parents fd36e1bb 6b4232c9
Loading
Loading
Loading
Loading
+99 −18
Original line number | Diff line number | Diff line content
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
@@ -188,6 +188,7 @@ static int32_t cam_mem_get_slot(void)
		set_bit(idx, tbl.bitmap);
		tbl.bufq[idx].active = true;
		mutex_init(&tbl.bufq[idx].q_lock);
		mutex_init(&tbl.bufq[idx].ref_lock);
		mutex_unlock(&tbl.m_lock);
		return idx;
	}
@@ -202,7 +203,12 @@ static void cam_mem_put_slot(int32_t idx)
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_lock(&tbl.bufq[idx].ref_lock);
	memset(&tbl.bufq[idx].krefcount, 0, sizeof(struct kref));
	memset(&tbl.bufq[idx].urefcount, 0, sizeof(struct kref));
	mutex_unlock(&tbl.bufq[idx].ref_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].ref_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);
}
@@ -295,16 +301,18 @@ int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
		return -EINVAL;
	}

	if (tbl.bufq[idx].kmdvaddr &&
		kref_get_unless_zero(&tbl.bufq[idx].krefcount)) {
	mutex_lock(&tbl.bufq[idx].ref_lock);
	if (tbl.bufq[idx].kmdvaddr && kref_get_unless_zero(&tbl.bufq[idx].krefcount)) {
		*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
		*len = tbl.bufq[idx].len;
	} else {
		mutex_unlock(&tbl.bufq[idx].ref_lock);
		CAM_ERR(CAM_MEM,
			"No KMD access request, vaddr= %p, idx= %d, handle= %d",
			tbl.bufq[idx].kmdvaddr, idx, buf_handle);
		return -EINVAL;
	}
	mutex_unlock(&tbl.bufq[idx].ref_lock);

	return 0;
}
@@ -721,7 +729,12 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = false;

	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)
		kref_init(&tbl.bufq[idx].krefcount);

	kref_init(&tbl.bufq[idx].urefcount);

	tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
	mutex_unlock(&tbl.bufq[idx].q_lock);

@@ -828,7 +841,9 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = true;
	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)
		kref_init(&tbl.bufq[idx].krefcount);
	kref_init(&tbl.bufq[idx].urefcount);
	tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
	mutex_unlock(&tbl.bufq[idx].q_lock);

@@ -957,7 +972,12 @@ static int cam_mem_mgr_cleanup_table(void)
		tbl.bufq[i].dma_buf = NULL;
		tbl.bufq[i].active = false;
		mutex_unlock(&tbl.bufq[i].q_lock);
		mutex_lock(&tbl.bufq[i].ref_lock);
		memset(&tbl.bufq[i].krefcount, 0, sizeof(struct kref));
		memset(&tbl.bufq[i].urefcount, 0, sizeof(struct kref));
		mutex_unlock(&tbl.bufq[i].ref_lock);
		mutex_destroy(&tbl.bufq[i].q_lock);
		mutex_destroy(&tbl.bufq[i].ref_lock);
	}

	bitmap_zero(tbl.bitmap, tbl.bits);
@@ -980,16 +1000,17 @@ void cam_mem_mgr_deinit(void)
	mutex_destroy(&tbl.m_lock);
}

static void cam_mem_util_unmap(struct kref *kref)
/*
 * cam_mem_util_unmap_dummy - no-op kref release callback.
 *
 * Passed to kref_put() by callers that want to decrement a buffer
 * refcount without triggering the real unmap from the release path;
 * the caller then reads the counters itself and performs the actual
 * unmap explicitly (presumably to control lock ordering around
 * ref_lock — confirm against the kref_put() call sites).
 *
 * @kref: refcount embedded in the buffer-queue entry (unused here).
 */
static void cam_mem_util_unmap_dummy(struct kref *kref)
{
	CAM_DBG(CAM_MEM, "Cam mem util unmap dummy");
}

static void cam_mem_util_unmap(int32_t idx)
{
	int rc = 0;
	int32_t idx;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
	enum cam_smmu_mapping_client client;
	struct cam_mem_buf_queue *bufq =
		container_of(kref, typeof(*bufq), krefcount);

	idx = CAM_MEM_MGR_GET_HDL_IDX(bufq->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return;
@@ -1060,6 +1081,8 @@ static void cam_mem_util_unmap(struct kref *kref)
	tbl.bufq[idx].len = 0;
	tbl.bufq[idx].num_hdl = 0;
	tbl.bufq[idx].active = false;
	memset(&tbl.bufq[idx].krefcount, 0, sizeof(struct kref));
	memset(&tbl.bufq[idx].urefcount, 0, sizeof(struct kref));
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
@@ -1067,10 +1090,28 @@ static void cam_mem_util_unmap(struct kref *kref)

}

/*
 * cam_mem_util_unmap_wrapper - kref release callback that performs the
 * real buffer unmap.
 *
 * Registered with kref_put() on the kernel refcount (krefcount); invoked
 * when the last kernel reference is dropped. Recovers the buffer-queue
 * index from the handle stored in the entry, validates it, and hands off
 * to cam_mem_util_unmap() for the actual teardown.
 *
 * @kref: pointer to the krefcount member inside a cam_mem_buf_queue entry.
 *
 * NOTE(review): ref_lock is destroyed here, after cam_mem_util_unmap()
 * returns — this assumes no other path can still be holding or about to
 * take ref_lock for this slot once the last kref is gone; confirm against
 * the cam_mem_put_cpu_buf()/cam_mem_mgr_release() locking scheme.
 */
static void cam_mem_util_unmap_wrapper(struct kref *kref)
{
	int32_t idx;
	/* Recover the enclosing buffer-queue entry from the embedded kref */
	struct cam_mem_buf_queue *bufq = container_of(kref, typeof(*bufq), krefcount);

	/* Index 0 is reserved/invalid; reject out-of-range slots */
	idx = CAM_MEM_MGR_GET_HDL_IDX(bufq->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "idx: %d not valid", idx);
		return;
	}

	cam_mem_util_unmap(idx);

	mutex_destroy(&tbl.bufq[idx].ref_lock);
}

void cam_mem_put_cpu_buf(int32_t buf_handle)
{
	int rc = 0;
	int idx;
	uint32_t krefcount = 0, urefcount = 0;
	bool unmap = false;

	if (!buf_handle) {
		CAM_ERR(CAM_MEM, "Invalid buf_handle");
@@ -1096,10 +1137,28 @@ void cam_mem_put_cpu_buf(int32_t buf_handle)
		return;
	}

	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
	mutex_lock(&tbl.bufq[idx].ref_lock);
	kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_dummy);

	krefcount = kref_read(&tbl.bufq[idx].krefcount);
	urefcount = kref_read(&tbl.bufq[idx].urefcount);

	if ((krefcount == 1) && (urefcount == 0))
		unmap = true;

	if (unmap) {
		cam_mem_util_unmap(idx);
		CAM_DBG(CAM_MEM,
			"Called unmap from here, buf_handle: %u, idx: %d",
			buf_handle, idx);
			"Called unmap from here, buf_handle: %u, idx: %d", buf_handle, idx);
	} else if (krefcount == 0) {
		CAM_ERR(CAM_MEM,
			"Unbalanced release Called buf_handle: %u, idx: %d",
			tbl.bufq[idx].buf_handle, idx);
	}
	mutex_unlock(&tbl.bufq[idx].ref_lock);

	if (unmap)
		mutex_destroy(&tbl.bufq[idx].ref_lock);

}
EXPORT_SYMBOL(cam_mem_put_cpu_buf);
@@ -1109,6 +1168,8 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
{
	int idx;
	int rc = 0;
	uint32_t krefcount = 0, urefcount = 0;
	bool unmap = false;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@@ -1141,10 +1202,30 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)

	CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);

	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
	mutex_lock(&tbl.bufq[idx].ref_lock);
	kref_put(&tbl.bufq[idx].urefcount, cam_mem_util_unmap_dummy);

	urefcount = kref_read(&tbl.bufq[idx].urefcount);

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
		krefcount = kref_read(&tbl.bufq[idx].krefcount);
		if ((krefcount == 1) && (urefcount == 0))
			unmap = true;
	} else {
		if (urefcount == 0)
			unmap = true;
	}

	if (unmap) {
		cam_mem_util_unmap(idx);
		CAM_DBG(CAM_MEM,
			"Called unmap from here, buf_handle: %u, idx: %d",
			cmd->buf_handle, idx);
			"Called unmap from here, buf_handle: %u, idx: %d", cmd->buf_handle, idx);
	}

	mutex_unlock(&tbl.bufq[idx].ref_lock);

	if (unmap)
		mutex_destroy(&tbl.bufq[idx].ref_lock);

	return rc;
}
@@ -1326,7 +1407,7 @@ int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_wrapper))
		CAM_DBG(CAM_MEM,
			"Called unmap from here, buf_handle: %u, idx: %d",
			tbl.bufq[idx].buf_handle, idx);
@@ -1506,7 +1587,7 @@ int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_wrapper))
		CAM_DBG(CAM_MEM,
			"Called unmap from here, buf_handle: %u, idx: %d",
			inp->mem_handle, idx);
+7 −2
Original line number | Diff line number | Diff line content
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _CAM_MEM_MGR_H_
@@ -44,8 +44,11 @@ enum cam_smmu_mapping_client {
 * @is_imported:    Flag indicating if buffer is imported from an FD in user
 *                  space
 * @krefcount:      Reference counter to track whether the buffer is
 *                  mapped and in use
 *                  mapped and in use by kmd
 * @smmu_mapping_client: Client buffer (User or kernel)
 * @urefcount:      Reference counter to track whether the buffer is
 *                  mapped and in use by umd
 * @ref_lock:       Mutex lock for refcount
 */
struct cam_mem_buf_queue {
	struct dma_buf *dma_buf;
@@ -63,6 +66,8 @@ struct cam_mem_buf_queue {
	bool is_imported;
	struct kref krefcount;
	enum cam_smmu_mapping_client smmu_mapping_client;
	struct kref urefcount;
	struct mutex ref_lock;
};

/**
+10 −10
Original line number Diff line number Diff line
@@ -150,10 +150,11 @@ int32_t cam_sensor_handle_random_write(
	struct list_head **list)
{
	struct i2c_settings_list  *i2c_list;
	int32_t rc = 0, cnt;
	int32_t rc = 0, cnt, payload_count;

	payload_count = cam_cmd_i2c_random_wr->header.count;
	i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings,
		cam_cmd_i2c_random_wr->header.count);
						payload_count);
	if (i2c_list == NULL ||
		i2c_list->i2c_settings.reg_setting == NULL) {
		CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
@@ -162,15 +163,14 @@ int32_t cam_sensor_handle_random_write(

	*cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) +
		sizeof(struct i2c_random_wr_payload) *
		(cam_cmd_i2c_random_wr->header.count));
		payload_count);
	i2c_list->op_code = CAM_SENSOR_I2C_WRITE_RANDOM;
	i2c_list->i2c_settings.addr_type =
		cam_cmd_i2c_random_wr->header.addr_type;
	i2c_list->i2c_settings.data_type =
		cam_cmd_i2c_random_wr->header.data_type;

	for (cnt = 0; cnt < (cam_cmd_i2c_random_wr->header.count);
		cnt++) {
	for (cnt = 0; cnt < payload_count; cnt++) {
		i2c_list->i2c_settings.reg_setting[cnt].reg_addr =
			cam_cmd_i2c_random_wr->random_wr_payload[cnt].reg_addr;
		i2c_list->i2c_settings.reg_setting[cnt].reg_data =
@@ -190,10 +190,11 @@ static int32_t cam_sensor_handle_continuous_write(
	struct list_head **list)
{
	struct i2c_settings_list *i2c_list;
	int32_t rc = 0, cnt;
	int32_t rc = 0, cnt, payload_count;

	payload_count = cam_cmd_i2c_continuous_wr->header.count;
	i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings,
		cam_cmd_i2c_continuous_wr->header.count);
						payload_count);
	if (i2c_list == NULL ||
		i2c_list->i2c_settings.reg_setting == NULL) {
		CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
@@ -203,7 +204,7 @@ static int32_t cam_sensor_handle_continuous_write(
	*cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) +
		sizeof(cam_cmd_i2c_continuous_wr->reg_addr) +
		sizeof(struct cam_cmd_read) *
		(cam_cmd_i2c_continuous_wr->header.count));
		(payload_count));
	if (cam_cmd_i2c_continuous_wr->header.op_code ==
		CAMERA_SENSOR_I2C_OP_CONT_WR_BRST)
		i2c_list->op_code = CAM_SENSOR_I2C_WRITE_BURST;
@@ -220,8 +221,7 @@ static int32_t cam_sensor_handle_continuous_write(
	i2c_list->i2c_settings.size =
		cam_cmd_i2c_continuous_wr->header.count;

	for (cnt = 0; cnt < (cam_cmd_i2c_continuous_wr->header.count);
		cnt++) {
	for (cnt = 0; cnt < payload_count; cnt++) {
		i2c_list->i2c_settings.reg_setting[cnt].reg_addr =
			cam_cmd_i2c_continuous_wr->reg_addr;
		i2c_list->i2c_settings.reg_setting[cnt].reg_data =