Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Unverified Commit 5f1ec403 authored by Michael Bestas
Browse files

Merge tag 'LA.UM.9.14.r1-20500.01-LAHAINA.QSSI12.0' of...

Merge tag 'LA.UM.9.14.r1-20500.01-LAHAINA.QSSI12.0' of https://git.codelinaro.org/clo/la/platform/vendor/opensource/camera-kernel into android12-5.4-lahaina

"LA.UM.9.14.r1-20500.01-LAHAINA.QSSI12.0"

* tag 'LA.UM.9.14.r1-20500.01-LAHAINA.QSSI12.0' of https://git.codelinaro.org/clo/la/platform/vendor/opensource/camera-kernel:
  msm: camera: memmgr: Avoid TOCTOU buffer access on multiple use of same fd
  msm: camera: utils: modify debug function logic
  msm: camera: fd: fix compile error for kernel 5.4
  msm: camera: reqmgr: Reset the slot if it is applied

Change-Id: If3f45f7ce8b70d747a77845b43b7b8ec4aa9c9f9
parents d379f686 48ed7e9b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -62,7 +62,7 @@ static int cam_fd_dev_open(struct v4l2_subdev *sd,
	return 0;
}

static int cam_fd_dev_close_internal(struct v4l2_subdev *sd,
int cam_fd_dev_close_internal(struct v4l2_subdev *sd,
	struct v4l2_subdev_fh *fh)
{
	struct cam_fd_dev *fd_dev = &g_fd_dev;
+8 −7
Original line number Diff line number Diff line
@@ -577,7 +577,7 @@ static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
static int cam_mem_util_map_hw_va(uint32_t flags,
	int32_t *mmu_hdls,
	int32_t num_hdls,
	int fd,
	int fd, struct dma_buf *dmabuf,
	dma_addr_t *hw_vaddr,
	size_t *len,
	enum cam_smmu_region_id region,
@@ -606,7 +606,8 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
				fd,
				dir,
				hw_vaddr,
				len);
				len,
				dmabuf);

			if (rc < 0) {
				CAM_ERR(CAM_MEM,
@@ -624,7 +625,8 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
				(dma_addr_t *)hw_vaddr,
				len,
				region,
				is_internal);
				is_internal,
				dmabuf);

			if (rc < 0) {
				CAM_ERR(CAM_MEM,
@@ -716,6 +718,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
			cmd->mmu_hdls,
			cmd->num_hdl,
			fd,
			dmabuf,
			&hw_vaddr,
			&len,
			region,
@@ -856,6 +859,7 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
			cmd->mmu_hdls,
			cmd->num_hdl,
			cmd->fd,
			dmabuf,
			&hw_vaddr,
			&len,
			CAM_SMMU_REGION_IO,
@@ -1109,8 +1113,6 @@ static int cam_mem_util_unmap(int32_t idx,
		if (cam_mem_util_unmap_hw_va(idx, region, client))
			CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK",
				tbl.bufq[idx].dma_buf);
		if (client == CAM_SMMU_MAPPING_KERNEL)
			tbl.bufq[idx].dma_buf = NULL;
	}

	mutex_lock(&tbl.m_lock);
@@ -1126,7 +1128,6 @@ static int cam_mem_util_unmap(int32_t idx,
		tbl.bufq[idx].is_imported,
		tbl.bufq[idx].dma_buf);

	if (tbl.bufq[idx].dma_buf)
	dma_buf_put(tbl.bufq[idx].dma_buf);

	tbl.bufq[idx].fd = -1;
+18 −0
Original line number Diff line number Diff line
@@ -759,6 +759,24 @@ static int __cam_req_mgr_check_next_req_slot(

	CAM_DBG(CAM_CRM, "idx: %d: slot->status %d", idx, slot->status);

	/*
	 * Some slots can't be reset due to IRQ congestion and
	 * performance issues, so we need to reset such a slot
	 * when we want to move to it.
	 */
	if (slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
		CAM_WARN(CAM_CRM,
			"slot[%d] wasn't reset, reset it now",
			idx);
		if (in_q->last_applied_idx == idx) {
			CAM_WARN(CAM_CRM,
				"last_applied_idx: %d",
				in_q->last_applied_idx);
			in_q->last_applied_idx = -1;
		}
		__cam_req_mgr_reset_req_slot(link, idx);
	}

	/* Check if there is new req from CSL, if not complete req */
	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
		rc = __cam_req_mgr_check_for_lower_pd_devices(link);
+10 −29
Original line number Diff line number Diff line
@@ -262,7 +262,7 @@ static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
	bool dis_delayed_unmap, enum dma_data_direction dma_dir,
	dma_addr_t *paddr_ptr, size_t *len_ptr,
	enum cam_smmu_region_id region_id, bool is_internal);
	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *dmabuf);

static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
	struct dma_buf *buf, enum dma_data_direction dma_dir,
@@ -2028,7 +2028,7 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
	if (IS_ERR_OR_NULL(attach)) {
		rc = PTR_ERR(attach);
		CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
		goto err_put;
		goto err_out;
	}

	if (region_id == CAM_SMMU_REGION_SHARED) {
@@ -2175,8 +2175,6 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
	dma_buf_unmap_attachment(attach, table, dma_dir);
err_detach:
	dma_buf_detach(buf, attach);
err_put:
	dma_buf_put(buf);
err_out:
	return rc;
}
@@ -2185,14 +2183,10 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
	bool dis_delayed_unmap, enum dma_data_direction dma_dir,
	dma_addr_t *paddr_ptr, size_t *len_ptr,
	enum cam_smmu_region_id region_id, bool is_internal)
	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *buf)
{
	int rc = -1;
	struct cam_dma_buff_info *mapping_info = NULL;
	struct dma_buf *buf = NULL;

	/* returns the dma_buf structure related to an fd */
	buf = dma_buf_get(ion_fd);

	rc = cam_smmu_map_buffer_validate(buf, idx, dma_dir, paddr_ptr, len_ptr,
		region_id, dis_delayed_unmap, &mapping_info);
@@ -2316,7 +2310,6 @@ static int cam_smmu_unmap_buf_and_remove_from_list(


	dma_buf_detach(mapping_info->buf, mapping_info->attach);
	dma_buf_put(mapping_info->buf);

	if (iommu_cb_set.map_profile_enable) {
		CAM_GET_TIMESTAMP(ts2);
@@ -2817,10 +2810,9 @@ int cam_smmu_put_scratch_iova(int handle,

static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
		 enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
		 size_t *len_ptr)
		 size_t *len_ptr, struct dma_buf *dmabuf)
{
	int rc = 0;
	struct dma_buf *dmabuf = NULL;
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *table = NULL;
	struct cam_sec_buff_info *mapping_info;
@@ -2829,15 +2821,6 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
	*paddr_ptr = (dma_addr_t)NULL;
	*len_ptr = (size_t)0;

	dmabuf = dma_buf_get(ion_fd);
	if (IS_ERR_OR_NULL((void *)(dmabuf))) {
		CAM_ERR(CAM_SMMU,
			"Error: dma buf get failed, idx=%d, ion_fd=%d",
			idx, ion_fd);
		rc = PTR_ERR(dmabuf);
		goto err_out;
	}

	/*
	 * ion_phys() is deprecated. call dma_buf_attach() and
	 * dma_buf_map_attachment() to get the buffer's physical
@@ -2849,7 +2832,7 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
			"Error: dma buf attach failed, idx=%d, ion_fd=%d",
			idx, ion_fd);
		rc = PTR_ERR(attach);
		goto err_put;
		goto err_out;
	}

	attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
@@ -2895,15 +2878,14 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
	dma_buf_unmap_attachment(attach, table, dma_dir);
err_detach:
	dma_buf_detach(dmabuf, attach);
err_put:
	dma_buf_put(dmabuf);
err_out:
	return rc;
}

int cam_smmu_map_stage2_iova(int handle,
		int ion_fd, enum cam_smmu_map_dir dir,
		dma_addr_t *paddr_ptr, size_t *len_ptr)
		dma_addr_t *paddr_ptr, size_t *len_ptr,
		struct dma_buf *dmabuf)
{
	int idx, rc;
	enum dma_data_direction dma_dir;
@@ -2962,7 +2944,7 @@ int cam_smmu_map_stage2_iova(int handle,
		goto get_addr_end;
	}
	rc = cam_smmu_map_stage2_buffer_and_add_to_list(idx, ion_fd, dma_dir,
			paddr_ptr, len_ptr);
			paddr_ptr, len_ptr, dmabuf);
	if (rc < 0) {
		CAM_ERR(CAM_SMMU,
			"Error: mapping or add list fail, idx=%d, handle=%d, fd=%d, rc=%d",
@@ -2998,7 +2980,6 @@ static int cam_smmu_secure_unmap_buf_and_remove_from_list(
	dma_buf_unmap_attachment(mapping_info->attach,
		mapping_info->table, mapping_info->dir);
	dma_buf_detach(mapping_info->buf, mapping_info->attach);
	dma_buf_put(mapping_info->buf);
	mapping_info->buf = NULL;

	list_del_init(&mapping_info->list);
@@ -3116,7 +3097,7 @@ static int cam_smmu_map_iova_validate_params(int handle,
int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
	size_t *len_ptr, enum cam_smmu_region_id region_id,
	bool is_internal)
	bool is_internal, struct dma_buf *dmabuf)
{
	int idx, rc = 0;
	struct timespec64 *ts = NULL;
@@ -3180,7 +3161,7 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,

	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd,
		dis_delayed_unmap, dma_dir, paddr_ptr, len_ptr,
		region_id, is_internal);
		region_id, is_internal, dmabuf);
	if (rc < 0) {
		CAM_ERR(CAM_SMMU,
			"mapping or add list fail cb:%s idx=%d, fd=%d, region=%d, rc=%d",
+2 −2
Original line number Diff line number Diff line
@@ -128,7 +128,7 @@ int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
 */
int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
	enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr,
	enum cam_smmu_region_id region_id, bool is_internal);
	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *dmabuf);

/**
 * @brief        : Maps kernel space IOVA for calling driver
@@ -304,7 +304,7 @@ int cam_smmu_put_iova(int handle, int ion_fd);
 */
int cam_smmu_map_stage2_iova(int handle,
	int ion_fd, enum cam_smmu_map_dir dir, dma_addr_t *dma_addr,
	size_t *len_ptr);
	size_t *len_ptr, struct dma_buf *dmabuf);

/**
 * @brief Unmaps secure memory for SMMU handle
Loading