
Commit 61c06b6b authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "msm: camera: memmgr: Fix vaddr saving and cpu access issues" into msm-4.14

parents 5171608c 2a453a90
+88 −39
@@ -26,7 +26,7 @@

static struct cam_mem_table tbl;

-static int cam_mem_util_map_cpu_va(struct dma_buf *buf,
+static int cam_mem_util_map_cpu_va(struct dma_buf *dmabuf,
	uint64_t *vaddr,
	size_t *len)
{
@@ -37,9 +37,11 @@ static int cam_mem_util_map_cpu_va(struct dma_buf *buf,
	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
	 * need to be called as a pair to avoid stability issues.
	 */
-	rc = dma_buf_begin_cpu_access(buf, DMA_BIDIRECTIONAL);
-	if (rc)
+	rc = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "dma begin access failed rc=%d", rc);
		return rc;
+	}

	/*
	 * Code could be simplified if ION support of dma_buf_vmap is
@@ -48,29 +50,61 @@ static int cam_mem_util_map_cpu_va(struct dma_buf *buf,
	 * to _kmap each individual page and then only use the virtual
	 * address returned from the first call to _kmap.
	 */
-	for (i = 0; i < PAGE_ALIGN(buf->size) / PAGE_SIZE; i++) {
-		addr = dma_buf_kmap(buf, i);
+	for (i = 0; i < PAGE_ALIGN(dmabuf->size) / PAGE_SIZE; i++) {
+		addr = dma_buf_kmap(dmabuf, i);
		if (IS_ERR_OR_NULL(addr)) {
			CAM_ERR(CAM_CRM, "kernel map fail");
			for (j = 0; j < i; j++)
-				dma_buf_kunmap(buf,
+				dma_buf_kunmap(dmabuf,
					j,
-					(void *)(*vaddr + j * PAGE_SIZE));
+					(void *)(*vaddr + (j * PAGE_SIZE)));
			*vaddr = 0;
			*len = 0;
			rc = -ENOSPC;
			goto fail;
		}
		if (i == 0)
-			*vaddr = *(uint64_t *)addr;
+			*vaddr = (uint64_t)addr;
	}

-	*len = buf->size;
+	*len = dmabuf->size;

	return 0;

fail:
-	dma_buf_end_cpu_access(buf, DMA_BIDIRECTIONAL);
+	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	return rc;
}

+static int cam_mem_util_unmap_cpu_va(struct dma_buf *dmabuf,
+	uint64_t vaddr)
+{
+	int i, rc = 0, page_num;
+
+	if (!dmabuf || !vaddr) {
+		CAM_ERR(CAM_CRM, "Invalid input args %pK %llX", dmabuf, vaddr);
+		return -EINVAL;
+	}
+
+	page_num = PAGE_ALIGN(dmabuf->size) / PAGE_SIZE;
+
+	for (i = 0; i < page_num; i++) {
+		dma_buf_kunmap(dmabuf, i,
+			(void *)(vaddr + (i * PAGE_SIZE)));
+	}
+
+	/*
+	 * dma_buf_begin_cpu_access() and
+	 * dma_buf_end_cpu_access() need to be called as a pair
+	 * to avoid stability issues.
+	 */
+	rc = dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
+	if (rc) {
+		CAM_ERR(CAM_CRM, "Failed in end cpu access, dmabuf=%pK",
+			dmabuf);
+		return rc;
+	}
+
+	return rc;
+}
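
The two helpers above encode the dma-buf CPU-access contract that the in-code comments describe: every dma_buf_begin_cpu_access() must be matched by a dma_buf_end_cpu_access(), and every dma_buf_kmap() by a dma_buf_kunmap() of the same page. Below is a minimal sketch of that pairing for a single page, assuming a v4.14-era kernel where the per-page kmap API still exists; demo_touch_first_page() is a hypothetical caller, not part of this driver.

#include <linux/dma-buf.h>
#include <linux/err.h>

static int demo_touch_first_page(struct dma_buf *dmabuf)
{
	void *addr;
	int rc;

	/* Must precede any CPU access to the buffer contents. */
	rc = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc)
		return rc;

	/* Map page 0, touch it, then unmap the same page. */
	addr = dma_buf_kmap(dmabuf, 0);
	if (!IS_ERR_OR_NULL(addr)) {
		((char *)addr)[0] = 0;
		dma_buf_kunmap(dmabuf, 0, addr);
	}

	/* Must pair with the begin call above, even on failure paths. */
	return dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
}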

@@ -335,6 +369,13 @@ static int cam_mem_util_get_dma_buf_fd(size_t len,
		goto get_fd_fail;
	}

+	/*
+	 * Increment the ref count so that it becomes 2 here.
+	 * When we close the fd, the ref count drops to 1, and when we
+	 * then call dma_buf_put(), it drops to 0 and the memory is freed.
+	 */
+	dma_buf_get(*fd);

	return rc;

get_fd_fail:
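
The comment in this hunk describes a three-step refcount lifecycle: the freshly exported buffer holds one reference, the extra get raises it to two, a later close(fd) from userspace drops it back to one, and the kernel's own dma_buf_put() releases the memory. A minimal sketch of that lifecycle follows, assuming a standard dma-buf exporter; the commit takes the extra reference with dma_buf_get(*fd), which looks the buffer up by fd, while the sketch uses get_dma_buf(), which bumps the refcount directly, to the same effect. demo_export_fd() is a hypothetical helper, not part of this driver.

#include <linux/dma-buf.h>
#include <linux/fcntl.h>

static int demo_export_fd(struct dma_buf *dmabuf)
{
	int fd;

	/* The fd shares the buffer's single reference at this point. */
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		return fd;

	/* Bump to two refs so close(fd) alone cannot free the buffer. */
	get_dma_buf(dmabuf);

	/*
	 * Later: close(fd) drops the count to one, and the kernel's
	 * dma_buf_put(dmabuf) drops it to zero, freeing the memory.
	 */
	return fd;
}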
@@ -441,12 +482,15 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
		return dir;
	}

	CAM_DBG(CAM_CRM, "map_hw_va : flags = %x, dir=%d, num_hdls=%d",
		flags, dir, num_hdls);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_map_stage2_iova(mmu_hdls[i],
				fd,
				dir,
-				(dma_addr_t *)hw_vaddr,
+				hw_vaddr,
				len);

			if (rc < 0) {
@@ -489,8 +533,8 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
{
	int rc;
	int32_t idx;
-	struct dma_buf *dmabuf;
-	int fd;
+	struct dma_buf *dmabuf = NULL;
+	int fd = -1;
	dma_addr_t hw_vaddr = 0;
	size_t len;

@@ -540,9 +584,12 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
			&hw_vaddr,
			&len,
			region);
-		if (rc)
+
+		if (rc) {
+			CAM_ERR(CAM_CRM, "Failed in map_hw_va, rc=%d", rc);
			goto map_hw_fail;
+		}
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = fd;
@@ -683,6 +730,10 @@ static int cam_mem_util_unmap_hw_va(int32_t idx,
	num_hdls = tbl.bufq[idx].num_hdl;
	fd = tbl.bufq[idx].fd;

+	CAM_DBG(CAM_CRM,
+		"unmap_hw_va : fd=%x, flags=0x%x, num_hdls=%d, client=%d",
+		fd, flags, num_hdls, client);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
@@ -762,6 +813,7 @@ static int cam_mem_mgr_cleanup_table(void)
		mutex_unlock(&tbl.bufq[i].q_lock);
		mutex_destroy(&tbl.bufq[i].q_lock);
	}

	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);
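
The cleanup path re-arms the slot bitmap the same way init does: clear everything, then permanently reserve slot 0 so that a zero handle can never refer to a live buffer. Here is a minimal sketch of that allocation scheme, assuming a fixed-size table; the demo_* names are illustrative, not from this driver.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_BUFQ_MAX 1024

static DECLARE_BITMAP(demo_bitmap, DEMO_BUFQ_MAX);

static void demo_table_init(void)
{
	bitmap_zero(demo_bitmap, DEMO_BUFQ_MAX);
	/* Slot 0 stays set forever: index 0 means "invalid handle". */
	set_bit(0, demo_bitmap);
}

static int demo_get_slot(void)
{
	int idx = find_first_zero_bit(demo_bitmap, DEMO_BUFQ_MAX);

	if (idx >= DEMO_BUFQ_MAX)
		return -ENOMEM;		/* table is full */
	set_bit(idx, demo_bitmap);
	return idx;			/* always > 0 by construction */
}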
@@ -784,10 +836,8 @@ void cam_mem_mgr_deinit(void)
static int cam_mem_util_unmap(int32_t idx,
	enum cam_smmu_mapping_client client)
{
-	int i, rc = 0, page_num;
+	int rc = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
-	struct dma_buf *dmabuf = NULL;
-	uint64_t kvaddr;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_CRM, "Incorrect index");
@@ -805,23 +855,19 @@ static int cam_mem_util_unmap(int32_t idx,
		return 0;
	}


-	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
-		if (tbl.bufq[idx].dma_buf && tbl.bufq[idx].kmdvaddr)
-			dmabuf = tbl.bufq[idx].dma_buf;
-			page_num = PAGE_ALIGN(dmabuf->size) / PAGE_SIZE;
-			kvaddr = tbl.bufq[idx].kmdvaddr;
-			for (i = 0; i < page_num; i++)
-				dma_buf_kunmap(dmabuf, i,
-					(void *)(kvaddr + i * PAGE_SIZE));
-
-	/*
-	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
-	 * need to be called in pair to avoid stability issue.
-	 */
-	rc = dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
-	if (rc)
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		if (tbl.bufq[idx].dma_buf && tbl.bufq[idx].kmdvaddr) {
+			rc = cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
+				tbl.bufq[idx].kmdvaddr);
+			if (rc) {
+				CAM_ERR(CAM_CRM,
+					"Failed, dmabuf=%pK, kmdvaddr=%pK",
+					tbl.bufq[idx].dma_buf,
+					tbl.bufq[idx].kmdvaddr);
+				return rc;
+			}
+		}
+	}

	/* SHARED flag gets precedence, all other flags after it */
	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
@@ -896,7 +942,7 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
		return -EINVAL;
	}

	CAM_DBG(CAM_CRM, "Releasing hdl = %u", cmd->buf_handle);
	CAM_DBG(CAM_CRM, "Releasing hdl = %x", cmd->buf_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);

	return rc;
@@ -917,7 +963,6 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;
-	int i;

	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

@@ -953,6 +998,11 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
		CAM_DBG(CAM_CRM, "Got dma_buf = %pK", buf);
	}

+	/*
+	 * We always map the kernel virtual address here, so update
+	 * the flags to ensure the unmap path tears it down properly.
+	 */
+	inp->flags |= CAM_MEM_FLAG_KMD_ACCESS;
	rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
	if (rc) {
		CAM_ERR(CAM_CRM, "Failed to get kernel vaddr");
@@ -1023,8 +1073,7 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
		buf, region);
smmu_fail:
-	for (i = 0; i < PAGE_ALIGN(buf->size) / PAGE_SIZE; i++)
-		dma_buf_kunmap(buf, i, (void *)(kvaddr + i * PAGE_SIZE));
+	cam_mem_util_unmap_cpu_va(buf, kvaddr);
map_fail:
	dma_buf_put(buf);
ion_fail: