Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4f69528a authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "soc: qcom: mem-buf: Ensure dma-bufs are freed when refcount is 0"

parents f2013ec1 a7234c9b
Loading
Loading
Loading
Loading
+31 −0
Original line number | Diff line number | Diff line
@@ -688,6 +688,37 @@ void dma_buf_put(struct dma_buf *dmabuf)
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_put_sync - drop a reference to the buffer, releasing it synchronously
 * @dmabuf:	[in]	buffer whose refcount is decremented
 *
 * Relies on the file refcounting performed implicitly by __fput_sync().
 *
 * When this call drops the last reference, the 'release' file operation for
 * the backing fd runs immediately, which invokes &dma_buf_ops.release and
 * frees the memory that was allocated when the buffer was exported.
 *
 * Unlike dma_buf_put(), this variant guarantees the 'release' file operation
 * runs — and the memory is reclaimed — before returning, even when the final
 * reference is dropped from a kernel thread (e.g. a worker thread). In that
 * situation dma_buf_put() instead queues the file on the delayed_fput_list
 * and the buffer is freed asynchronously.
 *
 * Must not be used in atomic context and is intended for kernel threads
 * only. When in doubt, use dma_buf_put().
 */
void dma_buf_put_sync(struct dma_buf *dmabuf)
{
	struct file *file;

	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	might_sleep();

	file = dmabuf->file;
	/* Drop the debug-tracking reference before the synchronous fput. */
	dma_buf_ref_mod(to_msm_dma_buf(dmabuf), -1);
	__fput_sync(file);
}

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
+14 −4
Original line number | Diff line number | Diff line
@@ -277,6 +277,12 @@ static int mem_buf_rmt_alloc_ion_mem(struct mem_buf_xfer_mem *xfer_mem)
		xfer_mem->secure_alloc = false;
	}

	/*
	 * If the buffer needs to be freed because of error handling, ensure
	 * that dma_buf_put_sync() is invoked, instead of dma_buf_put(). Doing
	 * so ensures that the memory is freed before the next allocation
	 * request is serviced.
	 */
	dmabuf = ion_alloc(xfer_mem->size, heap_id, ion_flags);
	if (IS_ERR(dmabuf)) {
		pr_err("%s ion_alloc failure sz: 0x%x heap_id: %d flags: 0x%x rc: %d\n",
@@ -289,7 +295,7 @@ static int mem_buf_rmt_alloc_ion_mem(struct mem_buf_xfer_mem *xfer_mem)
	if (IS_ERR(attachment)) {
		pr_err("%s dma_buf_attach failure rc: %d\n",  __func__,
		       PTR_ERR(attachment));
		dma_buf_put(dmabuf);
		dma_buf_put_sync(dmabuf);
		return PTR_ERR(attachment);
	}

@@ -298,7 +304,7 @@ static int mem_buf_rmt_alloc_ion_mem(struct mem_buf_xfer_mem *xfer_mem)
		pr_err("%s dma_buf_map_attachment failure rc: %d\n", __func__,
		       PTR_ERR(mem_sgt));
		dma_buf_detach(dmabuf, attachment);
		dma_buf_put(dmabuf);
		dma_buf_put_sync(dmabuf);
		return PTR_ERR(mem_sgt);
	}

@@ -330,7 +336,11 @@ static void mem_buf_rmt_free_ion_mem(struct mem_buf_xfer_mem *xfer_mem)
	pr_debug("%s: Freeing ION memory\n", __func__);
	dma_buf_unmap_attachment(attachment, mem_sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attachment);
	dma_buf_put(ion_mem_data->dmabuf);
	/*
	 * Use dma_buf_put_sync() instead of dma_buf_put() to ensure that the
	 * memory is actually freed, before the next allocation request.
	 */
	dma_buf_put_sync(ion_mem_data->dmabuf);
	pr_debug("%s: ION memory freed\n", __func__);
}

@@ -2146,7 +2156,7 @@ static int mem_buf_probe(struct platform_device *pdev)
		return ret;
	}

	mem_buf_wq = alloc_workqueue("mem_buf_wq", WQ_HIGHPRI | WQ_UNBOUND, 0);
	mem_buf_wq = alloc_ordered_workqueue("mem_buf_wq", WQ_HIGHPRI);
	if (!mem_buf_wq) {
		dev_err(dev, "Unable to initialize workqueue\n");
		return -EINVAL;
+1 −0
Original line number | Diff line number | Diff line
@@ -531,6 +531,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);
void dma_buf_put_sync(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);