Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f664a526 authored by Christian König, committed by Christian König
Browse files

dma-buf: remove kmap_atomic interface



Neither used nor correctly implemented anywhere. Just completely remove
the interface.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Link: https://patchwork.freedesktop.org/patch/226645/
parent a19741e5
Loading
Loading
Loading
Loading
+2 −52
Original line number Diff line number Diff line
@@ -405,7 +405,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release
			  || !exp_info->ops->map_atomic
			  || !exp_info->ops->map
			  || !exp_info->ops->mmap)) {
		return ERR_PTR(-EINVAL);
@@ -687,26 +686,14 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
 *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
 *
 *   There are also atomic variants of these interfaces. Like for kmap they
 *   facilitate non-blocking fast-paths. Neither the importer nor the exporter
 *   (in the callback) is allowed to block when using these.
 *
 *   Interfaces::
 *      void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
 *      void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
 *
 *   For importers all the restrictions of using kmap apply, like the limited
 *   supply of kmap_atomic slots. Hence an importer shall only hold onto at
 *   max 2 atomic dma_buf kmaps at the same time (in any given process context).
 *   Implementing the functions is optional for exporters and for importers all
 *   the restrictions of using kmap apply.
 *
 *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
 *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
 *   the partial chunks at the beginning and end but may return stale or bogus
 *   data outside of the range (in these partial chunks).
 *
 *   Note that these calls need to always succeed. The exporter needs to
 *   complete any preparations that might fail in begin_cpu_access.
 *
 *   For some cases the overhead of kmap can be too high, a vmap interface
 *   is introduced. This interface should be used very carefully, as vmalloc
 *   space is a limited resources on many architectures.
@@ -859,43 +846,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	if (!dmabuf->ops->map_atomic)
		return NULL;
	return dmabuf->ops->map_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->unmap_atomic)
		dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
+0 −2
Original line number Diff line number Diff line
@@ -238,9 +238,7 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
	.map = drm_gem_dmabuf_kmap,
	.map_atomic = drm_gem_dmabuf_kmap_atomic,
	.unmap = drm_gem_dmabuf_kunmap,
	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
+0 −2
Original line number Diff line number Diff line
@@ -490,8 +490,6 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map_atomic	= armada_gem_dmabuf_no_kmap,
	.unmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
+0 −31
Original line number Diff line number Diff line
@@ -433,35 +433,6 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

/**
 * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
 * @dma_buf: buffer to be mapped
 * @page_num: page number within the buffer
 *
 * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
 */
void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}
EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);

/**
 * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @page_num: page number within the buffer
 * @addr: virtual address of the buffer
 *
 * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
 */
void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}
EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);

/**
 * drm_gem_dmabuf_kmap - map implementation for GEM
 * @dma_buf: buffer to be mapped
@@ -519,9 +490,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = drm_gem_dmabuf_kmap,
	.map_atomic = drm_gem_dmabuf_kmap_atomic,
	.unmap = drm_gem_dmabuf_kunmap,
	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
+0 −11
Original line number Diff line number Diff line
@@ -111,15 +111,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
	i915_gem_object_unpin_map(obj);
}

static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -225,9 +216,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = i915_gem_dmabuf_kmap,
	.map_atomic = i915_gem_dmabuf_kmap_atomic,
	.unmap = i915_gem_dmabuf_kunmap,
	.unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
Loading