Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f98c2135 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Just a couple of dma-buf related fixes and some amdgpu fixes, along
  with a regression fix for the radeon off-by-default feature, but makes my
  30" monitor happy again"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon/mst: cleanup code indentation
  drm/radeon/mst: fix regression in lane/link handling.
  drm/amdgpu: add invalidate_page callback for userptrs
  drm/amdgpu: Revert "remove the userptr rmn->lock"
  drm/amdgpu: clean up path handling for powerplay
  drm/amd/powerplay: fix memory leak of tdp_table
  dma-buf/fence: fix fence_is_later v2
  dma-buf: Update docs for SYNC ioctl
  drm: remove excess description
  dma-buf, drm, ion: Propagate error code from dma_buf_start_cpu_access()
  drm/atmel-hlcdc: use helper to get crtc state
  drm/atomic: use helper to get crtc state
parents 11caf57f 4604202c
Loading
Loading
Loading
Loading
+6 −5
Original line number Diff line number Diff line
@@ -352,7 +352,8 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases:

   No special interfaces, userspace simply calls mmap on the dma-buf fd, making
   sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always*
   used when the access happens. This is discussed next paragraphs.
   used when the access happens. Note that DMA_BUF_IOCTL_SYNC can fail with
   -EAGAIN or -EINTR, in which case it must be restarted.

   Some systems might need some sort of cache coherency management e.g. when
   CPU and GPU domains are being accessed through dma-buf at the same time. To
@@ -366,10 +367,10 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases:
       want (with the new data being consumed by the GPU or say scanout device)
     - munmap once you don't need the buffer any more

    Therefore, for correctness and optimal performance, systems with the memory
    cache shared by the GPU and CPU i.e. the "coherent" and also the
    "incoherent" are always required to use SYNC_START and SYNC_END before and
    after, respectively, when accessing the mapped address.
    For correctness and optimal performance, it is always required to use
    SYNC_START and SYNC_END before and after, respectively, when accessing the
    mapped address. Userspace cannot rely on coherent access, even when there
    are systems where it just works without calling these ioctls.

2. Supporting existing mmap interfaces in importers

+12 −7
Original line number Diff line number Diff line
@@ -259,6 +259,7 @@ static long dma_buf_ioctl(struct file *file,
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

@@ -285,11 +286,11 @@ static long dma_buf_ioctl(struct file *file,
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			dma_buf_end_cpu_access(dmabuf, direction);
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			dma_buf_begin_cpu_access(dmabuf, direction);
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return 0;
		return ret;
	default:
		return -ENOTTY;
	}
@@ -611,15 +612,19 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access for the CPU (read/write).
 *
 * This call must always succeed.
 * Can return negative error values, returns 0 on success.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		dmabuf->ops->end_cpu_access(dmabuf, direction);
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

+86 −34
Original line number Diff line number Diff line
@@ -48,7 +48,8 @@ struct amdgpu_mn {
	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by mm->mmap_sem */
	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root		objects;
};

@@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	down_write(&rmn->mm->mmap_sem);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {
@@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
		}
		kfree(node);
	}
	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
	kfree(rmn);
@@ -105,40 +106,23 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 * amdgpu_mn_invalidate_node - unmap all BOs of a node
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 * @node: the node with the BOs to unmap
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 * We block for all BOs and unmap them by moving them
 * into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
				      unsigned long start,
				      unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
	struct amdgpu_bo *bo;
	long r;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

	list_for_each_entry(bo, &node->bos, mn_list) {

			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
							  end))
		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
			continue;

		r = amdgpu_bo_reserve(bo, true);
@@ -160,10 +144,77 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
		amdgpu_bo_unreserve(bo);
	}
}

/**
 * amdgpu_mn_invalidate_page - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @address: address of invalidate page
 *
 * Invalidation of a single page. Blocks for all BOs mapping it
 * and unmaps them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long address)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, address, address);
	if (it) {
		struct amdgpu_mn_node *node;

		node = container_of(it, struct amdgpu_mn_node, it);
		amdgpu_mn_invalidate_node(node, address, address);
	}

	mutex_unlock(&rmn->lock);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		amdgpu_mn_invalidate_node(node, start, end);
	}

	mutex_unlock(&rmn->lock);
}

static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_page = amdgpu_mn_invalidate_page,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

@@ -196,6 +247,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
@@ -242,7 +294,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)

	INIT_LIST_HEAD(&bos);

	down_write(&rmn->mm->mmap_sem);
	mutex_lock(&rmn->lock);

	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
@@ -256,7 +308,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			up_write(&rmn->mm->mmap_sem);
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}
@@ -271,7 +323,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)

	interval_tree_insert(&node->it, &rmn->objects);

	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&rmn->lock);

	return 0;
}
@@ -297,7 +349,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
		return;
	}

	down_write(&rmn->mm->mmap_sem);
	mutex_lock(&rmn->lock);

	/* save the next list entry for later */
	head = bo->mn_list.next;
@@ -312,6 +364,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
		kfree(node);
	}

	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
}
+7 −7
Original line number Diff line number Diff line

subdir-ccflags-y += -Iinclude/drm  \
		-Idrivers/gpu/drm/amd/powerplay/inc/  \
		-Idrivers/gpu/drm/amd/include/asic_reg  \
		-Idrivers/gpu/drm/amd/include  \
		-Idrivers/gpu/drm/amd/powerplay/smumgr\
		-Idrivers/gpu/drm/amd/powerplay/hwmgr \
		-Idrivers/gpu/drm/amd/powerplay/eventmgr
		-I$(FULL_AMD_PATH)/powerplay/inc/  \
		-I$(FULL_AMD_PATH)/include/asic_reg  \
		-I$(FULL_AMD_PATH)/include  \
		-I$(FULL_AMD_PATH)/powerplay/smumgr\
		-I$(FULL_AMD_PATH)/powerplay/hwmgr \
		-I$(FULL_AMD_PATH)/powerplay/eventmgr

AMD_PP_PATH = ../powerplay

PP_LIBS = smumgr hwmgr eventmgr

AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS)))
AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))

include $(AMD_POWERPLAY)

+3 −1
Original line number Diff line number Diff line
@@ -512,8 +512,10 @@ static int get_cac_tdp_table(

	hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);

	if (NULL == hwmgr->dyn_state.cac_dtp_table)
	if (NULL == hwmgr->dyn_state.cac_dtp_table) {
		kfree(tdp_table);
		return -ENOMEM;
	}

	memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);

Loading