Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4604202c authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next

some amd fixes
* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon/mst: cleanup code indentation
  drm/radeon/mst: fix regression in lane/link handling.
  drm/amdgpu: add invalidate_page callback for userptrs
  drm/amdgpu: Revert "remove the userptr rmn->lock"
  drm/amdgpu: clean up path handling for powerplay
  drm/amd/powerplay: fix memory leak of tdp_table
parents 17efca93 1135035d
Loading
Loading
Loading
Loading
+86 −34
Original line number Original line Diff line number Diff line
@@ -48,7 +48,8 @@ struct amdgpu_mn {
	/* protected by adev->mn_lock */
	/* protected by adev->mn_lock */
	struct hlist_node	node;
	struct hlist_node	node;


	/* objects protected by mm->mmap_sem */
	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root		objects;
	struct rb_root		objects;
};
};


@@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
	struct amdgpu_bo *bo, *next_bo;
	struct amdgpu_bo *bo, *next_bo;


	mutex_lock(&adev->mn_lock);
	mutex_lock(&adev->mn_lock);
	down_write(&rmn->mm->mmap_sem);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {
					     it.rb) {
@@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
		}
		}
		kfree(node);
		kfree(node);
	}
	}
	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
	kfree(rmn);
	kfree(rmn);
@@ -105,40 +106,23 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
}
}


/**
/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 * amdgpu_mn_invalidate_node - unmap all BOs of a node
 *
 *
 * @mn: our notifier
 * @node: the node with the BOs to unmap
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 *
 * We block for all BOs between start and end to be idle and
 * We block for all BOs and unmap them by moving them
 * unmap them by move them into system domain again.
 * into system domain again.
 */
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
					     struct mm_struct *mm,
				      unsigned long start,
				      unsigned long start,
				      unsigned long end)
				      unsigned long end)
{
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
	struct amdgpu_bo *bo;
	struct amdgpu_bo *bo;
	long r;
	long r;


		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

	list_for_each_entry(bo, &node->bos, mn_list) {
	list_for_each_entry(bo, &node->bos, mn_list) {


			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
							  end))
			continue;
			continue;


		r = amdgpu_bo_reserve(bo, true);
		r = amdgpu_bo_reserve(bo, true);
@@ -160,10 +144,77 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unreserve(bo);
	}
	}
}
}

/**
 * amdgpu_mn_invalidate_page - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @address: address of the invalidated page
 *
 * Invalidation of a single page. Blocks for all BOs mapping it
 * and unmaps them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long address)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, address, address);
	if (it) {
		struct amdgpu_mn_node *node;

		node = container_of(it, struct amdgpu_mn_node, it);
		amdgpu_mn_invalidate_node(node, address, address);
	}

	mutex_unlock(&rmn->lock);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		amdgpu_mn_invalidate_node(node, start, end);
	}

	mutex_unlock(&rmn->lock);
}
}


static const struct mmu_notifier_ops amdgpu_mn_ops = {
static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.release = amdgpu_mn_release,
	.invalidate_page = amdgpu_mn_invalidate_page,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};
};


@@ -196,6 +247,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
	rmn->adev = adev;
	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	rmn->mn.ops = &amdgpu_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT;
	rmn->objects = RB_ROOT;


	r = __mmu_notifier_register(&rmn->mn, mm);
	r = __mmu_notifier_register(&rmn->mn, mm);
@@ -242,7 +294,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)


	INIT_LIST_HEAD(&bos);
	INIT_LIST_HEAD(&bos);


	down_write(&rmn->mm->mmap_sem);
	mutex_lock(&rmn->lock);


	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		kfree(node);
@@ -256,7 +308,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
	if (!node) {
	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
		if (!node) {
			up_write(&rmn->mm->mmap_sem);
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
			return -ENOMEM;
		}
		}
	}
	}
@@ -271,7 +323,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)


	interval_tree_insert(&node->it, &rmn->objects);
	interval_tree_insert(&node->it, &rmn->objects);


	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&rmn->lock);


	return 0;
	return 0;
}
}
@@ -297,7 +349,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
		return;
		return;
	}
	}


	down_write(&rmn->mm->mmap_sem);
	mutex_lock(&rmn->lock);


	/* save the next list entry for later */
	/* save the next list entry for later */
	head = bo->mn_list.next;
	head = bo->mn_list.next;
@@ -312,6 +364,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
		kfree(node);
		kfree(node);
	}
	}


	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
	mutex_unlock(&adev->mn_lock);
}
}
+7 −7
Original line number Original line Diff line number Diff line


subdir-ccflags-y += -Iinclude/drm  \
subdir-ccflags-y += -Iinclude/drm  \
		-Idrivers/gpu/drm/amd/powerplay/inc/  \
		-I$(FULL_AMD_PATH)/powerplay/inc/  \
		-Idrivers/gpu/drm/amd/include/asic_reg  \
		-I$(FULL_AMD_PATH)/include/asic_reg  \
		-Idrivers/gpu/drm/amd/include  \
		-I$(FULL_AMD_PATH)/include  \
		-Idrivers/gpu/drm/amd/powerplay/smumgr\
		-I$(FULL_AMD_PATH)/powerplay/smumgr\
		-Idrivers/gpu/drm/amd/powerplay/hwmgr \
		-I$(FULL_AMD_PATH)/powerplay/hwmgr \
		-Idrivers/gpu/drm/amd/powerplay/eventmgr
		-I$(FULL_AMD_PATH)/powerplay/eventmgr


AMD_PP_PATH = ../powerplay
AMD_PP_PATH = ../powerplay


PP_LIBS = smumgr hwmgr eventmgr
PP_LIBS = smumgr hwmgr eventmgr


AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS)))
AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))


include $(AMD_POWERPLAY)
include $(AMD_POWERPLAY)


+3 −1
Original line number Original line Diff line number Diff line
@@ -512,8 +512,10 @@ static int get_cac_tdp_table(


	hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
	hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);


	if (NULL == hwmgr->dyn_state.cac_dtp_table)
	if (NULL == hwmgr->dyn_state.cac_dtp_table) {
		kfree(tdp_table);
		return -ENOMEM;
		return -ENOMEM;
	}


	memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
	memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);


+6 −16
Original line number Original line Diff line number Diff line
@@ -510,6 +510,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
{
{
	struct radeon_encoder_mst *mst_enc;
	struct radeon_encoder_mst *mst_enc;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_connector_atom_dig *dig_connector;
	int bpp = 24;
	int bpp = 24;


	mst_enc = radeon_encoder->enc_priv;
	mst_enc = radeon_encoder->enc_priv;
@@ -523,22 +524,11 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,




	drm_mode_set_crtcinfo(adjusted_mode, 0);
	drm_mode_set_crtcinfo(adjusted_mode, 0);
	{
	  struct radeon_connector_atom_dig *dig_connector;
	  int ret;

	dig_connector = mst_enc->connector->con_priv;
	dig_connector = mst_enc->connector->con_priv;
	  ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
	dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
					     dig_connector->dpcd, adjusted_mode->clock,
	dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
					     &dig_connector->dp_lane_count,
					     &dig_connector->dp_clock);
	  if (ret) {
		  dig_connector->dp_lane_count = 0;
		  dig_connector->dp_clock = 0;
	  }
	DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
	DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
		      dig_connector->dp_lane_count, dig_connector->dp_clock);
		      dig_connector->dp_lane_count, dig_connector->dp_clock);
	}
	return true;
	return true;
}
}