
Commit 179ca3bb authored by Dave Airlie

Merge branch 'drm-fixes-4.8' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

radeon and amdgpu fixes for 4.8.  Nothing major:
- fix a performance regression due to the LRU changes in 4.7
- 32-bit fixes (see the sketch below)
- fix a PLL regression
- misc bug fixes

* 'drm-fixes-4.8' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: skip TV/CV in display parsing
  drm/amdgpu: avoid a possible array overflow
  drm/amdgpu: fix lru size grouping v2
  drm/amdgpu: fix timeout value check in amd_sched_job_recovery
  drm/amdgpu: fix sdma_v2_4_ring_test_ib
  drm/amdgpu: fix amdgpu_move_blit on 32bit systems
  drm/radeon: fix radeon_move_blit on 32bit systems
  drm/radeon: only apply the SS fractional workaround to RS[78]80
parents 30bffd1b 611a1507
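
For context on the 32-bit fixes: on 32-bit kernels mem->start is an unsigned long, so old_mem->start << PAGE_SHIFT is computed in 32 bits and wraps for buffers placed at or above 4 GiB; the amdgpu_move_blit hunk below widens to u64 before shifting. A minimal sketch of the failure mode, assuming 4 KiB pages and using uint32_t to stand in for a 32-bit unsigned long:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assume 4 KiB pages */

	int main(void)
	{
		uint32_t start = 0x100000;	/* page index of an object at 4 GiB */

		/* Shift performed in 32 bits, then widened: wraps to 0. */
		uint64_t bad = (uint64_t)(start << PAGE_SHIFT);

		/* Widen first, then shift: yields the intended 0x100000000. */
		uint64_t good = (uint64_t)start << PAGE_SHIFT;

		printf("bad=0x%llx good=0x%llx\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}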
+2 −0
@@ -426,6 +426,8 @@ struct amdgpu_mman {

 	/* custom LRU management */
 	struct amdgpu_mman_lru			log2_size[AMDGPU_TTM_LRU_SIZE];
+	/* guard for log2_size array, don't add anything in between */
+	struct amdgpu_mman_lru			guard;
 };

 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
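
The guard field added above is a sentinel placed directly after log2_size[] (hence "don't add anything in between"): the LRU-tail helpers in the amdgpu_ttm.c hunk further down advance with ++lru and keep going while entries still share the old tail pointer, so the walk needs a trailing entry whose pointers never match a real tail. A minimal sketch of the pattern, with hypothetical names (lru_level, mman, retarget are illustrative, not the driver's):

	#include <stdio.h>

	struct lru_level { void *tail; };

	struct mman {
		struct lru_level levels[4];	/* stands in for log2_size[] */
		struct lru_level guard;		/* sentinel; must directly follow */
	};

	/* Propagate a new tail into every following level that shared the
	 * old one; the guard's NULL tail never matches, stopping the walk. */
	static void retarget(struct lru_level *lru, void *old_tail, void *new_tail)
	{
		lru->tail = new_tail;
		while ((++lru)->tail == old_tail)
			lru->tail = new_tail;
	}

	int main(void)
	{
		struct mman m = { .guard.tail = NULL };
		void *old = (void *)0x1;
		int i;

		for (i = 0; i < 4; i++)
			m.levels[i].tail = old;
		retarget(&m.levels[2], old, (void *)0x2);

		/* levels[2..3] retargeted, guard untouched */
		printf("levels[3]=%p guard=%p\n", m.levels[3].tail, m.guard.tail);
		return 0;
	}

Like the kernel code, the walk deliberately steps one slot past the array and relies on guard being laid out right behind it.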
+13 −0
@@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
 			    (le16_to_cpu(path->usConnObjectId) &
 			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;

+			/* Skip TV/CV support */
+			if ((le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_TV1_SUPPORT) ||
+			    (le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_CV_SUPPORT))
+				continue;
+
+			if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+				DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+					  con_obj_id, le16_to_cpu(path->usDeviceTag));
+				continue;
+			}
+
 			connector_type =
 				object_connector_convert[con_obj_id];
 			connector_object_id = con_obj_id;
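
Two of the merged fixes land in this hunk: display paths tagged ATOM_DEVICE_TV1_SUPPORT or ATOM_DEVICE_CV_SUPPORT are skipped outright (none of the ASICs amdgpu drives have TV/CV outputs), and con_obj_id is now bounds-checked against object_connector_convert[] before it is used as an index, closing the possible array overflow called out in the shortlog.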
+10 −2
@@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,

 	adev = amdgpu_get_adev(bo->bdev);
 	ring = adev->mman.buffer_funcs_ring;
-	old_start = old_mem->start << PAGE_SHIFT;
-	new_start = new_mem->start << PAGE_SHIFT;
+	old_start = (u64)old_mem->start << PAGE_SHIFT;
+	new_start = (u64)new_mem->start << PAGE_SHIFT;

 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM:
@@ -950,6 +950,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->lru[tbo->mem.mem_type];

 	lru->lru[tbo->mem.mem_type] = &tbo->lru;
+	while ((++lru)->lru[tbo->mem.mem_type] == res)
+		lru->lru[tbo->mem.mem_type] = &tbo->lru;

 	return res;
 }
@@ -960,6 +962,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->swap_lru;

 	lru->swap_lru = &tbo->swap;
+	while ((++lru)->swap_lru == res)
+		lru->swap_lru = &tbo->swap;

 	return res;
 }
@@ -1011,6 +1015,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
 	}

+	for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+		adev->mman.guard.lru[j] = NULL;
+	adev->mman.guard.swap_lru = NULL;
+
 	adev->mman.initialized = true;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 				adev->mc.real_vram_size >> PAGE_SHIFT);
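
The init loop above completes the guard scheme: every per-memory-type pointer in adev->mman.guard, plus its swap_lru, is set to NULL, and since a live LRU tail is never NULL the ++lru walks in amdgpu_ttm_lru_tail() and amdgpu_ttm_swap_lru_tail() are guaranteed to stop at the guard instead of running past the end of log2_size[].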
+1 −1
@@ -714,7 +714,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r) {
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
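
Here r carries the result of a fence wait with the fence_wait_timeout()-style convention (negative on error, zero on timeout, positive jiffies remaining on success), so the old `else if (r)` branch misreported every successful wait as a failure. A small sketch of handling that convention, with wait_fn as a hypothetical stand-in for the real wait call:

	#include <errno.h>
	#include <stdio.h>

	/* Stand-in using the fence_wait_timeout()-style convention:
	 * < 0 error, 0 timed out, > 0 jiffies remaining on success. */
	static long wait_fn(long timeout)
	{
		return timeout / 2;	/* pretend the fence signaled early */
	}

	static long wait_and_check(long timeout)
	{
		long r = wait_fn(timeout);

		if (r == 0)
			return -ETIMEDOUT;	/* timed out */
		else if (r < 0)
			return r;		/* genuine error from the wait */
		return 0;			/* r > 0 means success */
	}

	int main(void)
	{
		printf("%ld\n", wait_and_check(100));	/* prints 0: success */
		return 0;
	}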
+1 −1
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 	spin_lock(&sched->job_list_lock);
 	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
 					 struct amd_sched_job, node);
-	if (s_job)
+	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
 		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
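
MAX_SCHEDULE_TIMEOUT (LONG_MAX, from linux/sched.h) is the value sched->timeout takes when job timeouts are disabled, so arming the TDR delayed work with it makes no sense; the recovery path now only schedules the work when a finite timeout was actually configured.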