
Commit 25d0d91a authored by Linus Torvalds

Merge tag 'drm-fixes-for-4.8-rc4' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "A bunch of fixes covering i915, amdgpu, one tegra and some core DRM
  ones.  Nothing too strange at this point"

* tag 'drm-fixes-for-4.8-rc4' of git://people.freedesktop.org/~airlied/linux: (21 commits)
  drm/atomic: Don't potentially reset color_mgmt_changed on successive property updates.
  drm: Protect fb_defio in drivers with CONFIG_KMS_FBDEV_EMULATION
  drm/amdgpu: skip TV/CV in display parsing
  drm/amdgpu: avoid a possible array overflow
  drm/amdgpu: fix lru size grouping v2
  drm/tegra: dsi: Enhance runtime power management
  drm/i915: Fix botched merge that downgrades CSR versions.
  drm/i915/skl: Ensure pipes with changed wms get added to the state
  drm/i915/gen9: Only copy WM results for changed pipes to skl_hw
  drm/i915/skl: Add support for the SAGV, fix underrun hangs
  drm/i915/gen6+: Interpret mailbox error flags
  drm/i915: Reattach comment, complete type specification
  drm/i915: Unconditionally flush any chipset buffers before execbuf
  drm/i915/gen9: Drop invalid WARN() during data rate calculation
  drm/i915/gen9: Initialize intel_state->active_crtcs during WM sanitization (v2)
  drm: Reject page_flip for !DRIVER_MODESET
  drm/amdgpu: fix timeout value check in amd_sched_job_recovery
  drm/amdgpu: fix sdma_v2_4_ring_test_ib
  drm/amdgpu: fix amdgpu_move_blit on 32bit systems
  drm/radeon: fix radeon_move_blit on 32bit systems
  ...
parents 908e373f add1fa75
+2 −0
@@ -426,6 +426,8 @@ struct amdgpu_mman {
 
 	/* custom LRU management */
 	struct amdgpu_mman_lru			log2_size[AMDGPU_TTM_LRU_SIZE];
+	/* guard for log2_size array, don't add anything in between */
+	struct amdgpu_mman_lru			guard;
 };
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
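The guard field added above is a sentinel. The amdgpu_ttm.c hunks further down walk forward past log2_size[] with (++lru) and rely on the guard's NULL-initialized lists to stop the walk, which is why the comment warns against placing anything between the array and the guard. Below is a minimal, self-contained userspace sketch of the same sentinel-walk idea; the struct and names are hypothetical stand-ins, not the kernel types.

#include <stdio.h>

struct lru {
	const void *tail;	/* NULL only in the guard entry */
};

struct mman {
	struct lru log2_size[4];
	struct lru guard;	/* sentinel: must directly follow the array */
};

/* Mirror of the diff's while ((++lru)->... == res) pattern: update every
 * consecutive entry sharing the old tail; the guard's NULL tail never
 * matches a real tail, so the walk is guaranteed to stop at the guard. */
static void update_tail(struct lru *lru, const void *new_tail)
{
	const void *res = lru->tail;

	lru->tail = new_tail;
	while ((++lru)->tail == res)
		lru->tail = new_tail;
}

int main(void)
{
	int a, b;
	struct mman m = {
		.log2_size = { { &a }, { &a }, { &a }, { &a } },
		.guard = { NULL },	/* same role as the NULL init in amdgpu_ttm_init() */
	};

	update_tail(&m.log2_size[1], &b);
	/* Entries 1..3 now point at b; entry 0 and the guard are untouched. */
	printf("entry0=%p entry3=%p\n", m.log2_size[0].tail, m.log2_size[3].tail);
	return 0;
}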
+13 −0
@@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
 			    (le16_to_cpu(path->usConnObjectId) &
 			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
+			/* Skip TV/CV support */
+			if ((le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_TV1_SUPPORT) ||
+			    (le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_CV_SUPPORT))
+				continue;
+
+			if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+				DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+					  con_obj_id, le16_to_cpu(path->usDeviceTag));
+				continue;
+			}
+
 			connector_type =
 				object_connector_convert[con_obj_id];
 			connector_object_id = con_obj_id;
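The second added block is a defensive bounds check: con_obj_id is derived from BIOS-supplied data, so it must be validated against the size of the object_connector_convert[] lookup table before being used as an index. A hedged userspace sketch of the same reject-and-continue idiom follows; the table contents are made up, and this simplified ARRAY_SIZE lacks the extra type check the kernel macro carries.

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const int object_connector_convert[] = { 10, 11, 12, 13 };

/* Validate an untrusted index before the table lookup; returns -1 on
 * rejection, matching the diff's log-and-continue error path in spirit. */
static int convert(unsigned int con_obj_id)
{
	if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
		fprintf(stderr, "invalid con_obj_id %u\n", con_obj_id);
		return -1;
	}
	return object_connector_convert[con_obj_id];
}

int main(void)
{
	printf("%d %d\n", convert(2), convert(42));	/* prints: 12 -1 */
	return 0;
}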
+10 −2
@@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 	adev = amdgpu_get_adev(bo->bdev);
 	ring = adev->mman.buffer_funcs_ring;
-	old_start = old_mem->start << PAGE_SHIFT;
-	new_start = new_mem->start << PAGE_SHIFT;
+	old_start = (u64)old_mem->start << PAGE_SHIFT;
+	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM:
@@ -950,6 +950,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->lru[tbo->mem.mem_type];
 
 	lru->lru[tbo->mem.mem_type] = &tbo->lru;
+	while ((++lru)->lru[tbo->mem.mem_type] == res)
+		lru->lru[tbo->mem.mem_type] = &tbo->lru;
 
 	return res;
 }
@@ -960,6 +962,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->swap_lru;
 
 	lru->swap_lru = &tbo->swap;
+	while ((++lru)->swap_lru == res)
+		lru->swap_lru = &tbo->swap;
 
 	return res;
 }
@@ -1011,6 +1015,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
 	}
 
+	for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+		adev->mman.guard.lru[j] = NULL;
+	adev->mman.guard.swap_lru = NULL;
+
 	adev->mman.initialized = true;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 				adev->mc.real_vram_size >> PAGE_SHIFT);
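The first hunk above is the 32-bit fix from the pull list: old_mem->start holds a page number in an unsigned long, so on a 32-bit kernel the shift by PAGE_SHIFT happens in 32-bit arithmetic and wraps for buffers placed at 4 GiB or beyond; casting to u64 before shifting preserves the full byte offset. A minimal userspace demonstration of the wrap, assuming the common PAGE_SHIFT of 12:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t start = 0x200000;	/* page index of an 8 GiB offset */

	/* Shift performed in 32 bits first, as with a 32-bit unsigned long:
	 * 2^21 << 12 = 2^33 wraps to 0 before the widening assignment. */
	uint64_t wrapped = start << PAGE_SHIFT;
	/* The fix: widen to 64 bits before shifting. */
	uint64_t correct = (uint64_t)start << PAGE_SHIFT;

	printf("wrapped=%#llx correct=%#llx\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;	/* prints wrapped=0 correct=0x200000000 */
}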
+1 −1
@@ -714,7 +714,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
-	} else if (r) {
+	} else if (r < 0) {
 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
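This one-token change fixes an inverted success check: the r being tested is the result of a fence wait with a timeout, which by kernel convention returns a negative errno on error, 0 on timeout, and the remaining time on success. `else if (r)` therefore treated every successful wait as a failure, while `else if (r < 0)` trips only on real errors. A sketch of handling that tri-state convention, with a hypothetical stand-in for the wait:

#include <stdio.h>

/* Hypothetical wait: negative errno on error, 0 on timeout, and the
 * time remaining on success, the convention the fix depends on. */
static long wait_with_timeout(long timeout)
{
	return timeout / 2;	/* pretend the fence signaled early */
}

int main(void)
{
	long r = wait_with_timeout(1000);

	if (r == 0) {
		fprintf(stderr, "test timed out\n");
		return 1;
	} else if (r < 0) {	/* only negative values are errors */
		fprintf(stderr, "wait failed (%ld)\n", r);
		return 1;
	}
	printf("success, %ld remaining\n", r);	/* positive r is success */
	return 0;
}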
+1 −1
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 	spin_lock(&sched->job_list_lock);
 	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
 					 struct amd_sched_job, node);
-	if (s_job)
+	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
 		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
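The added condition stops the recovery path from arming the TDR (timeout detection and recovery) timer when no timeout is configured: MAX_SCHEDULE_TIMEOUT is the kernel's "wait forever" sentinel (LONG_MAX), so scheduling delayed work with that delay is never meaningful. A small sketch of the same guard around an optional watchdog; the types and the arm function are hypothetical:

#include <stdio.h>
#include <limits.h>

#define MAX_SCHEDULE_TIMEOUT LONG_MAX	/* kernel's "no timeout" sentinel */

struct scheduler {
	long timeout;	/* watchdog delay, or MAX_SCHEDULE_TIMEOUT if unset */
};

/* Stand-in for schedule_delayed_work(): arm the recovery timer only
 * when there is a pending job and a finite timeout was configured. */
static void maybe_arm_watchdog(const struct scheduler *sched, int have_job)
{
	if (have_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		printf("watchdog armed for %ld ticks\n", sched->timeout);
	else
		printf("watchdog left disarmed\n");
}

int main(void)
{
	struct scheduler tdr  = { .timeout = 500 };
	struct scheduler none = { .timeout = MAX_SCHEDULE_TIMEOUT };

	maybe_arm_watchdog(&tdr, 1);	/* arms: finite timeout set */
	maybe_arm_watchdog(&none, 1);	/* skipped: no timeout configured */
	return 0;
}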