
Commit b1d778b9 authored by Dave Airlie

Merge branch 'drm-fixes-3.8' of git://people.freedesktop.org/~agd5f/linux into drm-next

Misc fixes for reset and new packets for userspace usage.

* 'drm-fixes-3.8' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: add WAIT_UNTIL to evergreen VM safe reg list
  drm/radeon: add support for MEM_WRITE packet
  drm/radeon: restore modeset late in GPU reset path
  drm/radeon: avoid deadlock in pm path when waiting for fence
  drm/radeon: don't leave fence blocked process on failed GPU reset
parents 344f9067 668bbc81
drivers/gpu/drm/radeon/evergreen_cs.c +30 −0
@@ -2654,6 +2654,35 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
				  offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->lobj.gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -3287,6 +3316,7 @@ static bool evergreen_vm_reg_valid(u32 reg)

	/* check config regs */
	switch (reg) {
	case WAIT_UNTIL:
	case GRBM_GFX_INDEX:
	case CP_STRMOUT_CNTL:
	case CP_COHER_CNTL:
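
For orientation, the block above accepts the standard five-dword PM4 MEM_WRITE packet: a type-3 header with a count of 3, the low 32 address bits, the high 8 address bits, and a 64-bit payload. The sketch below shows how such a packet might be assembled on the userspace side; it is illustrative only (not a libdrm API), the PACKET3 encoding is written from memory, and the address is an offset into the target buffer object that the kernel later patches in the "offset += reloc->lobj.gpu_offset" step once the accompanying relocation has been validated.

#include <assert.h>
#include <stdint.h>

/* Illustrative only: PM4 type-3 header (type in bits 31:30, dword count in
 * bits 29:16, IT opcode in bits 15:8), as used by the r600/evergreen CP. */
#define PACKET3(op, count)  ((3u << 30) | (((count) & 0x3fff) << 16) | (((op) & 0xff) << 8))
#define PACKET3_MEM_WRITE   0x3d

/* Build the five dwords of a MEM_WRITE that the checker above will accept:
 * 'offset' is a qword-aligned offset into the target BO, 'value' the data. */
static void build_mem_write(uint32_t out[5], uint64_t offset, uint64_t value)
{
	assert((offset & 0x7) == 0);              /* unaligned addresses are rejected */

	out[0] = PACKET3(PACKET3_MEM_WRITE, 3);   /* count == 3 -> 4 payload dwords */
	out[1] = (uint32_t)offset;                /* address bits 31:0 */
	out[2] = (uint32_t)(offset >> 32) & 0xff; /* address bits 39:32 */
	out[3] = (uint32_t)value;                 /* data bits 31:0 */
	out[4] = (uint32_t)(value >> 32);         /* data bits 63:32 */
}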
drivers/gpu/drm/radeon/r600_cs.c +29 −0
@@ -2294,6 +2294,35 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
				  offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->lobj.gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
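
The r600 parser gains essentially the same case. Pulled out of the parser for readability, the checks amount to the following (an illustrative helper, not kernel code): the packet must carry exactly four payload dwords, the destination must be qword aligned, and the 8-byte write must fit inside the relocated buffer object.

#include <stdint.h>

/* Illustrative restatement of the MEM_WRITE validation above. */
static int mem_write_args_ok(unsigned payload_dwords, uint64_t offset,
			     uint64_t bo_size)
{
	if (payload_dwords != 4)	/* header count field must be 3 */
		return 0;
	if (offset & 0x7)		/* destination must be qword aligned */
		return 0;
	if (offset + 8 > bo_size)	/* the 64-bit write must stay inside the BO */
		return 0;
	return 1;
}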
drivers/gpu/drm/radeon/radeon.h +2 −1
@@ -225,12 +225,13 @@ struct radeon_fence {
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr);
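
Changing radeon_fence_wait_empty_locked() to return an error code is what lets its callers cope with a wait that cannot complete (a hung GPU, or a reset in progress) instead of blocking forever. The suspend path below is one such caller; the pm path mentioned in the commit message presumably follows the same pattern. A rough, hypothetical sketch of that pattern (not the actual radeon_pm.c change):

static void example_wait_idle_for_reclock(struct radeon_device *rdev)
{
	int i, r;

	mutex_lock(&rdev->ring_lock);
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* GPU is hung or being reset: bail out instead of
			 * deadlocking on a fence that will never signal. */
			mutex_unlock(&rdev->ring_lock);
			return;
		}
	}
	/* ... safe to reclock here ... */
	mutex_unlock(&rdev->ring_lock);
}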
drivers/gpu/drm/radeon/radeon_device.c +14 −3
@@ -1164,6 +1164,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
+	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
@@ -1206,8 +1207,16 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
-	for (i = 0; i < RADEON_NUM_RINGS; i++)
-		radeon_fence_wait_empty_locked(rdev, i);
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		r = radeon_fence_wait_empty_locked(rdev, i);
+		if (r) {
+			/* delay GPU reset to resume */
+			force_completion = true;
+		}
+	}
+	if (force_completion) {
+		radeon_fence_driver_force_completion(rdev);
+	}
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);
@@ -1338,7 +1347,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
	}

	radeon_restore_bios_scratch_regs(rdev);
-	drm_helper_resume_force_mode(rdev->ddev);

	if (!r) {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1358,11 +1366,14 @@ int radeon_gpu_reset(struct radeon_device *rdev)
			}
		}
	} else {
+		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

+	drm_helper_resume_force_mode(rdev->ddev);
+
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
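
Both new call sites rely on radeon_fence_driver_force_completion(), whose body is not part of this merge diff. The idea, sketched roughly below (details may differ from the real radeon_fence.c), is to publish the last emitted sequence number on every initialized ring so that anything blocked in radeon_fence_wait() sees its fence as signaled. Moving drm_helper_resume_force_mode() after this point (and after the rings have been restarted on the success path) means the modeset restore can no longer stall on fences that will never signal.

/* Rough sketch of the idea behind radeon_fence_driver_force_completion(). */
void example_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		/* report the last emitted sequence number as completed */
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}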
drivers/gpu/drm/radeon/radeon_drv.c +2 −1
@@ -68,9 +68,10 @@
 *   2.25.0 - eg+: new info request for num SE and num SH
 *   2.26.0 - r600-eg: fix htile size computation
 *   2.27.0 - r600-SI: Add CS ioctl support for async DMA
+ *   2.28.0 - r600-eg: Add MEM_WRITE packet support
 */
#define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	27
+#define KMS_DRIVER_MINOR	28
#define KMS_DRIVER_PATCHLEVEL	0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
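
With the feature gated behind a KMS driver version bump, userspace should key MEM_WRITE emission off the reported version rather than probing. A minimal check using libdrm's drmGetVersion() (error handling trimmed; fd is assumed to be an open radeon DRM node):

#include <stdbool.h>
#include <xf86drm.h>

/* MEM_WRITE is only safe to emit once the kernel advertises KMS driver 2.28. */
static bool radeon_has_mem_write(int fd)
{
	drmVersionPtr ver = drmGetVersion(fd);
	bool ok;

	if (!ver)
		return false;
	ok = ver->version_major > 2 ||
	     (ver->version_major == 2 && ver->version_minor >= 28);
	drmFreeVersion(ver);
	return ok;
}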