
Commit 73ccd696 authored by Dave Airlie

Merge branch 'drm-next-3.9' of git://people.freedesktop.org/~agd5f/linux into drm-next

Alex writes:
- CS ioctl cleanup and unification.  Unifies a lot of functionality
that was duplicated across multiple generations of hardware.
- Add support for Oland GPUs
- Deprecate UMS support.  Mesa and the ddx dropped support for UMS, and
apparently very few people still use it: the UMS CS ioctl was broken
for several kernel releases and no one reported it.  It was fixed in 3.8/stable.
- Rework GPU reset.  Use the status registers to determine which blocks
to reset.  This better matches the recommended reset programming model
and allows us to properly reset blocks besides GFX and DMA (see the
sketch after this list).
- Switch the VM set page code to use an IB rather than the ring.  This
fixes overflow issues when doing large page table updates using a small
ring like DMA (see the note after this list).
- Several small cleanups and bug fixes.
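The reset rework shows up below as evergreen_gpu_check_soft_reset() and
evergreen_gpu_soft_reset() in the evergreen.c diff: read the hardware status
registers, translate the busy bits into a mask of hung blocks, soft-reset only
the blocks named in the mask, then re-check.  A minimal, self-contained sketch
of that control flow; the single status register and the bit values are
illustrative stand-ins, not the real GRBM/SRBM definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real bits live in evergreend.h and cover
 * many more blocks (CP, RLC, IH, SEM, GRBM, VMC, MC, display). */
#define STATUS_GFX_BUSY  (1u << 0)
#define STATUS_DMA_BUSY  (1u << 1)
#define RESET_GFX        (1u << 0)
#define RESET_DMA        (1u << 1)

/* Stubbed status read; the driver RREG32()s GRBM_STATUS, SRBM_STATUS,
 * SRBM_STATUS2, DMA_STATUS_REG, VM_L2_STATUS, ... */
static uint32_t read_status(void)
{
	return STATUS_GFX_BUSY;	/* pretend only the GFX block is hung */
}

/* Step 1: derive a reset mask from the status registers. */
static uint32_t check_soft_reset(void)
{
	uint32_t status = read_status();
	uint32_t reset_mask = 0;

	if (status & STATUS_GFX_BUSY)
		reset_mask |= RESET_GFX;
	if (status & STATUS_DMA_BUSY)
		reset_mask |= RESET_DMA;
	return reset_mask;
}

/* Step 2: reset only the blocks named in the mask. */
static void soft_reset(uint32_t reset_mask)
{
	if (reset_mask == 0)
		return;
	if (reset_mask & RESET_GFX)
		printf("soft-resetting GFX\n");	/* WREG32(GRBM_SOFT_RESET, ...) */
	if (reset_mask & RESET_DMA)
		printf("soft-resetting DMA\n");	/* WREG32(SRBM_SOFT_RESET, ...) */
}

int main(void)
{
	soft_reset(check_soft_reset());
	return 0;
}

The same mask doubles as the per-ring lockup test: evergreen_gfx_is_lockup()
and evergreen_dma_is_lockup() below only declare a ring hung if the blocks
that ring depends on appear in the mask.

On the VM set page change: a ring is a fixed-size circular buffer, so writing
page table update commands directly to it caps the update at whatever space
the ring has free.  Updating, say, 1 GiB of address space at 4 KiB pages means
262144 page table entries, which can easily exceed a small ring.  An IB
(indirect buffer) is allocated per update and referenced from the ring by a
single indirect-buffer packet, so the ring cost stays constant regardless of
the update size.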

* 'drm-next-3.9' of git://people.freedesktop.org/~agd5f/linux: (38 commits)
  drm/radeon/dce6: fix display powergating
  drm/radeon: add Oland pci ids
  drm/radeon: radeon-asic updates for Oland
  drm/radeon: add ucode loading support for Oland
  drm/radeon: fill in gpu init for Oland
  drm/radeon: add Oland chip family
  drm/radeon: switch back to using the DMA ring for VM PT updates
  drm/radeon: use IBs for VM page table updates v2
  drm/radeon: don't reset the MC on IGPs/APUs
  drm/radeon: use the reset mask to determine if rings are hung
  drm/radeon: halt engines before disabling MC (si)
  drm/radeon: halt engines before disabling MC (cayman/TN)
  drm/radeon: halt engines before disabling MC (evergreen)
  drm/radeon: halt engines before disabling MC (6xx/7xx)
  drm/radeon: use status regs to determine what to reset (si)
  drm/radeon: use status regs to determine what to reset (cayman)
  drm/radeon: use status regs to determine what to reset (evergreen)
  drm/radeon: use status regs to determine what to reset (6xx/7xx)
  drm/radeon: rework GPU reset on cayman/TN
  ...
parents ed914f69 0e3d50bf
drivers/gpu/drm/Kconfig  +1 −0
@@ -96,6 +96,7 @@ config DRM_RADEON
	select DRM_TTM
	select POWER_SUPPLY
	select HWMON
+	select BACKLIGHT_CLASS_DEVICE
	help
	  Choose this option if you have an ATI Radeon graphics card.  There
	  are both PCI and AGP versions.  You don't need to choose this to
drivers/gpu/drm/radeon/Kconfig  +5 −28
-config DRM_RADEON_KMS
-	bool "Enable modesetting on radeon by default - NEW DRIVER"
+config DRM_RADEON_UMS
+	bool "Enable userspace modesetting on radeon (DEPRECATED)"
	depends on DRM_RADEON
-	select BACKLIGHT_CLASS_DEVICE
	help
-	  Choose this option if you want kernel modesetting enabled by default.
+	  Choose this option if you still need userspace modesetting.

-	  This is a completely new driver. It's only part of the existing drm
-	  for compatibility reasons. It requires an entirely different graphics
-	  stack above it and works very differently from the old drm stack.
-	  i.e. don't enable this unless you know what you are doing it may
-	  cause issues or bugs compared to the previous userspace driver stack.
-
-	  When kernel modesetting is enabled the IOCTL of radeon/drm
-	  driver are considered as invalid and an error message is printed
-	  in the log and they return failure.
-
-	  KMS enabled userspace will use new API to talk with the radeon/drm
-	  driver. The new API provide functions to create/destroy/share/mmap
-	  buffer object which are then managed by the kernel memory manager
-	  (here TTM). In order to submit command to the GPU the userspace
-	  provide a buffer holding the command stream, along this buffer
-	  userspace have to provide a list of buffer object used by the
-	  command stream. The kernel radeon driver will then place buffer
-	  in GPU accessible memory and will update command stream to reflect
-	  the position of the different buffers.
-
-	  The kernel will also perform security check on command stream
-	  provided by the user, we want to catch and forbid any illegal use
-	  of the GPU such as DMA into random system memory or into memory
-	  not owned by the process supplying the command stream.
+	  Userspace modesetting is deprecated for quite some time now, so
+	  enable this only if you have ancient versions of the DDX drivers.
drivers/gpu/drm/radeon/Makefile  +7 −3
@@ -56,8 +56,12 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h

$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h

-radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
-	radeon_irq.o r300_cmdbuf.o r600_cp.o
+radeon-y := radeon_drv.o
+
+# add UMS driver
+radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \
+	radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o

# add KMS driver
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
	radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
@@ -67,7 +71,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
	radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
-	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
+	r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
	evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
drivers/gpu/drm/radeon/atombios_crtc.c  +2 −4
@@ -252,8 +252,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
		radeon_crtc->enabled = true;
		/* adjust pm to dpms changes BEFORE enabling crtcs */
		radeon_pm_compute_clocks(rdev);
-		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
-			atombios_powergate_crtc(crtc, ATOM_DISABLE);
		atombios_enable_crtc(crtc, ATOM_ENABLE);
		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -271,8 +269,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
		atombios_enable_crtc(crtc, ATOM_DISABLE);
		radeon_crtc->enabled = false;
-		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
-			atombios_powergate_crtc(crtc, ATOM_ENABLE);
		/* adjust pm to dpms changes AFTER disabling crtcs */
		radeon_pm_compute_clocks(rdev);
		break;
@@ -1844,6 +1840,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
	int i;

	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (ASIC_IS_DCE6(rdev))
+		atombios_powergate_crtc(crtc, ATOM_ENABLE);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i] &&
drivers/gpu/drm/radeon/evergreen.c  +251 −102
@@ -2308,32 +2308,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
	return 0;
}

-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
{
-	u32 srbm_status;
-	u32 grbm_status;
-	u32 grbm_status_se0, grbm_status_se1;
-
-	srbm_status = RREG32(SRBM_STATUS);
-	grbm_status = RREG32(GRBM_STATUS);
-	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
-	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
-	if (!(grbm_status & GUI_ACTIVE)) {
-		radeon_ring_lockup_update(ring);
-		return false;
-	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
-	return radeon_ring_test_lockup(rdev, ring);
-}
-
-static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
-{
-	u32 grbm_reset = 0;
-
-	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-		return;
-
	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
@@ -2342,6 +2318,8 @@ static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, "  SRBM_STATUS2              = 0x%08X\n",
		RREG32(SRBM_STATUS2));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
@@ -2350,112 +2328,283 @@ static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		RREG32(CP_STAT));
	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		RREG32(DMA_STATUS_REG));
	if (rdev->family >= CHIP_CAYMAN) {
		dev_info(rdev->dev, "  R_00D834_DMA_STATUS_REG   = 0x%08X\n",
			 RREG32(DMA_STATUS_REG + 0x800));
	}
}

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
bool evergreen_is_display_hung(struct radeon_device *rdev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

-	/* reset all the gfx blocks */
-	grbm_reset = (SOFT_RESET_CP |
-		      SOFT_RESET_CB |
-		      SOFT_RESET_DB |
-		      SOFT_RESET_PA |
-		      SOFT_RESET_SC |
-		      SOFT_RESET_SPI |
-		      SOFT_RESET_SH |
-		      SOFT_RESET_SX |
-		      SOFT_RESET_TC |
-		      SOFT_RESET_TA |
-		      SOFT_RESET_VC |
-		      SOFT_RESET_VGT);
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
+			crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+			crtc_hung |= (1 << i);
+		}
+	}

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		RREG32(CP_STAT));
	return true;
}

-static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
+static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
{
+	u32 reset_mask = 0;
	u32 tmp;

-	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
-		return;
+	/* GRBM_STATUS */
+	tmp = RREG32(GRBM_STATUS);
+	if (tmp & (PA_BUSY | SC_BUSY |
+		   SH_BUSY | SX_BUSY |
+		   TA_BUSY | VGT_BUSY |
+		   DB_BUSY | CB_BUSY |
+		   SPI_BUSY | VGT_BUSY_NO_DMA))
+		reset_mask |= RADEON_RESET_GFX;

	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		RREG32(DMA_STATUS_REG));
	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
		   CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

-	/* Disable DMA */
-	tmp = RREG32(DMA_RB_CNTL);
-	tmp &= ~DMA_RB_ENABLE;
-	WREG32(DMA_RB_CNTL, tmp);
+	if (tmp & GRBM_EE_BUSY)
+		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

-	/* Reset dma */
-	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
-	RREG32(SRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(SRBM_SOFT_RESET, 0);
+	/* DMA_STATUS_REG */
+	tmp = RREG32(DMA_STATUS_REG);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA;

	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		RREG32(DMA_STATUS_REG));
	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

+	/* SRBM_STATUS */
+	tmp = RREG32(SRBM_STATUS);
+	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+		reset_mask |= RADEON_RESET_RLC;
+
+	if (tmp & IH_BUSY)
+		reset_mask |= RADEON_RESET_IH;
+
+	if (tmp & SEM_BUSY)
+		reset_mask |= RADEON_RESET_SEM;
+
+	if (tmp & GRBM_RQ_PENDING)
+		reset_mask |= RADEON_RESET_GRBM;
+
+	if (tmp & VMC_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+		   MCC_BUSY | MCD_BUSY))
+		reset_mask |= RADEON_RESET_MC;
+
+	if (evergreen_is_display_hung(rdev))
+		reset_mask |= RADEON_RESET_DISPLAY;
+
+	/* VM_L2_STATUS */
+	tmp = RREG32(VM_L2_STATUS);
+	if (tmp & L2_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	return reset_mask;
}

-static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;

-	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
-	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
-		reset_mask &= ~RADEON_RESET_DMA;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;

	if (reset_mask == 0)
-		return 0;
+		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

+	evergreen_print_gpu_status_regs(rdev);
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* Disable DMA */
+		tmp = RREG32(DMA_RB_CNTL);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL, tmp);
+	}
+
+	udelay(50);
+
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

-	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
-		evergreen_gpu_soft_reset_gfx(rdev);
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+		grbm_soft_reset |= SOFT_RESET_DB |
+			SOFT_RESET_CB |
+			SOFT_RESET_PA |
+			SOFT_RESET_SC |
+			SOFT_RESET_SPI |
+			SOFT_RESET_SX |
+			SOFT_RESET_SH |
+			SOFT_RESET_TC |
+			SOFT_RESET_TA |
+			SOFT_RESET_VC |
+			SOFT_RESET_VGT;
+	}
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= SOFT_RESET_CP |
+			SOFT_RESET_VGT;
+
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+	}
+
	if (reset_mask & RADEON_RESET_DMA)
-		evergreen_gpu_soft_reset_dma(rdev);
+		srbm_soft_reset |= SOFT_RESET_DMA;
+
+	if (reset_mask & RADEON_RESET_DISPLAY)
+		srbm_soft_reset |= SOFT_RESET_DC;
+
+	if (reset_mask & RADEON_RESET_RLC)
+		srbm_soft_reset |= SOFT_RESET_RLC;
+
+	if (reset_mask & RADEON_RESET_SEM)
+		srbm_soft_reset |= SOFT_RESET_SEM;
+
+	if (reset_mask & RADEON_RESET_IH)
+		srbm_soft_reset |= SOFT_RESET_IH;
+
+	if (reset_mask & RADEON_RESET_GRBM)
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+
+	if (reset_mask & RADEON_RESET_VMC)
+		srbm_soft_reset |= SOFT_RESET_VMC;
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		if (reset_mask & RADEON_RESET_MC)
+			srbm_soft_reset |= SOFT_RESET_MC;
+	}
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
+
	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
-	return 0;
+	udelay(50);
+
+	evergreen_print_gpu_status_regs(rdev);
}

int evergreen_asic_reset(struct radeon_device *rdev)
{
-	return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+	u32 reset_mask;
+
+	reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, true);
+
+	evergreen_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (!reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, false);
+
+	return 0;
+}

+/**
+ * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
-					       RADEON_RESET_DMA));
+			    RADEON_RESET_CP))) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force CP activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
}

+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & RADEON_RESET_DMA)) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force ring activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}

/* Interrupts */
@@ -3280,14 +3429,14 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	/* write the fence */
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
	/* flush HDP */
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);
}
@@ -3310,7 +3459,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
-		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
@@ -3320,8 +3469,8 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
-		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));

@@ -3380,7 +3529,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
		if (cur_size_in_dw > 0xFFFFF)
			cur_size_in_dw = 0xFFFFF;
		size_in_dw -= cur_size_in_dw;
-		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
@@ -3488,7 +3637,7 @@ static int evergreen_startup(struct radeon_device *rdev)
	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR, DMA_RB_WPTR,
-			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
	if (r)
		return r;
