Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7b1f2485 authored by Christian König, committed by Dave Airlie
Browse files

drm/radeon: make all functions work with multiple rings.



Give all asic and radeon_ring_* functions a
radeon_cp parameter, so they know which ring to work with.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 15d3332f
Loading
Loading
Loading
Loading
+65 −58
Original line number Diff line number Diff line
@@ -1311,18 +1311,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
 */
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_cp *cp = &rdev->cp;

	/* set to DX10/11 mode */
	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(rdev, 1);
	radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(cp, 1);
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev,
	radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(cp,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
	radeon_ring_write(cp, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(cp, ib->length_dw);
}


@@ -1360,71 +1362,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)

static int evergreen_cp_start(struct radeon_device *rdev)
{
	struct radeon_cp *cp = &rdev->cp;
	int r, i;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	r = radeon_ring_lock(rdev, cp, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);
	radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(cp, 0x1);
	radeon_ring_write(cp, 0x0);
	radeon_ring_write(cp, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(cp, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 0);
	radeon_ring_unlock_commit(rdev, cp);

	cp_me = 0xff;
	WREG32(CP_ME_CNTL, cp_me);

	r = radeon_ring_lock(rdev, evergreen_default_size + 19);
	r = radeon_ring_lock(rdev, cp, evergreen_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
	radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(cp, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < evergreen_default_size; i++)
		radeon_ring_write(rdev, evergreen_default_state[i]);
		radeon_ring_write(cp, evergreen_default_state[i]);

	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
	radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(cp, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(cp, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(cp, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(cp, 0xc0026f00);
	radeon_ring_write(cp, 0x00000000);
	radeon_ring_write(cp, 0x00000000);
	radeon_ring_write(cp, 0x00000000);

	/* Clear consts */
	radeon_ring_write(rdev, 0xc0036f00);
	radeon_ring_write(rdev, 0x00000bc4);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(cp, 0xc0036f00);
	radeon_ring_write(cp, 0x00000bc4);
	radeon_ring_write(cp, 0xffffffff);
	radeon_ring_write(cp, 0xffffffff);
	radeon_ring_write(cp, 0xffffffff);

	radeon_ring_write(rdev, 0xc0026900);
	radeon_ring_write(rdev, 0x00000316);
	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(rdev, 0x00000010); /*  */
	radeon_ring_write(cp, 0xc0026900);
	radeon_ring_write(cp, 0x00000316);
	radeon_ring_write(cp, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(cp, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev);
	radeon_ring_unlock_commit(rdev, cp);

	return 0;
}

int evergreen_cp_resume(struct radeon_device *rdev)
{
	struct radeon_cp *cp = &rdev->cp;
	u32 tmp;
	u32 rb_bufsz;
	int r;
@@ -1442,7 +1446,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	rb_bufsz = drm_order(cp->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
@@ -1456,8 +1460,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	rdev->cp.wptr = 0;
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	cp->wptr = 0;
	WREG32(CP_RB_WPTR, cp->wptr);

	/* set the wb address wether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
@@ -1475,16 +1479,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_RB_BASE, cp->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	cp->rptr = RREG32(CP_RB_RPTR);

	evergreen_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	cp->ready = true;
	r = radeon_ring_test(rdev, cp);
	if (r) {
		rdev->cp.ready = false;
		cp->ready = false;
		return r;
	}
	return 0;
@@ -2353,7 +2357,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
	return 0;
}

bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
{
	u32 srbm_status;
	u32 grbm_status;
@@ -2366,19 +2370,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		r100_gpu_lockup_update(lockup, cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	r = radeon_ring_lock(rdev, cp, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
		radeon_ring_write(cp, 0x80000000);
		radeon_ring_write(cp, 0x80000000);
		radeon_ring_unlock_commit(rdev, cp);
	}
	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
	cp->rptr = RREG32(CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, cp);
}

static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -3052,6 +3056,7 @@ restart_ih:

static int evergreen_startup(struct radeon_device *rdev)
{
	struct radeon_cp *cp = &rdev->cp;
	int r;

	/* enable pcie gen2 link */
@@ -3115,7 +3120,7 @@ static int evergreen_startup(struct radeon_device *rdev)
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	r = radeon_ring_init(rdev, cp, cp->ring_size);
	if (r)
		return r;
	r = evergreen_cp_load_microcode(rdev);
@@ -3150,7 +3155,7 @@ int evergreen_resume(struct radeon_device *rdev)
		return r;
	}

	r = r600_ib_test(rdev);
	r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
@@ -3162,9 +3167,11 @@ int evergreen_resume(struct radeon_device *rdev)

int evergreen_suspend(struct radeon_device *rdev)
{
	struct radeon_cp *cp = &rdev->cp;

	/* FIXME: we should wait for ring to be empty */
	r700_cp_stop(rdev);
	rdev->cp.ready = false;
	cp->ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);
@@ -3244,7 +3251,7 @@ int evergreen_init(struct radeon_device *rdev)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);
	r600_ring_init(rdev, &rdev->cp, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);
@@ -3270,7 +3277,7 @@ int evergreen_init(struct radeon_device *rdev)
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
+125 −117
Original line number Diff line number Diff line
@@ -49,6 +49,7 @@ static void
set_render_target(struct radeon_device *rdev, int format,
		  int w, int h, u64 gpu_addr)
{
	struct radeon_cp *cp = &rdev->cp;
	u32 cb_color_info;
	int pitch, slice;

@@ -62,23 +63,23 @@ set_render_target(struct radeon_device *rdev, int format,
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
	radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, pitch);
	radeon_ring_write(rdev, slice);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, cb_color_info);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
	radeon_ring_write(cp, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(cp, gpu_addr >> 8);
	radeon_ring_write(cp, pitch);
	radeon_ring_write(cp, slice);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, cb_color_info);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, (w - 1) | ((h - 1) << 16));
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 0);
}

/* emits 5dw */
@@ -87,6 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
		    u32 sync_type, u32 size,
		    u64 mc_addr)
{
	struct radeon_cp *cp = &rdev->cp;
	u32 cp_coher_size;

	if (size == 0xffffffff)
@@ -99,39 +101,40 @@ cp_set_surface_sync(struct radeon_device *rdev,
		 * to the RB directly. For IBs, the CP programs this as part of the
		 * surface_sync packet.
		 */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
		radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(cp, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(cp, 0); /* CP_COHER_CNTL2 */
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(rdev, sync_type);
	radeon_ring_write(rdev, cp_coher_size);
	radeon_ring_write(rdev, mc_addr >> 8);
	radeon_ring_write(rdev, 10); /* poll interval */
	radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(cp, sync_type);
	radeon_ring_write(cp, cp_coher_size);
	radeon_ring_write(cp, mc_addr >> 8);
	radeon_ring_write(cp, 10); /* poll interval */
}

/* emits 11dw + 1 surface sync = 16dw */
static void
set_shaders(struct radeon_device *rdev)
{
	struct radeon_cp *cp = &rdev->cp;
	u64 gpu_addr;

	/* VS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
	radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, 2);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
	radeon_ring_write(cp, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(cp, gpu_addr >> 8);
	radeon_ring_write(cp, 2);
	radeon_ring_write(cp, 0);

	/* PS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
	radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, 1);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 2);
	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
	radeon_ring_write(cp, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(cp, gpu_addr >> 8);
	radeon_ring_write(cp, 1);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 2);

	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -141,6 +144,7 @@ set_shaders(struct radeon_device *rdev)
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
	struct radeon_cp *cp = &rdev->cp;
	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;

	/* high addr, stride */
@@ -155,16 +159,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
		SQ_VTCX_SEL_Z(SQ_SEL_Z) |
		SQ_VTCX_SEL_W(SQ_SEL_W);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
	radeon_ring_write(rdev, 0x580);
	radeon_ring_write(rdev, gpu_addr & 0xffffffff);
	radeon_ring_write(rdev, 48 - 1); /* size */
	radeon_ring_write(rdev, sq_vtx_constant_word2);
	radeon_ring_write(rdev, sq_vtx_constant_word3);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
	radeon_ring_write(cp, PACKET3(PACKET3_SET_RESOURCE, 8));
	radeon_ring_write(cp, 0x580);
	radeon_ring_write(cp, gpu_addr & 0xffffffff);
	radeon_ring_write(cp, 48 - 1); /* size */
	radeon_ring_write(cp, sq_vtx_constant_word2);
	radeon_ring_write(cp, sq_vtx_constant_word3);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));

	if ((rdev->family == CHIP_CEDAR) ||
	    (rdev->family == CHIP_PALM) ||
@@ -185,6 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
		 int format, int w, int h, int pitch,
		 u64 gpu_addr, u32 size)
{
	struct radeon_cp *cp = &rdev->cp;
	u32 sq_tex_resource_word0, sq_tex_resource_word1;
	u32 sq_tex_resource_word4, sq_tex_resource_word7;

@@ -208,16 +213,16 @@ set_tex_resource(struct radeon_device *rdev,
	cp_set_surface_sync(rdev,
			    PACKET3_TC_ACTION_ENA, size, gpu_addr);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, sq_tex_resource_word0);
	radeon_ring_write(rdev, sq_tex_resource_word1);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, sq_tex_resource_word4);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, sq_tex_resource_word7);
	radeon_ring_write(cp, PACKET3(PACKET3_SET_RESOURCE, 8));
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, sq_tex_resource_word0);
	radeon_ring_write(cp, sq_tex_resource_word1);
	radeon_ring_write(cp, gpu_addr >> 8);
	radeon_ring_write(cp, gpu_addr >> 8);
	radeon_ring_write(cp, sq_tex_resource_word4);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, 0);
	radeon_ring_write(cp, sq_tex_resource_word7);
}

/* emits 12 */
@@ -225,6 +230,7 @@ static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
	     int x2, int y2)
{
	struct radeon_cp *cp = &rdev->cp;
	/* workaround some hw bugs */
	if (x2 == 0)
		x1 = 1;
@@ -235,43 +241,44 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
			x2 = 2;
	}

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(cp, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(cp, (x1 << 0) | (y1 << 16));
	radeon_ring_write(cp, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(cp, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(cp, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(cp, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(cp, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(cp, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(cp, (x2 << 0) | (y2 << 16));
}

/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(rdev, DI_PT_RECTLIST);
	struct radeon_cp *cp = &rdev->cp;
	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(cp, DI_PT_RECTLIST);

	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
	radeon_ring_write(rdev,
	radeon_ring_write(cp, PACKET3(PACKET3_INDEX_TYPE, 0));
	radeon_ring_write(cp,
#ifdef __BIG_ENDIAN
			  (2 << 2) |
#endif
			  DI_INDEX_SIZE_16_BIT);

	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
	radeon_ring_write(rdev, 1);
	radeon_ring_write(cp, PACKET3(PACKET3_NUM_INSTANCES, 0));
	radeon_ring_write(cp, 1);

	radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
	radeon_ring_write(rdev, 3);
	radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
	radeon_ring_write(cp, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
	radeon_ring_write(cp, 3);
	radeon_ring_write(cp, DI_SRC_SEL_AUTO_INDEX);

}

@@ -279,6 +286,7 @@ draw_auto(struct radeon_device *rdev)
static void
set_default_state(struct radeon_device *rdev)
{
	struct radeon_cp *cp = &rdev->cp;
	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
@@ -292,8 +300,8 @@ set_default_state(struct radeon_device *rdev)
	int dwords;

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(cp, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(cp, 0);

	if (rdev->family < CHIP_CAYMAN) {
		switch (rdev->family) {
@@ -550,60 +558,60 @@ set_default_state(struct radeon_device *rdev)
					    NUM_LS_STACK_ENTRIES(num_ls_stack_entries));

		/* disable dyn gprs */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(rdev, 0);
		radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(cp, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(cp, 0);

		/* setup LDS */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(rdev, 0x10001000);
		radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(cp, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(cp, 0x10001000);

		/* SQ config */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
		radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(rdev, sq_config);
		radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
		radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
		radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, sq_thread_resource_mgmt);
		radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
		radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
		radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
		radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
		radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 11));
		radeon_ring_write(cp, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(cp, sq_config);
		radeon_ring_write(cp, sq_gpr_resource_mgmt_1);
		radeon_ring_write(cp, sq_gpr_resource_mgmt_2);
		radeon_ring_write(cp, sq_gpr_resource_mgmt_3);
		radeon_ring_write(cp, 0);
		radeon_ring_write(cp, 0);
		radeon_ring_write(cp, sq_thread_resource_mgmt);
		radeon_ring_write(cp, sq_thread_resource_mgmt_2);
		radeon_ring_write(cp, sq_stack_resource_mgmt_1);
		radeon_ring_write(cp, sq_stack_resource_mgmt_2);
		radeon_ring_write(cp, sq_stack_resource_mgmt_3);
	}

	/* CONTEXT_CONTROL */
	radeon_ring_write(rdev, 0xc0012800);
	radeon_ring_write(rdev, 0x80000000);
	radeon_ring_write(rdev, 0x80000000);
	radeon_ring_write(cp, 0xc0012800);
	radeon_ring_write(cp, 0x80000000);
	radeon_ring_write(cp, 0x80000000);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(cp, 0xc0026f00);
	radeon_ring_write(cp, 0x00000000);
	radeon_ring_write(cp, 0x00000000);
	radeon_ring_write(cp, 0x00000000);

	/* SET_SAMPLER */
	radeon_ring_write(rdev, 0xc0036e00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000012);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(cp, 0xc0036e00);
	radeon_ring_write(cp, 0x00000000);
	radeon_ring_write(cp, 0x00000012);
	radeon_ring_write(cp, 0x00000000);
	radeon_ring_write(cp, 0x00000000);

	/* set to DX10/11 mode */
	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(rdev, 1);
	radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(cp, 1);

	/* emit an IB pointing at default state */
	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
	radeon_ring_write(rdev, dwords);
	radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(cp, gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(cp, upper_32_bits(gpu_addr) & 0xFF);
	radeon_ring_write(cp, dwords);

}

+68 −61

File changed.

Preview size limit exceeded, changes collapsed.

+79 −70

File changed.

Preview size limit exceeded, changes collapsed.

+11 −10
Original line number Diff line number Diff line
@@ -87,6 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
		  unsigned num_gpu_pages,
		  struct radeon_fence *fence)
{
	struct radeon_cp *cp = &rdev->cp;
	uint32_t size;
	uint32_t cur_size;
	int i, num_loops;
@@ -95,33 +96,33 @@ int r200_copy_dma(struct radeon_device *rdev,
	/* radeon pitch is /64 */
	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
	r = radeon_ring_lock(rdev, cp, num_loops * 4 + 64);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}
	/* Must wait for 2D idle & clean before DMA or hangs might happen */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (1 << 16));
	radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(cp, (1 << 16));
	for (i = 0; i < num_loops; i++) {
		cur_size = size;
		if (cur_size > 0x1FFFFF) {
			cur_size = 0x1FFFFF;
		}
		size -= cur_size;
		radeon_ring_write(rdev, PACKET0(0x720, 2));
		radeon_ring_write(rdev, src_offset);
		radeon_ring_write(rdev, dst_offset);
		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
		radeon_ring_write(cp, PACKET0(0x720, 2));
		radeon_ring_write(cp, src_offset);
		radeon_ring_write(cp, dst_offset);
		radeon_ring_write(cp, cur_size | (1 << 31) | (1 << 30));
		src_offset += cur_size;
		dst_offset += cur_size;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
	radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(cp, RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	radeon_ring_unlock_commit(rdev, cp);
	return r;
}

Loading